# Android Malware Detection
# Task: Detect the presence of malware using attributes extracted from Android applications as features.
#
import pandas as pd
import numpy as np
np.random.seed(0)
from sklearn.metrics import precision_score, recall_score, f1_score
import tensorflow as tf
tf.compat.v1.set_random_seed(0)
from tensorflow import keras
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import confusion_matrix
data = pd.read_csv(
"../input/android-malware-dataset-for-machine-learning/drebin-215-dataset-5560malware-9476-benign.csv"
)
print("Total missing values : ", sum(list(data.isna().sum())))
data
# The output class contains the categorical values 'B' and 'S', which we encode as integers. The feature columns also contain stray characters such as '?' and 'S'; we set them to NaN and remove them using dropna()
classes, count = np.unique(data["class"], return_counts=True)
# Perform Label Encoding
lbl_enc = LabelEncoder()
print(lbl_enc.fit_transform(classes), classes)
data = data.replace(classes, lbl_enc.fit_transform(classes))
# The dataset contains special characters such as '?' and 'S'. Set them to NaN and use dropna() to remove them
data = data.replace("[?,S]", np.nan, regex=True)
print("Total missing values : ", sum(list(data.isna().sum())))
data.dropna(inplace=True)
for c in data.columns:
data[c] = pd.to_numeric(data[c])
data
# Since the feature values are already 0 or 1, label-encoding the last (class) column is enough.
print("Total Features : ", len(data.columns) - 1)
plt.bar(classes, count)
plt.title("Class balance")
plt.xlabel("Classes")
plt.ylabel("Count")
plt.show()
train_x, test_x, train_y, test_y = train_test_split(
data[data.columns[: len(data.columns) - 1]].to_numpy(),
data[data.columns[-1]].to_numpy(),
test_size=0.2,
shuffle=True,
)
print("Train features size : ", len(train_x))
print("Train labels size : ", len(train_y))
print("Test features size : ", len(test_x))
print("Test features size : ", len(test_y))
train_y
print("Train features : ", train_x.shape)
print("Train labels : ", train_y.shape)
print("Test Features : ", test_x.shape)
print("Test labels : ", test_y.shape)
train_y = train_y.reshape((-1, 1))
test_y = test_y.reshape((-1, 1))
print("Train features : ", train_x.shape)
print("Train labels : ", train_y.shape)
print("Test Features : ", test_x.shape)
print("Test labels : ", test_y.shape)
model = keras.models.Sequential()
model.add(keras.layers.Dense(215, activation="relu", input_shape=(215,)))
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(1, activation="sigmoid"))
model.summary()
model.compile(
optimizer=keras.optimizers.RMSprop(0.001),
loss="binary_crossentropy",
metrics=["accuracy"],
)
ep = 5
history = model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=ep)
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(
[str(i) for i in range(1, ep + 1)],
history.history["accuracy"],
label="Train Accuracy",
)
plt.plot(
[str(i) for i in range(1, ep + 1)],
history.history["val_accuracy"],
label="Validation Accuracy",
)
plt.legend()
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.title("Epoch vs Train Loss")
plt.subplot(1, 2, 2)
plt.plot(
[str(i) for i in range(1, ep + 1)], history.history["loss"], label="Train Loss"
)
plt.plot(
[str(i) for i in range(1, ep + 1)],
history.history["val_loss"],
label="Validation Loss",
)
plt.legend()
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Epoch vs Validation loss")
plt.show()
y_pred = model.predict(test_x)
for i in range(len(y_pred)):
if y_pred[i] > (1 - y_pred[i]):
y_pred[i] = 1
else:
y_pred[i] = 0
print("Precision : ", precision_score(test_y, y_pred) * 100)
print("Recall : ", recall_score(test_y, y_pred) * 100)
print("F1 Score : ", f1_score(test_y, y_pred) * 100)
classes = ["B", "S"]
cm = confusion_matrix(test_y, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=classes)
fig, ax = plt.subplots(figsize=(10, 10))
plt.title("Confusion Matrix")
disp = disp.plot(ax=ax)
plt.show()
|
# Welcome to this project. It is originally an Excel project that I decided to tackle with both Python and Excel; this part covers the analysis of bike rides with Python.
# # Initializing Python packages
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# # Importing the data
dataframe = pd.read_excel(
"/kaggle/input/bike-rides-sale-dataset/Excel Project Dataset.xlsx"
)
# # Getting info about data
dataframe.head(n=5)
dataframe.tail(n=5)
# # General Information
dataframe.info()
dataframe.columns
dataframe.size
# # Data Wrangling Process
# Changing the M and S to Married and Single for better understanding
dataframe["Marital Status"] = dataframe["Marital Status"].replace(
{"M": "Married", "S": "Single"}
)
dataframe["Marital Status"].head(n=5)
# Changing the F and M to Female and Male for better understanding
dataframe["Gender"] = dataframe["Gender"].replace({"M": "Male", "F": "Female"})
dataframe["Gender"].head(n=5)
# Creating Age Groups
# create bins and labels for each age group
bins = [0, 19, 29, 59, dataframe["Age"].max()]
labels = ["Adolescent", "Young adult", "Adult", "Elderly"]
# create a new column with age groups
dataframe["Age Group"] = pd.cut(dataframe["Age"], bins=bins, labels=labels)
# # Exploratory Data Analysis
# * What is the average income of people who did or did not purchase a bike, by gender?
data1 = round(dataframe.groupby(["Purchased Bike", "Gender"])["Income"].mean(), 2)
data1
# # Visualization
# set the Seaborn theme
sns.set_style("darkgrid")
# create a bar chart using Seaborn
sns.barplot(
x=data1.index.get_level_values(0) + " " + data1.index.get_level_values(1),
y=data1.values,
)
# add labels and title
plt.title("Average Income by Purchased Bike and Gender")
plt.xlabel("Purchased Bike and Gender")
plt.ylabel("Average Income")
# show the chart
plt.show()
# * What are the three most common occupations of people who did purchase a bike?
# retrieving only the part of the dataframe that contains the Yes value in the Purchased Bike column
data2 = dataframe[dataframe["Purchased Bike"] == "Yes"]
data2.head(n=5)
result = data2["Occupation"].value_counts()
for occupation in result.index[:3]:
print(occupation)
# * How many bikes were purchased by each age group?
# initialising dataframe with counts of the values
data3 = data2["Age Group"].value_counts()
# loop over the age groups (data3 has one entry per group)
for i in range(len(data3)):
    # print the result for each group separately
    print(f"The {data3.index[i]} bought {data3.iloc[i]} bikes.")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from xgboost import XGBRegressor
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from matplotlib import pyplot as plt
import seaborn as sb
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import warnings
import sklearn
import tensorflow as tf
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Importing the dataset
df = pd.read_csv(
"/kaggle/input/key-indicators-of-heart-disease/heart_2022_Key_indicators.csv"
)
df.head()
df.info()
df
# Data Processing
# map HeartDisease from Yes and No to 1 and 0
df["HeartDisease"] = df["HeartDisease"].map({"Yes": 1, "No": 0})
# convert Smoking from Yes and No to 1 and 0
df["Smoking"] = df["Smoking"].map({"Yes": 1, "No": 0})
# convert alcohol drinking from Yes and No to 1 and 0
df["AlcoholDrinking"] = df["AlcoholDrinking"].map({"Yes": 1, "No": 0})
# convert stroke from Yes and No to 1 and 0
df["Stroke"] = df["Stroke"].map({"Yes": 1, "No": 0})
# convert difficulty walking to one-hot encoding
df = pd.concat(
[df, pd.get_dummies(df["DiffWalking"], prefix="DifficultyWalking")], axis=1
)
# convert sex: {Female, Male} to 1 and 0
df["Sex"] = df["Sex"].map({"Male": 1, "Female": 0})
# convert age category: {18-24, 25-29, 30-34, 35-39, 40-44, 45-49, 50-54, 55-59, 60-64, 65-69, 70-74, 75-79, 80 or older} to one-hot encoding
df = pd.concat([df, pd.get_dummies(df["AgeCategory"], prefix="AgeCategory")], axis=1)
# convert ['American Indian/Alaskan Native', 'Asian', 'Black', 'Hispanic', 'Other', 'White'] to one-hot encoding
df = pd.concat([df, pd.get_dummies(df["Race"], prefix="Race")], axis=1)
# convert diabetic to one-hot encoding
df = pd.concat([df, pd.get_dummies(df["Diabetic"], prefix="Diabetic")], axis=1)
# convert physical activity from Yes and No to 1 and 0
df["PhysicalActivity"] = df["PhysicalActivity"].map({"Yes": 1, "No": 0})
# convert general health from {Excellent, Very good, Good, Fair, Poor} to one-hot encoding
df = pd.concat([df, pd.get_dummies(df["GenHealth"], prefix="GenHealth")], axis=1)
# convert asthma from Yes and No to 1 and 0
df["Asthma"] = df["Asthma"].map({"Yes": 1, "No": 0})
# convert kidney disease from Yes and No to 1 and 0
df["KidneyDisease"] = df["KidneyDisease"].map({"Yes": 1, "No": 0})
# convert skin cancer from Yes and No to 1 and 0
df["SkinCancer"] = df["SkinCancer"].map({"Yes": 1, "No": 0})
# drop onehot coded
df = df.drop(["DiffWalking"], axis=1)
df = df.drop(["AgeCategory"], axis=1)
df = df.drop(["Race"], axis=1)
df = df.drop(["GenHealth"], axis=1)
df = df.drop(["Diabetic"], axis=1)
df
# HeartDisease is the target variable/label
y = df["HeartDisease"]
# drop the target variable from the dataset and keep the rest for the features
X = df.drop(["HeartDisease"], axis=1)
# split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.15, random_state=12
)
# Arbitrary exploration: Logistic Regression
from sklearn.linear_model import LogisticRegression
lr_10 = LogisticRegression(max_iter=10)
lr_10.fit(X_train, y_train)
lr_100 = LogisticRegression(max_iter=100)
lr_100.fit(X_train, y_train)
lr_1000 = LogisticRegression(max_iter=1000)
lr_1000.fit(X_train, y_train)
# assess the model
y_pred = lr_10.predict(X_test)
print(
"Accuracy of logistic regression classifier 10 iterations on test set: {:.2f}".format(
lr_10.score(X_test, y_test)
)
)
# create a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print("True Negatives: ", cm[0][0])
print("False Positives: ", cm[0][1])
print("False Negatives: ", cm[1][0])
print("True Positives: ", cm[1][1])
y_pred = lr_100.predict(X_test)
print(
"Accuracy of logistic regression classifier 100 iterations on test set: {:.2f}".format(
lr_100.score(X_test, y_test)
)
)
# create a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print("True Negatives: ", cm[0][0])
print("False Positives: ", cm[0][1])
print("False Negatives: ", cm[1][0])
print("True Positives: ", cm[1][1])
y_pred = lr_1000.predict(X_test)
print(
"Accuracy of logistic regression classifier 1000 iterations on test set: {:.2f}".format(
lr_1000.score(X_test, y_test)
)
)
# create a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print("True Negatives: ", cm[0][0])
print("False Positives: ", cm[0][1])
print("False Negatives: ", cm[1][0])
print("True Positives: ", cm[1][1])
# Seems like the model favors high true negative and low false positive rates, at the cost of low true positive and high false negative rates.
# We'll weight the classes to account for the class imbalance in the data set
class_weight = {
0: 1.0,
1: df["HeartDisease"].value_counts()[0] / df["HeartDisease"].value_counts()[1],
}
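# Note (addition, not from the original notebook): scikit-learn can produce an equivalent
# weighting automatically with class_weight="balanced", which weights each class by the
# inverse of its frequency and is proportional to the manual dictionary above.
lr_balanced = LogisticRegression(max_iter=10000, class_weight="balanced")  # illustration only, not fitted here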
# logistic regression with class_weight
lr = LogisticRegression(max_iter=10000, class_weight=class_weight)
lr.fit(X_train, y_train)
# assess the model
y_pred = lr.predict(X_test)
print(
"Accuracy of logistic regression classifier on test set: {:.2f}".format(
lr.score(X_test, y_test)
)
)
# create a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print("True Negatives: ", cm[0][0])
print("False Positives: ", cm[0][1])
print("False Negatives: ", cm[1][0])
print("True Positives: ", cm[1][1])
# assess the model
y_pred = lr.predict(X_test)
print(
"Accuracy of logistic regression classifier 10000 iterations on test set: {:.2f}".format(
lr.score(X_test, y_test)
)
)
# create a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print("True Negatives: ", cm[0][0])
print("False Positives: ", cm[0][1])
print("False Negatives: ", cm[1][0])
print("True Positives: ", cm[1][1])
# # Reflection
# ### After accounting for the class imbalance, it seems the model has improved in terms of identifying positive cases (True Positives) but at the cost of increased False Positives.
# ### We can use a Deep Neural Network (DNN) to see if we can achieve a better balance between True Positives and False Positives, and improve the overall performance of the model.
from keras.models import Sequential
from keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(128, input_dim=X_train.shape[1], activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(
X_train,
y_train,
epochs=20,
batch_size=64,
validation_data=(X_test, y_test),
class_weight=class_weight,
)
# predict the test set
y_pred = model.predict(X_test)
# convert the predicted values to 0 and 1
y_pred = y_pred > 0.5
# calculate the accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
# calculate the confusion matrix
from sklearn.metrics import confusion_matrix
# seaborn heatmap
sb.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt="d")
model = Sequential()
model.add(Dense(64, input_dim=X_train.shape[1], activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(
X_train,
y_train,
epochs=20,
batch_size=64,
validation_data=(X_test, y_test),
class_weight=class_weight,
)
# predict the test set
y_pred = model.predict(X_test)
# convert the predicted values to 0 and 1
y_pred = y_pred > 0.5
# calculate the accuracy
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
# calculate the confusion matrix
from sklearn.metrics import confusion_matrix
# seaborn heatmap
sb.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt="d")
|
# # ***Classification Code from Machine Learning A-Z on Udemy***
# > By Pr0fess0rOP
# # Importing Library and Dataset Splits
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
x = dataset.iloc[:, 1:-1]
y = dataset.iloc[:, -1]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
# from sklearn.preprocessing import StandardScaler
# sc = StandardScaler()
# x_train = sc.fit_transform(x_train)
# x_test = sc.transform(x_test)
# # Logistic Regression
# Training the Logistic Regression model on the Training set
from sklearn.linear_model import LogisticRegression
modelLR = LogisticRegression()
modelLR.fit(x_train, y_train)
# Predicting the Test set results
y_predLR = modelLR.predict(x_test)
np.set_printoptions(precision=2)
# print(np.concatenate((y_predLR.reshape(len(y_predLR),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_predLR)
print(cm)
accuracyLR = accuracy_score(y_test, y_predLR)
print(accuracyLR)
# # KNN
# Training the KNN model on the Training set
from sklearn.neighbors import KNeighborsClassifier
ModelKNN = KNeighborsClassifier()
ModelKNN.fit(x_train, y_train)
# Predicting the Test set results
y_predKNN = ModelKNN.predict(x_test)
np.set_printoptions(precision=2)
# print(np.concatenate((y_predKNN.reshape(len(y_predKNN),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_predKNN)
print(cm)
accuracyKNN = accuracy_score(y_test, y_predKNN)
print(accuracyKNN)
# # Support Vector Machine
# Training the SVC (linear kernel) model on the Training set
from sklearn.svm import SVC
modelSVC = SVC(kernel="linear", random_state=0)
modelSVC.fit(x_train, y_train)
# Predicting the Test set results
y_predSVC = modelSVC.predict(x_test)
np.set_printoptions(precision=2)
# print(np.concatenate((y_predSVR.reshape(len(y_predSVR),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_predSVC)
print(cm)
accuracySVC = accuracy_score(y_test, y_predSVC)
print(accuracySVC)
# # Kernel SVM
# Training the kernel SVC (RBF) model on the Training set
from sklearn.svm import SVC
modelKSVC = SVC(kernel="rbf", random_state=0)
modelKSVC.fit(x_train, y_train)
# Predicting the Test set results
y_predKSVC = modelKSVC.predict(x_test)
np.set_printoptions(precision=2)
# print(np.concatenate((y_predSVR.reshape(len(y_predSVR),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_predKSVC)
print(cm)
accuracyKSVC = accuracy_score(y_test, y_predKSVC)
print(accuracyKSVC)
# # Naive Bayes
# Training the Gaussian Naive Bayes model on the Training set
from sklearn.naive_bayes import GaussianNB
modelGNB = GaussianNB()
modelGNB.fit(x_train, y_train)
# Predicting the Test set results
y_predGNB = modelGNB.predict(x_test)
np.set_printoptions(precision=2)
# print(np.concatenate((y_predSVR.reshape(len(y_predSVR),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_predGNB)
print(cm)
accuracyGNB = accuracy_score(y_test, y_predGNB)
print(accuracyGNB)
# # Decision Tree
# Training the Decision Tree Classifier model on the Training set
from sklearn.tree import DecisionTreeClassifier
modelDT = DecisionTreeClassifier(random_state=0)
modelDT.fit(x_train, y_train)
# Predicting the Test set results
y_predDT = modelDT.predict(x_test)
np.set_printoptions(precision=2)
# print(np.concatenate((y_predDT.reshape(len(y_predDT),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_predDT)
print(cm)
accuracyDT = accuracy_score(y_test, y_predDT)
print(accuracyDT)
# # Random Forest
# Training the Random Forest Classifier model on the Training set
from sklearn.ensemble import RandomForestClassifier
modelRF = RandomForestClassifier(n_estimators=10, random_state=0)
modelRF.fit(x_train, y_train)
# Predicting the Test set results
y_predRF = modelRF.predict(x_test)
np.set_printoptions(precision=2)
# print(np.concatenate((y_predRF.reshape(len(y_predRF),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_predRF)
print(cm)
accuracyRF = accuracy_score(y_test, y_predRF)
print(accuracyRF)
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_set = pd.read_csv("/kaggle/input/emotions-analysis/EA-train.txt", delimiter=";")
test_set = pd.read_csv("/kaggle/input/emotions-analysis/EA-test.txt", delimiter=";")
train_set.columns = ["Text", "Emotion"]
test_set.columns = ["Text", "Emotion"]
train_set
test_set
# **Pre-Processing**
# Label Encoder
train_set["Emotion"] = (
train_set["Emotion"]
.replace({"sadness": 0, "anger": 1, "love": 2, "surprise": 3, "fear": 4, "joy": 5})
.astype(int)
)
test_set["Emotion"] = (
test_set["Emotion"]
.replace({"sadness": 0, "anger": 1, "love": 2, "surprise": 3, "fear": 4, "joy": 5})
.astype(int)
)
# Remove Missing Value
print("Train set")
print(train_set.isnull().sum())
train_set = train_set.dropna()
print()
print(train_set.isnull().sum())
print(f"\nTest set")
print(test_set.isnull().sum())
test_set = test_set.dropna()
print()
print(test_set.isnull().sum())
# cleaning the training set
import re
import nltk
nltk.download("stopwords") # stop words that are not necessary for predictions
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer # used to apply steming on the text
clean_train = [] # list to include the cleaned text in.
for i in range(0, len(train_set)):
    clean = re.sub(
        "[^a-zA-Z]", " ", train_set["Text"][i]
    )  # replace every non-letter character with a space; train_set['Text'] is the column that holds the text
clean = clean.lower() # all the the text in lower case.
clean = clean.split() # split the text into words
ps = PorterStemmer() # object of the stemmer
all_stopwords = stopwords.words("english")
all_stopwords.remove("not") # remove (not) from stopwords
clean = [
ps.stem(word) for word in clean if not word in set(all_stopwords)
] # remove all stopwords from the text
clean = " ".join(clean) # join the words seperated by spaces
clean_train.append(clean)
# cleaning the test set
clean_test = [] # list to include the cleaned text in.
for i in range(0, len(test_set)):
    clean = re.sub(
        "[^a-zA-Z]", " ", test_set["Text"][i]
    )  # replace every non-letter character with a space; test_set['Text'] is the column that holds the text
clean = clean.lower() # all the the text in lower case.
clean = clean.split() # split the text into words
ps = PorterStemmer() # object of the stemmer
all_stopwords = stopwords.words("english")
all_stopwords.remove("not") # remove (not) from stopwords
clean = [
ps.stem(word) for word in clean if not word in set(all_stopwords)
] # remove all stopwords from the text
clean = " ".join(clean) # join the words seperated by spaces
clean_test.append(clean)
# **Bag of Words**
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
X_train = cv.fit_transform(clean_train).toarray()
y_train = train_set.iloc[:, -1].values
X_test = cv.transform(clean_test).toarray()
y_test = test_set.iloc[:, -1].values
y_train, y_test
X_train, X_test
# **Machine Learning Model**
from scipy.stats import uniform, randint
from sklearn.model_selection import (
RandomizedSearchCV,
) # used for tuning the models parameters
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=0)
# range for the parameters used in the model
param_grid = {
"max_depth": [3, None],
"max_features": randint(1, 9),
"min_samples_leaf": randint(1, 9),
"criterion": ["gini", "entropy"],
}
# Instantiating RandomizedSearchCV object
dt_cv = RandomizedSearchCV(
dt, param_grid, n_iter=20, cv=5, verbose=2, random_state=42, n_jobs=1
)
dt_cv.fit(X_train, y_train)
# Print the best tuned parameters and score
print("Tuned Decision Tree Parameters: {}".format(dt_cv.best_params_))
print("Best score is {}".format(dt_cv.best_score_))
dt = DecisionTreeClassifier(
criterion="gini", max_depth=None, max_features=4, min_samples_leaf=1
)
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
param_grid = {"alpha": [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]}
# Instantiating RandomizedSearchCV object
nb_cv = RandomizedSearchCV(
nb, param_grid, n_iter=20, cv=5, verbose=2, random_state=42, n_jobs=1
)  # cv: number of cross-validation folds; n_iter: number of parameter settings sampled
nb_cv.fit(X_train, y_train)
# Print the best tuned parameters and score
print("Tuned Decision Tree Parameters: {}".format(nb_cv.best_params_))
print("Best score is {}".format(nb_cv.best_score_))
nb = MultinomialNB(alpha=0.4)
# **Train and Testing Model**
nb.fit(X_train, y_train)
y_pred = nb.predict(X_test)
# confusion matrix
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)  # rows: true labels, columns: predicted labels
print(cm)
# plot the confusion matrix
f, ax = plt.subplots(figsize=(10, 10))
labels = ["sadness", "anger", "love", "surprise", "fear", "joy"]
sns.heatmap(
cm,
annot=True,
xticklabels=labels,
yticklabels=labels,
linewidths=0.01,
cmap="Oranges",
linecolor="gray",
fmt=".1f",
ax=ax,
)
plt.show()
def pred_outcome(new_comment):
new_comment = re.sub("[^a-zA-Z]", " ", new_comment)
new_comment = new_comment.lower()
new_comment = new_comment.split()
ps = PorterStemmer()
all_stopwords = stopwords.words("english")
all_stopwords.remove("not")
new_comment = [
ps.stem(word) for word in new_comment if not word in set(all_stopwords)
]
new_comment = " ".join(new_comment)
new_corpus = [new_comment]
new_X = cv.transform(new_corpus).toarray()
new_y_pred = nb.predict(new_X)
if new_y_pred == 0:
print("Sadness")
elif new_y_pred == 1:
print("Anger")
elif new_y_pred == 2:
print("Love")
elif new_y_pred == 3:
print("Surprise")
elif new_y_pred == 4:
print("Fear")
elif new_y_pred == 5:
print("Joy")
text = "i am sad"
pred_outcome(text)
text = "i am angry"
pred_outcome(text)
text = "i am surprised"
pred_outcome(text)
text = "i am afraid"
pred_outcome(text)
text = "i am happy"
pred_outcome(text)
text = "love heart"
pred_outcome(text)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#
# Original Dataset
# ----------------
# train.csv - the training set
# test.csv - the test set
# ---------------
# We split the original dataset explicitly on a comma and a semicolon. From train.csv we obtained three columns (chemical id, assay id, expected), producing mytrainingdata; from test.csv we obtained two columns (chemical id, assay id), producing mytestdata. A hedged sketch of this parsing step is shown below.
# Parsed Dataset
# -----------------
# mytrainingdata.csv - the training set
# mytestdata.csv - the test set
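# A hedged sketch of the parsing step described above (not part of the original notebook).
# It assumes the raw train.csv has columns "Id" (holding "<chemical id>;<assay id>") and
# "Expected", and that the raw test file has a single "x" column in the same form.
import pandas as pd

raw_train = pd.read_csv("train.csv")
train_ids = raw_train["Id"].str.split(";", n=1, expand=True)
pd.DataFrame(
    {"Id": train_ids[0], "assay id": train_ids[1], "Expected": raw_train["Expected"]}
).to_csv("mytrainingdata.csv", index=False)

raw_test = pd.read_csv("test.csv")
test_ids = raw_test["x"].str.split(";", n=1, expand=True)
pd.DataFrame({"Id": test_ids[0], "assay id": test_ids[1]}).to_csv(
    "mytestdata.csv", index=False
)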
# Command to install RDKit (omitted here; in a Kaggle notebook this is typically `!pip install rdkit`)
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
classification_report,
f1_score,
)
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn import preprocessing
import xgboost as xgb
from rdkit import Chem
from rdkit.Chem import Descriptors
# Load the data
data = pd.read_csv("/kaggle/input/mytraintingdata/mytrainingdata.csv")
test_dt = pd.read_csv("/kaggle/input/mytestdata/mytestdata.csv")
# Define a function to compute molecular descriptors
def compute_descriptors(mol):
    # Left as a stub in the original notebook: it returns None, so the "Descriptors"
    # column created below is never used for modelling; the individual descriptors
    # (MW, LogP, TPSA, BalabanJ, Kappa2) are computed explicitly instead.
    pass
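# A minimal illustration (assumption, not used below) of what compute_descriptors could
# return if one wanted a single dictionary of RDKit descriptors per molecule:
def compute_descriptors_example(mol):
    if mol is None:
        return np.nan
    return {
        "MolWt": Descriptors.MolWt(mol),
        "MolLogP": Descriptors.MolLogP(mol),
        "TPSA": Descriptors.TPSA(mol),
    }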
# Convert SMILES strings to molecular objects and compute RDKit descriptors
data["mol"] = data["Id"].apply(
lambda x: Chem.MolFromSmiles(x) if x is not None else np.nan
)
data = data.dropna()
data["Descriptors"] = data["mol"].apply(compute_descriptors)
test_dt["mol"] = test_dt["Id"].apply(
lambda x: Chem.MolFromSmiles(x) if x is not None else np.nan
)
test_dt = test_dt.dropna()
test_dt["Descriptors"] = test_dt["mol"].apply(compute_descriptors)
data["MW"] = data["mol"].apply(lambda x: Descriptors.MolWt(x))
data["LogP"] = data["mol"].apply(lambda x: Descriptors.MolLogP(x))
data["TPSA"] = data["mol"].apply(lambda x: Descriptors.TPSA(x))
data["BalabanJ"] = data["mol"].apply(lambda x: Descriptors.BalabanJ(x))
data["Kappa2"] = data["mol"].apply(lambda x: Descriptors.Kappa2(x))
test_dt["MW"] = test_dt["mol"].apply(lambda x: Descriptors.MolWt(x))
test_dt["LogP"] = test_dt["mol"].apply(lambda x: Descriptors.MolLogP(x))
test_dt["TPSA"] = test_dt["mol"].apply(lambda x: Descriptors.TPSA(x))
test_dt["BalabanJ"] = test_dt["mol"].apply(lambda x: Descriptors.BalabanJ(x))
test_dt["Kappa2"] = test_dt["mol"].apply(lambda x: Descriptors.Kappa2(x))
# Convert categorical variables to numerical
le = preprocessing.LabelEncoder()
columns = ["Id", "MW", "LogP", "TPSA", "BalabanJ", "Kappa2", "assay id"]
for column in columns:
data[column] = le.fit_transform(data[column])
classes = le.classes_
test_dt[column] = test_dt[column].map(
lambda x: x if x in classes else classes[np.argmax(np.bincount(data[column]))]
)
test_dt[column] = le.transform(test_dt[column])
# Extract feature columns and target column
X = data.drop(["Expected"], axis=1)
data["Expected"] = data["Expected"].map({1: 1, 2: 0})
y = data["Expected"]
# Scale the data (drop the non-numeric "mol" objects and the unused "Descriptors" column)
scaler = StandardScaler()
X = scaler.fit_transform(X.drop(["mol", "Descriptors"], axis=1))
test_dt = scaler.transform(test_dt.drop(["mol", "Descriptors"], axis=1))
# check unique values of y
unique_y = np.unique(y)
print("Unique values of y:", unique_y)
# Define the model
topmlmodel = xgb.XGBClassifier(
objective="binary:logistic",
eval_metric="logloss",
use_label_encoder=False,
n_jobs=-1,
)
# Define the parameters to be searched in the grid search
param_grid = {
"learning_rate": [0.1],
"max_depth": [11],
"n_estimators": [900],
"gamma": [0.7],
"subsample": [0.8],
"colsample_bytree": [0.8],
"reg_alpha": [0.5],
"reg_lambda": [0.5],
}
# Perform grid search to find the best parameters for the model
grid_search = GridSearchCV(topmlmodel, param_grid, cv=5)
grid_search.fit(X, y)
# Print the best parameters
print("Best parameters:", grid_search.best_params_)
# Make predictions on the test set
y_pred_test = grid_search.predict(test_dt)
y_pred_test[y_pred_test == 0] = 2
# Create a submission file
Testing_df = pd.read_csv(
"/kaggle/input/the-toxicity-prediction-challenge-ii/test_II.csv"
)
submission_pred = pd.DataFrame(y_pred_test, columns=["Predicted"])
submission_pred_concat = pd.concat([Testing_df, submission_pred], axis=1)
submission_pred_concat.rename(columns={"x": "Id"}, inplace=True)
submission_pred_concat.to_csv("XGBoost_prediction.csv", index=False)
|
# ## Business Problem
# Can a machine learning project be carried out to predict the salaries of baseball players whose salary information and 1986 career statistics are provided?
# ## Dataset Story
# This dataset was originally taken from the StatLib library at Carnegie Mellon University. It is part of the data used in the 1988 ASA Graphics Section Poster Session. The salary data were originally taken from Sports Illustrated, April 20, 1987.
# The 1986 and career statistics were obtained from the 1987 Baseball Encyclopedia Update published by Collier Books, Macmillan Publishing Company, New York.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.model_selection import (
train_test_split,
GridSearchCV,
cross_validate,
cross_val_score,
validation_curve,
)
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 500)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
# ## EDA
# Read the dataset.
df_ = pd.read_csv("/kaggle/input/hitters-baseball-data/Hitters.csv")
df = df_.copy()
# Overview of the dataset.
def check_df(dataframe, head=5):
print("########Shape########")
print(dataframe.shape)
print("########Types########")
print(dataframe.dtypes)
print("########Head#########")
print(dataframe.head(head))
print("##########NA#########")
print(dataframe.isnull().sum())
print("#######Describe#####")
print(
dataframe.describe(
[0.05, 0.1, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
).T
)
check_df(df)
# Identify the categorical and numerical variables.
def grab_col_names(dataframe, cat_th=10, car_th=20):
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].dtypes != "O" and dataframe[col].nunique() < cat_th
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].dtypes == "O" and dataframe[col].nunique() > car_th
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
# Summary of the categorical variables.
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("######################")
if plot:
sns.countplot(x=dataframe[col_name], data=dataframe)
plt.show()
for col in cat_cols:
cat_summary(df, col)
# Summary of the numerical variables.
def num_summary(dataframe, col_name, plot=False):
quantiles = [0.05, 0.1, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
print(dataframe[col_name].describe(quantiles).T)
print("######################")
if plot:
dataframe[col_name].hist(bins=20)
plt.xlabel(col_name)
plt.title(col_name)
plt.show(block=True)
for col in num_cols:
num_summary(df, col)
# Relationship of the categorical variables with the target variable.
def target_summary_with_cat(dataframe, target, categorical_col):
print(
pd.DataFrame(
{
"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean(),
"Count": dataframe[categorical_col].value_counts(),
"Ratio": 100
* dataframe[categorical_col].value_counts()
/ len(dataframe),
}
),
end="\n\n\n",
)
for col in cat_cols:
target_summary_with_cat(df, "Salary", col)
# Identify highly correlated variables.
def high_correlated_cols(dataframe, plot=False, corr_th=0.90):
corr = dataframe.corr()
cor_matrix = corr.abs()
    upper_triangle_matrix = cor_matrix.where(
        np.triu(np.ones(cor_matrix.shape), k=1).astype(bool)
    )
drop_list = [
col
for col in upper_triangle_matrix.columns
if any(upper_triangle_matrix[col] > corr_th)
]
if plot:
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(rc={"figure.figsize": (15, 15)})
sns.heatmap(corr, cmap="RdBu")
plt.show()
return drop_list
high_correlated_cols(df, plot=True)
# Outlier analysis.
def outlier_thresholds(dataframe, variable, q1=0.25, q3=0.75):
quartile1 = dataframe[variable].quantile(q1)
quartile3 = dataframe[variable].quantile(q3)
iqr = quartile3 - quartile1
low_limit = quartile1 - 1.5 * iqr
up_limit = quartile3 + 1.5 * iqr
return low_limit, up_limit
def check_outlier(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
if dataframe[
(dataframe[variable] < low_limit) | (dataframe[variable] > up_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
print(check_outlier(df, col))
# Cap outliers at the thresholds.
def replace_with_thresholds(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
for col in num_cols:
if check_outlier(df, col):
replace_with_thresholds(df, col)
# Missing value analysis.
def missing_values_table(dataframe, na_name=False):
na_cols = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_cols].isnull().sum().sort_values(ascending=False)
ratio = (dataframe[na_cols].isnull().sum() / dataframe.shape[0] * 100).sort_values(
ascending=False
)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_cols
missing_values_table(df, na_name=True)
# Drop the missing values.
df.dropna(inplace=True)
# ## Feature Engineering
# Identify the numerical variables.
new_num_cols = [col for col in num_cols if col != "Salary"]
df[new_num_cols] = df[new_num_cols] + 0.0000000001
# Define new variables.
df["NEW_Hits"] = df["Hits"] / df["CHits"] + df["Hits"]
df["NEW_RBI"] = df["RBI"] / df["CRBI"]
df["NEW_Walks"] = df["Walks"] / df["CWalks"]
df["NEW_PutOuts"] = df["PutOuts"] * df["Years"]
df["Hits_Success"] = (df["Hits"] / df["AtBat"]) * 100
df["NEW_CRBI*CATBAT"] = df["CRBI"] * df["CAtBat"]
df["NEW_RBI"] = df["RBI"] / df["CRBI"]
df["NEW_Chits"] = df["CHits"] / df["Years"]
df["NEW_CHmRun"] = df["CHmRun"] * df["Years"]
df["NEW_CRuns"] = df["CRuns"] / df["Years"]
df["NEW_Chits"] = df["CHits"] * df["Years"]
df["NEW_RW"] = df["RBI"] * df["Walks"]
df["NEW_RBWALK"] = df["RBI"] / df["Walks"]
df["NEW_CH_CB"] = df["CHits"] / df["CAtBat"]
df["NEW_CHm_CAT"] = df["CHmRun"] / df["CAtBat"]
df["NEW_Diff_Atbat"] = df["AtBat"] - (df["CAtBat"] / df["Years"])
df["NEW_Diff_Hits"] = df["Hits"] - (df["CHits"] / df["Years"])
df["NEW_Diff_HmRun"] = df["HmRun"] - (df["CHmRun"] / df["Years"])
df["NEW_Diff_Runs"] = df["Runs"] - (df["CRuns"] / df["Years"])
df["NEW_Diff_RBI"] = df["RBI"] - (df["CRBI"] / df["Years"])
df["NEW_Diff_Walks"] = df["Walks"] - (df["CWalks"] / df["Years"])
# Encoding operations.
def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
df = one_hot_encoder(df, cat_cols, drop_first=True)
df.head()
# Scaling operations.
cat_cols, num_cols, cat_but_car = grab_col_names(df)
num_cols = [col for col in num_cols if col not in ["Salary"]]
scaler = StandardScaler()
df[num_cols] = scaler.fit_transform(df[num_cols])
df.head()
# ## Model
# Determine the dependent and independent variables.
y = df["Salary"]
X = df.drop(["Salary"], axis=1)
# Models to be evaluated.
models = [
("LR", LinearRegression()),
("Ridge", Ridge()),
("Lasso", Lasso()),
("ElasticNet", ElasticNet()),
("KNN", KNeighborsRegressor()),
("CART", DecisionTreeRegressor()),
("RF", RandomForestRegressor()),
("SVR", SVR()),
("GBM", GradientBoostingRegressor()),
("XGBoost", XGBRegressor(objective="reg:squarederror")),
("LightGBM", LGBMRegressor()),
("CatBoost", CatBoostRegressor(verbose=False)),
]
for name, regressor in models:
rmse = np.mean(
np.sqrt(
-cross_val_score(regressor, X, y, cv=10, scoring="neg_mean_squared_error")
)
)
print(f"RMSE: {round(rmse, 4)} ({name}) ")
# Random Forest optimization.
rf_model = RandomForestRegressor(random_state=17)
rf_params = {
"max_depth": [5, 8, 15, None],
"max_features": [5, 7, "auto"],
"min_samples_split": [8, 15, 20],
"n_estimators": [200, 500],
}
rf_best_grid = GridSearchCV(rf_model, rf_params, cv=5, n_jobs=-1, verbose=True).fit(
X, y
)
rf_final = rf_model.set_params(**rf_best_grid.best_params_, random_state=17).fit(X, y)
rmse = np.mean(
np.sqrt(-cross_val_score(rf_final, X, y, cv=10, scoring="neg_mean_squared_error"))
)
rmse
# GBM optimization.
gbm_model = GradientBoostingRegressor(random_state=17)
gbm_params = {
"learning_rate": [0.01, 0.1],
"max_depth": [3, 8],
"n_estimators": [500, 1000],
"subsample": [1, 0.5, 0.7],
}
gbm_best_grid = GridSearchCV(gbm_model, gbm_params, cv=5, n_jobs=-1, verbose=True).fit(
X, y
)
gbm_final = gbm_model.set_params(
**gbm_best_grid.best_params_,
random_state=17,
).fit(X, y)
rmse = np.mean(
np.sqrt(-cross_val_score(gbm_final, X, y, cv=10, scoring="neg_mean_squared_error"))
)
rmse
# LightGBM optimization.
lgbm_model = LGBMRegressor(random_state=17)
lgbm_params = {
"learning_rate": [0.01, 0.1],
"n_estimators": [300, 500],
"colsample_bytree": [0.7, 1],
}
lgbm_best_grid = GridSearchCV(
lgbm_model, lgbm_params, cv=5, n_jobs=-1, verbose=True
).fit(X, y)
lgbm_final = lgbm_model.set_params(**lgbm_best_grid.best_params_, random_state=17).fit(
X, y
)
rmse = np.mean(
np.sqrt(-cross_val_score(lgbm_final, X, y, cv=10, scoring="neg_mean_squared_error"))
)
rmse
# CatBoost optimization.
catboost_model = CatBoostRegressor(random_state=17, verbose=False)
catboost_params = {
"iterations": [200, 500],
"learning_rate": [0.01, 0.1],
"depth": [3, 6],
}
catboost_best_grid = GridSearchCV(
catboost_model, catboost_params, cv=5, n_jobs=-1, verbose=True
).fit(X, y)
catboost_final = catboost_model.set_params(
**catboost_best_grid.best_params_, random_state=17
).fit(X, y)
rmse = np.mean(
np.sqrt(
-cross_val_score(catboost_final, X, y, cv=10, scoring="neg_mean_squared_error")
)
)
rmse
# Determine the final model.
rf_params = {
"max_depth": [5, 8, 15, None],
"max_features": [5, 7, "auto"],
"min_samples_split": [8, 15, 20],
"n_estimators": [200, 500],
}
gbm_params = {
"learning_rate": [0.01, 0.1],
"max_depth": [3, 8],
"n_estimators": [500, 1000],
"subsample": [1, 0.5, 0.7],
}
lightgbm_params = {
"learning_rate": [0.01, 0.1],
"n_estimators": [300, 500],
"colsample_bytree": [0.7, 1],
}
catboost_params = {
"iterations": [200, 500],
"learning_rate": [0.01, 0.1],
"depth": [3, 6],
}
regressors = [
("RF", RandomForestRegressor(), rf_params),
("GBM", GradientBoostingRegressor(), gbm_params),
("LightGBM", LGBMRegressor(), lightgbm_params),
("CatBoost", CatBoostRegressor(), catboost_params),
]
best_models = {}
for name, regressor, params in regressors:
print(f"########## {name} ##########")
rmse = np.mean(
np.sqrt(
-cross_val_score(regressor, X, y, cv=10, scoring="neg_mean_squared_error")
)
)
print(f"RMSE: {round(rmse, 4)} ({name}) ")
gs_best = GridSearchCV(regressor, params, cv=3, n_jobs=-1, verbose=False).fit(X, y)
final_model = regressor.set_params(**gs_best.best_params_)
rmse = np.mean(
np.sqrt(
-cross_val_score(final_model, X, y, cv=10, scoring="neg_mean_squared_error")
)
)
print(f"RMSE (After): {round(rmse, 4)} ({name}) ")
print(f"{name} best params: {gs_best.best_params_}", end="\n\n")
best_models[name] = final_model
# Determine feature importance for each model.
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
    if save:
        plt.savefig("importances.png")
    plt.show()
plot_importance(rf_final, X)
plot_importance(gbm_final, X)
plot_importance(lgbm_final, X)
plot_importance(catboost_final, X)
# Determine parameters using validation curves.
def val_curve_params(model, X, y, param_name, param_range, scoring="roc_auc", cv=10):
train_score, test_score = validation_curve(
model,
X=X,
y=y,
param_name=param_name,
param_range=param_range,
scoring=scoring,
cv=cv,
)
mean_train_score = np.mean(train_score, axis=1)
mean_test_score = np.mean(test_score, axis=1)
plt.plot(param_range, mean_train_score, label="Training Score", color="b")
plt.plot(param_range, mean_test_score, label="Validation Score", color="g")
plt.title(f"Validation Curve for {type(model).__name__}")
plt.xlabel(f"Number of {param_name}")
plt.ylabel(f"{scoring}")
plt.tight_layout()
plt.legend(loc="best")
plt.show()
# Parameters for the RF model.
rf_val_params = [
["max_depth", [5, 8, 15, 20, 30, None]],
["max_features", [3, 5, 7, "auto"]],
["min_samples_split", [2, 5, 8, 15, 20]],
["n_estimators", [10, 50, 100, 200, 500]],
]
rf_model = RandomForestRegressor(random_state=17)
for i in range(len(rf_val_params)):
val_curve_params(
rf_model,
X,
y,
rf_val_params[i][0],
rf_val_params[i][1],
scoring="neg_mean_absolute_error",
)
rf_val_params[0][1]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# * This code imports some commonly used libraries for data analysis and visualization in Python. After importing these libraries, you can use their functions to perform various data analysis and visualization tasks.
# # **Reading the file**
import pandas as pd
df = pd.read_csv("/kaggle/input/date-of-real-estate3/real estate.csv")
df.head()
# * This code uses the pandas library to read a CSV file named "real estate.csv" located at /kaggle/input/date-of-real-estate3/ and stores its contents in a pandas DataFrame called df.
# * The head() method is then called on df to display the first 5 rows of the DataFrame.
# # **Data Cleansing and Improvement**
# # Find if there is some duplicated data
df.loc[df.duplicated()]
# * This code is using the loc accessor of the pandas DataFrame df with the duplicated() method to locate and display the rows of the DataFrame that are duplicates.
df.duplicated().sum()
# * This code uses the pandas DataFrame duplicated() method to flag the duplicate rows, and then counts them with the sum() method.
# # **missing data**
df.isna().sum()
# * This code is using the isna() method of the pandas DataFrame df to identify the missing (NaN) values in the DataFrame, and then using the sum() method to count the number of missing values in each column.
# ## Visualization
df.head()
# * This code displays the first five rows of the DataFrame by default, which is a convenient way to preview a large dataset; for example, df.head(10) would display the first 10 rows.
df["Number of rooms"].unique()
# * The unique() method of a pandas Series (which is what a single column of a DataFrame is) returns an array of the unique values in the Series, in the order in which they appear.
# * This can be useful for exploring the distribution of values in a column, or for checking for any unexpected or invalid values.
df["Number of rooms"] = df["Number of rooms"].replace(["1 bedrooms"], 1)
df["Number of rooms"] = df["Number of rooms"].replace(["2 bedrooms"], 2)
df["Number of rooms"] = df["Number of rooms"].replace(["3 bedrooms"], 3)
df["Number of rooms"] = df["Number of rooms"].replace(["4 bedrooms"], 4)
df["Number of rooms"] = df["Number of rooms"].replace(["5 bedrooms"], 5)
df["Number of rooms"] = df["Number of rooms"].replace(["6+ bedrooms"], 6)
df
# * This code is replacing certain string values in the "Number of rooms" column of the pandas DataFrame df with their corresponding numeric values.
#
df["Number of bathrooms"].unique()
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["1 bathrooms"], 1)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["2 bathrooms"], 2)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["3 bathrooms"], 3)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["4 bathrooms"], 4)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["5 bathrooms"], 5)
df["Number of bathrooms"] = df["Number of bathrooms"].replace(["5+ bathrooms"], 6)
df
df["neighborhood"].unique()
df["neighborhood"] = df["neighborhood"].replace(["Suwaiq"], 0)
df
df["lister type"].unique()
df["lister type"] = df["lister type"].replace(["landlord"], 0)
df["lister type"] = df["lister type"].replace(["Agent"], 1)
df
s = df["subcategory"].unique()
s
temp = list(df["price"])
L = []
for x in temp:
x = x.replace(",", "")
L.append(x)
df = df.drop(columns=["price"])
df["price"] = L
df.head()
# * In this code, the DataFrame df now holds clean numeric price values without any commas, replacing the previous price column.
s = df["subcategory"].unique()
p = []
for cat in s:
df1 = df[df["subcategory"] == cat]
av = df1["price"].median()
print(cat, av)
p.append(av)
# * This code is calculating the median price for each unique value in the "subcategory" column of the pandas DataFrame df, and storing the results in a list called p.
plt.bar(s, p)
# * This code is using the bar() function to create a bar chart of the median prices for each unique value in the "subcategory" column of the pandas DataFrame df.
df["subcategory"] = df["subcategory"].replace(["Townhouses for Sale"], 1)
df["subcategory"] = df["subcategory"].replace(["villa-places for sale"], 2)
df["subcategory"] = df["subcategory"].replace(["Farm & Chaltes for sale"], 3)
df
df["number of floors "].unique()
df["number of floors "] = df["number of floors "].replace(["1 floor"], 0)
df["number of floors "] = df["number of floors "].replace(["2 floors"], 1)
df
df["city "].unique()
df["city "] = df["city "].replace(["Al Batinah"], 0)
df
df["category "].unique()
df["category "] = df["category "].replace(["Real Estate for Sale"], 0)
df
df["payment methods "].unique()
df["payment methods "] = df["payment methods "].replace(["cash or Installments"], 0)
df["payment methods "] = df["payment methods "].replace(["cash only"], 1)
df
df["building age "].unique()
df["building age "] = df["building age "].replace(["0"], 1)
df["building age "] = df["building age "].replace(["0-11 months"], 2)
df["building age "] = df["building age "].replace(["1-5 years"], 3)
df["building age "] = df["building age "].replace(["6-9 years "], 4)
df["building age "] = df["building age "].replace(["10-19 years"], 5)
df["building age "] = df["building age "].replace(["20+ years"], 6)
df
s = df["building age "].unique()
s
temp = list(df["price"])
L = []
for x in temp:
x = x.replace(",", "")
L.append(x)
df = df.drop(columns=["price"])
df["price"] = L
df.head()
s = df["building age "].unique()
p = []
for cat in s:
df1 = df[df["building age "] == cat]
av = df1["price"].median()
print(cat, av)
p.append(av)
# * This code is used for exploring the relationship between the "building age" and "price" columns of df, by calculating the median price for each unique building age value.
plt.bar(s, p)
# * This code is using the bar() method to create a bar chart of the median prices (p) for each unique value in the "building age" column (s).
df["property status "].unique()
df["property status "] = df["property status "].replace(["0"], 0)
df["property status "] = df["property status "].replace(["complete"], 1)
df
# **Here we have converted all the data in the table into numbers; the dataset is now fully numeric.**
cols = [
    "city ",
    "neighborhood",
    "Surface area m2",
    "land area m2",
    "Number of rooms",
    "price",
    "Number of bathrooms",
    "number of floors ",
    "building age ",
    "lister type",
    "property status ",
    "category ",
    "payment methods ",
    "subcategory",
]
s = df[cols]
s.head()
import seaborn as sns
df1 = df[cols]
df1.corr()["price"]
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# use the cleaned, fully numeric DataFrame prepared above instead of re-reading the raw CSV
data = df.copy()
x = data.drop(columns=["price"])
y = data["price"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
p = model.predict(x_test)
score = accuracy_score(y_test, p)
score
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/d/ruthgn/bank-marketing-data-set/bank-direct-marketing-campaigns.csv"
)
df.head()
df.columns
import tensorflow as tf
import tensorflow
# Creating a Neural Network Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow import keras
from keras.layers import Dense
from keras.models import Sequential
model = Sequential()
model.add(
    Dense(82, activation="relu")
)  # set the first layer's size to match the number of input features
model.add(Dense(64, activation="relu"))
model.add(Dense(32, activation="relu"))
model.add(Dense(32, activation="relu"))
model.add(Dense(16, activation="relu"))  # adjust the sizes of the intermediate layers as needed
model.add(Dense(1))  # the final layer always has a single unit for this regression-style output
model.compile(optimizer="Adam", loss="mse")
df.columns
x = df.drop("job", axis=1)
y = df.job
def show_shapes():  # adapt this to take inputs; as written it relies on the variables Sequences and Targets being defined first
print("Expected: (num_samples, timesteps, channels)")
print("Sequences: {}".format(Sequences.shape))
print("Targets: {}".format(Targets.shape))
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
model = Sequential(
[
Flatten(input_shape=(28, 28)),
Dense(128, activation="relu"),
Dense(128, activation="relu"),
Dense(10),
]
)
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
import timeit
model_json = model.to_json()
with open("benimmodel.json", "w") as json_file:
json_file.write(model_json)
model.save("benimmodel.hd5")
|
# en_core_web_lg is the trained pipeline for the English language.
# English pipeline optimized for CPU. Components: tok2vec, tagger, parser, senter, ner, attribute_ruler, lemmatizer.
# For example, en_core_web_sm is a small English pipeline trained on written web text (blogs, news, comments) that includes vocabulary, syntax and entities.
# # Tokenization
# Tokens are the building blocks of Natural Language.
# Tokenization is a way of separating a piece of text into smaller units called tokens. Here, tokens can be either words, characters, or subwords. Hence, tokenization can be broadly classified into 3 types – word, character, and subword (n-gram characters) tokenization.
#
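# A tiny, hedged illustration of the three tokenization levels described above
# (plain Python only; the subword split here is a naive fixed-length chunking, not a real
# subword algorithm such as BPE):
sample = "strong AI"
word_tokens = sample.split()  # word-level tokens
char_tokens = list(sample.replace(" ", ""))  # character-level tokens
subword_tokens = [sample[i : i + 3] for i in range(0, len(sample), 3)]  # naive 3-character chunks
print(word_tokens, char_tokens, subword_tokens)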
# import spacy and load the language library
import spacy
nlp = spacy.load("en_core_web_sm")
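# Quick check (addition): list the components of the loaded pipeline, matching the
# component list described at the top of this notebook (the exact set depends on the model).
print(nlp.pipe_names)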
mystring = "we are moving to strong AI"
print(mystring)
# # create a doc object and explore tokens
#
doc = nlp(mystring)
for token in doc:
print(token.text, end="|")
# # prefixes, suffixes and infixes
# A prefix occurs at the beginning of a word or stem (sub-mit, pre-determine, un-willing); a suffix at the end (wonder-ful, depend-ent, act-ion); and an infix occurs in the middle. A sketch using tokenizer.explain follows the example below.
doc2 = nlp(
"We're here to help! Send snail-email, email [email protected] or visit us at https://ahammedmejbah.com/!"
)
for t in doc2:
print(t)
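# A hedged illustration (assumes spaCy v2.3+): tokenizer.explain() reports which rule
# (prefix, suffix, infix, special case, or plain token) produced each piece of the split.
for rule, piece in nlp.tokenizer.explain("Send snail-email or visit https://ahammedmejbah.com/!"):
    print(rule, "->", piece)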
a = "deep learning with python 5 book cost $10.30"
a.split(".")
doc3 = nlp("Deep learning with python 5 books cost $10.30")
for t in doc3:
print(t)
# # Exceptions
doc4 = nlp("Lets visit st.louis in the U.S. next year")
for t in doc4:
print(t)
# # Counting Tokens
# We can count the number of tokens in a text, determine the counts and percentages of particular tokens, and plot the count distribution as a graph. With spaCy, len(doc) returns the number of tokens directly; NLTK offers similar functionality via its FreqDist class, which takes a list of tokens from a text or corpus.
len(doc)
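# Beyond the raw token count, per-token frequencies can be tallied as well; a minimal sketch using collections.Counter on the spaCy tokens (NLTK's FreqDist, mentioned above, works the same way on a list of token strings):
from collections import Counter

token_counts = Counter(token.text.lower() for token in doc)
print(token_counts.most_common(5))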
# # counting vocab Entries
len(doc.vocab)
# # Tokens can be retrieved by index position and slice
a = "it is better to give than to receive"
b = a.split()
b[2]
doc5 = nlp("It is better to give than to receive.")
doc5[-2]
doc5[2:5]
doc5[-4:]
# # Tokens cannot be reassigned
doc6 = nlp("My dinner was horrible.")
doc7 = nlp("Your dinner was delicious.")
# Try to change "My dinner was horrible" to "My dinner was delicious"
# spaCy Doc objects are immutable, so the assignment below raises a TypeError
doc6[3] = doc7[3]
# # Named Entities
doc8 = nlp(
"Google, IBM, AWS and Apple to build a Hong Kong and Bangladesh factory for $6 million"
)
for token in doc8:
print(token.text, end="!")
print("\n---")
for ent in doc8.ents:
print(ent.text + "_" + ent.label_ + "_" + str(spacy.explain(ent.label_)))
# # Noun Chunks
# Chunking means getting a chunk of text. A meaningful piece of text from the full text. One of the main goals of chunking is to group into what is known as “noun phrases.” These are phrases of one or more words that contain a noun, maybe some descriptive words, maybe a verb, and maybe something like an adverb.
doc9 = nlp(
"Google, IBM, AWS and Apple to build a Hong Kong and Bangladesh factory for $6 million"
)
for chunk in doc9.noun_chunks:
print(chunk.text)
doc9 = nlp("Autonomous cars shift insurance liability toward manufacturers.")
for chunk in doc9.noun_chunks:
print(chunk.text)
doc10 = nlp("red cars do not carry higher insurance rates.")
for chunk in doc10.noun_chunks:
print(chunk.text)
doc11 = nlp("He was a one-eyed, one-horned, flying, purple people-eater.")
for chunk in doc11.noun_chunks:
print(chunk.text)
# # Visualizing the dependency parse
from spacy import displacy
doc = nlp(
"Google, IBM, AWS and Apple to build a Hong Kong and Bangladesh factory for $6 million"
)
displacy.render(doc, style="dep", jupyter=True, options={"distance": 110})
# # Visualizing the entity recognizer
doc = nlp(
"Google, IBM, AWS and Apple to build a Hong Kong and Bangladesh factory for $6 million"
)
displacy.render(doc, style="ent", jupyter=True)
# # Creating Visualizations Outside of Jupyter
doc = nlp(
"Google, IBM, AWS and Apple to build a Hong Kong and Bangladesh factory for $6 million"
)
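# Note: displacy.serve starts a local web server (port 5000 by default) and blocks the cell until it is stopped, which is why it is intended for use outside Jupyter.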
displacy.serve(doc, style="dep")
|
# # Restaurant Rating Prediction App
# Restaurant rating has become the most commonly used parameter for judging a restaurant. A restaurant's rating depends on factors such as reviews, location, average cost for two people, votes, cuisines and the type of restaurant.
# **The main goal of this notebook is to get insights on the restaurants people like to visit and to predict the rating of a restaurant.**
# ## [Restaurant Rating Prediction App Link](https://sudhanshu2198-end-to-end-restaurant-rating--introduction-ts1jhq.streamlit.app/)
# ## [Github Link](https://github.com/sudhanshu2198/End-to-End-Restaurant-Rating-Prediction)
# 
# # Dataset Features Description
# 
# # Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.offline as py
py.init_notebook_mode(connected=True)
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
import optuna
from optuna.visualization import plot_optimization_history
from optuna.visualization import plot_parallel_coordinate
from optuna.visualization import plot_param_importances
import json
import joblib
import warnings
warnings.filterwarnings("ignore")
# # Data Summary
data = pd.read_csv("/kaggle/input/zomato-bangalore-restaurants/zomato.csv")
data.head()
data.info()
100 * data.isnull().sum() / len(data)
data.columns
# # Data Cleaning
df = data.copy()
df = df.drop(["url", "phone", "location", "reviews_list", "rest_type"], axis=1)
# dropping new restaurant where rating is not available
# dropping restaurants where no of votes for rating are less then 50
df = df[-df["rate"].isna()]
df = df[df["votes"] >= 50]
"""dropping votes features as it will not be available for future data on which
prediction is to be made"""
df.drop("votes", axis=1, inplace=True)
# tackling nan values
df["approx_cost(for two people)"] = (
df["approx_cost(for two people)"].str.replace(",", "").astype(float)
)
col = ["listed_in(city)", "listed_in(type)"]
ser = df.groupby(col)["approx_cost(for two people)"].transform("median")
df["approx_cost(for two people)"] = df["approx_cost(for two people)"].fillna(ser)
df["dish_liked"].replace(np.nan, "", inplace=True)
df.dropna(inplace=True)
def r_category(rating):
if rating >= 4.0:
return "Excellent"
elif rating >= 3.0:
return "Good"
elif rating >= 2.5:
return "Average"
else:
return "Poor"
def p_category(price):
if price <= 100.0:
return "Cheap"
elif price <= 250.0:
return "Resonable"
elif price <= 500.0:
return "Affordable"
else:
return "Expensive"
# deriving features
df["Cost_Per_Person"] = df["approx_cost(for two people)"] / 2
df["rate"] = df["rate"].str[0:3].astype(float)
df["Category"] = df["rate"].apply(r_category)
df["Price_Category"] = df["Cost_Per_Person"].apply(p_category)
# whether menu is available or not
df["Menu"] = df["menu_item"].map(lambda x: "No" if x == "[]" else "Yes")
# no of dishes liked by customer at a restaurant
df["dish_liked"] = df["dish_liked"].map(lambda x: 0 if x == "" else len(x.split(", ")))
# No of varieties served at a restaurant
df["No_of_Varieties"] = df["cuisines"].apply(lambda x: len(x.split(", ")))
# renaming columns for better intuition
change = {
"name": "Name",
"address": "Address",
"online_order": "Delivery",
"book_table": "Booking",
"rate": "Rating",
"dish_liked": "No_of_Best_Sellers",
"cuisines": "Cuisines",
"approx_cost(for two people)": "Average_Cost",
"listed_in(type)": "Type",
"listed_in(city)": "City",
}
df.rename(columns=change, inplace=True)
df.reset_index(drop=True, inplace=True)
# cleaning cuisine columns by properly categorizing food items
def func1(string):
l = string.split(", ")
if "Afghan" in l:
l = list(map(lambda x: x.replace("Afghan", "Afghani"), l))
if "Bubble Tea" in l:
l = list(map(lambda x: x.replace("Bubble Tea", "Beverages"), l))
if "Coffee" in l:
l = list(map(lambda x: x.replace("Coffee", "Beverages"), l))
if "Cafe" in l:
l = list(map(lambda x: x.replace("Cafe", "Beverages"), l))
if "Tea" in l:
l = list(map(lambda x: x.replace("Tea", "Beverages"), l))
if "Bubble Beverages" in l:
l = list(map(lambda x: x.replace("Bubble Beverages", "Beverages"), l))
if "Ice Cream" in l:
l = list(map(lambda x: x.replace("Ice Cream", "Desserts"), l))
if "Mithai" in l:
l = list(map(lambda x: x.replace("Mithai", "Desserts"), l))
if "Bar Food" in l:
l = list(map(lambda x: x.replace("Bar Food", "Fast Food"), l))
if "Burger" in l:
l = list(map(lambda x: x.replace("Burger", "Fast Food"), l))
if "Finger Food" in l:
l = list(map(lambda x: x.replace("Finger Food", "Fast Food"), l))
if "Momos" in l:
l = list(map(lambda x: x.replace("Momos", "Fast Food"), l))
if "Rolls" in l:
l = list(map(lambda x: x.replace("Rolls", "Fast Food"), l))
if "Wraps" in l:
l = list(map(lambda x: x.replace("Wraps", "Fast Food"), l))
if "Street Food" in l:
l = list(map(lambda x: x.replace("Street Food", "Fast Food"), l))
if "Juices" in l:
l = list(map(lambda x: x.replace("Juices", "Healthy Food"), l))
if "Salad" in l:
l = list(map(lambda x: x.replace("Salad", "Healthy Food"), l))
if "Sandwich" in l:
l = list(map(lambda x: x.replace("Sandwich", "Healthy Food"), l))
if "Grill" in l:
l = list(map(lambda x: x.replace("Grill", "BBQ"), l))
if "Steak" in l:
l = list(map(lambda x: x.replace("Steak", "BBQ"), l))
if "Sushi" in l:
l = list(map(lambda x: x.replace("Sushi", "Japanese"), l))
if "Tex-Mex" in l:
l = list(map(lambda x: x.replace("Tex-Mex", "Mexican"), l))
if "Roast Chicken" in l:
l = list(map(lambda x: x.replace("Roast Chicken", "Chinese"), l))
if "Charcoal Chicken" in l:
l = list(map(lambda x: x.replace("Charcoal Chicken", "Chinese"), l))
if "Pizza" in l:
l = list(map(lambda x: x.replace("Pizza", "Italian"), l))
if "Biryani" in l:
l = list(map(lambda x: x.replace("Biryani", "South Indian"), l))
if "Kebab" in l:
l = list(map(lambda x: x.replace("Kebab", "North Indian"), l))
return ", ".join(set(l))
df["Cuisines"] = df["Cuisines"].apply(func1)
df = df[
[
"Name",
"Address",
"Menu",
"Delivery",
"Booking",
"No_of_Best_Sellers",
"No_of_Varieties",
"Cuisines",
"Cost_Per_Person",
"Type",
"City",
"Rating",
"Category",
"Price_Category",
]
]
df.head()
# # Data Preprocessing
multi_label = df["Cuisines"].str.split(", ")
mlb = MultiLabelBinarizer()
inter_data = mlb.fit_transform(multi_label)
multi_label_df = pd.DataFrame(inter_data, columns=mlb.classes_)
data = df[
[
"Name",
"Menu",
"Delivery",
"Booking",
"Type",
"City",
"No_of_Best_Sellers",
"No_of_Varieties",
"Cost_Per_Person",
"Rating",
"Category",
"Price_Category",
]
]
dataframe = pd.concat([data, multi_label_df], axis=1)
dataframe.head()
dataframe.to_csv("display.csv", index=False)
d = df[
[
"Name",
"Address",
"Menu",
"Delivery",
"Booking",
"Type",
"City",
"No_of_Best_Sellers",
"No_of_Varieties",
"Cost_Per_Person",
"Rating",
"Category",
"Price_Category",
]
]
dataframe = pd.concat([d, multi_label_df], axis=1)
dataframe.duplicated(subset=["Name", "Address"]).sum()
dataframe.drop_duplicates(subset=["Name", "Address"], inplace=True)
len(dataframe)
one_hot = dataframe[["Delivery", "Booking", "City"]]
numeric = dataframe[
["No_of_Best_Sellers", "No_of_Varieties", "Cost_Per_Person", "Rating"]
]
encoder = OneHotEncoder()
one_hot_df = pd.DataFrame(
encoder.fit_transform(one_hot).toarray(), index=list(dataframe.index)
)
df_inter = pd.concat(
[numeric, multi_label_df.iloc[list(dataframe.index), :], one_hot_df], axis=1
)
df_inter.reset_index(drop=True, inplace=True)
df_inter.head()
X = df_inter.drop("Rating", axis=1).values
y = df_inter["Rating"].values
mlb.classes_
# # Data Insights
idf = pd.read_csv("/kaggle/input/bangalore-restaurant-insights/display.csv")
idf.head()
col = mlb.classes_
df = idf["Delivery"].value_counts()
fig = px.pie(values=df.values, names=df.index, title="Online Delivery")
py.iplot(fig)
df = idf["Booking"].value_counts()
fig = px.pie(values=df.values, names=df.index, title="Table Booking")
py.iplot(fig)
df = idf["Category"].value_counts()
fig = px.pie(values=df.values, names=df.index, title="Category")
py.iplot(fig)
df = idf["Price_Category"].value_counts()
fig = px.pie(values=df.values, names=df.index, title="Price Category")
py.iplot(fig)
# Most Expensive Cities
inter = idf.groupby("City")[["Cost_Per_Person"]].mean()
sol = inter.sort_values(by="Cost_Per_Person", ascending=False).head()
sol.style.background_gradient(cmap="YlOrRd", high=0.5)
# Most Affordable Cities
inter = idf.groupby("City")[["Cost_Per_Person"]].mean()
sol = inter.sort_values(by="Cost_Per_Person").head()
sol.style.background_gradient(cmap="YlOrRd", high=0.5)
fig = px.scatter(idf, x="Cost_Per_Person", y="Rating")
py.iplot(fig)
inter = idf.groupby(["No_of_Best_Sellers", "No_of_Varieties"])[["Menu"]].count()
inter.rename(columns={"Menu": "Count"}, inplace=True)
inter.reset_index(inplace=True)
fig = px.bar(
inter, x="No_of_Varieties", y="Count", color="No_of_Best_Sellers", barmode="group"
)
py.iplot(fig)
# Choose City
city = "Banashankari"
inter = idf[idf["City"] == "Banashankari"]
d1 = inter.groupby("Type")["Type"].count().sort_values(ascending=False)
d2 = inter.groupby("Type")["Cost_Per_Person"].agg(["min", "median", "max"])
# cpp=cost per person
sol = pd.concat([d1, d2], axis=1)
cols = ["No", "Min_cpp", "Median_cpp", "Max_cpp"]
sol.columns = cols
sol.style.background_gradient(cmap="YlOrRd", high=0.5)
# choose city and Type
City = "Banashankari"
Type = "Delivery"
inter = idf[(idf["City"] == City) & (idf["Type"] == Type)]["Cost_Per_Person"]
fig = px.histogram(inter, x="Cost_Per_Person")
py.iplot(fig)
# Choose City
city = "Sarjapur Road"
inter = idf[idf["City"] == "Sarjapur Road"]
ser = inter.groupby(["City"])[col].sum()
index = ser.columns
val = ser.values.T.flatten()
series = pd.Series(val, index)
ii = series.sort_values(ascending=False).head()
fig = px.bar(x=ii.index, y=ii.values)
py.iplot(fig)
# Choose City and Restaurant Type
inter = idf[(idf["North Indian"] == 1) & (idf["City"] == "Bellandur")]
ser = inter.groupby("Name")[["Rating"]].max()
sol = ser.sort_values(by="Rating", ascending=False).head()
sol.style.background_gradient(cmap="YlOrRd", high=0.5)
# Choose City and Restaurant Type
city = "Sarjapur Road"
type = "Delivery"
inter = idf[idf["City"] == "Sarjapur Road"]
ser = inter.groupby(["Type"])[col].sum()
df_inter = ser.T
inter = df_inter[rest_type]
ii = inter.sort_values(ascending=False).head()
fig = px.bar(x=ii.index, y=ii.values)
py.iplot(fig)
# Choose city
City = "Old Airport Road"
ser = idf.groupby(["City", "Type"])[col].sum()
ser
dicton = {}
for i in range(len(ser)):
index = ser.columns
val = ser.iloc[i].values.T.flatten()
series = pd.Series(val, index)
vall = list(series.sort_values(ascending=False).head().index)
dicton[ser.index[i]] = vall
sol = pd.DataFrame(dicton, index=["1st", "2nd", "3rd", "4th", "5th"])
frame = sol.T
frame.loc[City]
city = "Sarjapur Road"
cols = idf["Type"].unique()
df = pd.DataFrame()
inter = idf[idf["City"] == "Sarjapur Road"]
inter = inter.groupby("Type")[col].sum().T
for i in cols:
ser = inter[i].sort_values(ascending=False).head()
df_inter = pd.DataFrame(ser)
df_inter.reset_index(inplace=True)
df_inter.rename(columns={i: "Count", "index": "Cuisine"}, inplace=True)
df_inter["Type"] = i
df = pd.concat([df, df_inter], axis=0)
fig = px.bar(df, x="Type", y="Count", color="Cuisine")
py.iplot(fig)
# Choose city
City = "BTM"
sel = ["North Indian", "South Indian", "Chinese", "Fast Food", "Italian"]
df = pd.DataFrame()
order = {"Price_Category": ["Cheap", "Resonable", "Affordable", "Expensive"]}
for i in sel:
inter = idf[idf[i] == 1]
inter = inter.groupby(["Price_Category"])[["Menu"]].count()
inter = inter.reset_index()
inter.rename(columns={"Menu": "Count"}, inplace=True)
inter["Cuisine_Type"] = i
df = pd.concat([df, inter], axis=0)
fig = px.bar(
x=df["Cuisine_Type"],
y=df["Count"],
color=df["Price_Category"],
barmode="group",
category_orders={
"Price_Category": ["Cheap", "Resonable", "Affordable", "Expensive"]
},
)
py.iplot(fig)
# Choose city
City = "BTM"
sel = ["North Indian", "South Indian", "Chinese", "Fast Food", "Italian"]
df = pd.DataFrame()
for i in sel:
inter = idf[idf[i] == 1]
inter = inter.groupby(["Category"])[["Menu"]].count()
inter = inter.reset_index()
inter.rename(columns={"Menu": "Count"}, inplace=True)
inter["Cuisine_Type"] = i
df = pd.concat([df, inter], axis=0)
fig = px.bar(x=df["Cuisine_Type"], y=df["Count"], color=df["Category"], barmode="group")
py.iplot(fig)
# # Modelling
def get_models():
models, names = [], []
models.append(RandomForestRegressor())
names.append("RandomForestRegressor")
models.append(XGBRegressor())
names.append("XGBRegressor")
models.append(LGBMRegressor())
names.append("LGBMRegressor")
return models, names
models, names = get_models()
scores = []
for i in range(len(models)):
score = cross_val_score(
models[i], X, y, cv=5, scoring="neg_mean_squared_error", n_jobs=-1
)
scores.append(score)
plt.boxplot(scores, labels=names, showmeans=True)
plt.xticks(rotation="vertical")
# # Hyperparameter Optimization
def objective(trial):
params = {
"n_estimators": trial.suggest_int("n_estimators", 100, 400),
"max_depth": trial.suggest_int("max_depth", 4, 10),
"learning_rate": trial.suggest_float("learning_rate", 0.01, 1.0, log=True),
"subsample": trial.suggest_float("subsample", 0.5, 1.0),
"reg_alpha": trial.suggest_float("reg_alpha", 0.0, 1.0),
}
model = LGBMRegressor(random_state=1, **params)
score = cross_val_score(
model, X, y, cv=5, scoring="neg_mean_squared_error", n_jobs=-1
)
return score.mean()
study = optuna.create_study(
study_name="Hyperparameter optimization",
direction="maximize",
sampler=optuna.samplers.TPESampler(seed=42),
)
study.optimize(objective, n_trials=50, show_progress_bar=True)
print(f"Best value: {study.best_trial.value}")
print(f"Best hyperparameters:\n {json.dumps(study.best_trial.params, indent=2)}")
plot_optimization_history(study)
plot_parallel_coordinate(study)
plot_param_importances(study)
# # Final Model
model = LGBMRegressor(**study.best_trial.params)
model.fit(X, y)
joblib.dump(model, "model.pkl")
joblib.dump(mlb, "mlb.pkl")
joblib.dump(encoder, "encoder.pkl")
|
# Business Case: Walmart - Confidence Interval and CLT
# About Walmart:
# Walmart is an American multinational retail corporation that operates a chain of supercenters, discount departmental stores, and grocery stores from the United States. Walmart has more than 100 million customers worldwide.
# Business Problem:
# The Management team at Walmart Inc. wants to analyze the customer purchase behavior (specifically, purchase amount) against the customer’s gender and the various other factors to help the business make better decisions. They want to understand if the spending habits differ between male and female customers: Do women spend more on Black Friday than men? (Assume 50 million customers are male and 50 million are female).
# Import the libraries:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Read the Walmart data:
df = pd.read_csv("walmart_data.csv")
df
# Checking missing values:
df.isnull().sum() / len(df) * 100
# Checking the characteristics of the data:
df.describe(include="all")
df.info()
# Initial Observations:
# 1. There are no missing values in the data.
# 2. There are 3631 unique product IDs in the dataset. P00265242 is the most sold product ID.
# 3. There are 7 unique age groups, and most purchases belong to the 26-35 age group.
# 4. There are 3 unique city categories, with category B being the most frequent.
# 5. There are 5 unique values for Stay_In_Current_City_Years, with 1 being the most frequent.
# 6. The difference between the mean and median of Purchase is significant, which suggests outliers in the data.
# 7. The minimum and maximum purchase are 12 and 23961, so purchasing behaviour is spread over a wide range. The mean is 9264 and 75% of purchases are less than or equal to 12054, suggesting most purchases are under 12k.
# 8. A few categorical variables are stored as integers; they can be converted to an object/categorical type.
# 9. Out of 550068 data points, 414259 come from male customers and the rest from female customers, so the male purchase count is much higher.
# 10. The standard deviation of Purchase is large, which suggests the data is quite spread out for this attribute.
columns = ["User_ID", "Occupation", "Marital_Status", "Product_Category"]
df[columns] = df[columns].astype("object")
df.info()
df.describe(include="all")
# Observations after modifying the categorical variables' data types:
# 1. There are 5891 unique users, with user ID 1001680 having the highest purchase count.
# 2. The customers belong to 21 distinct occupations, with Occupation 4 being the most frequent.
# 3. Unmarried customers contribute more in terms of purchase count.
# 4. There are 20 unique product categories, with category 5 being the most frequent.
# Checking how categorical variables contributes to the entire data
categ_cols = [
"Gender",
"Age",
"City_Category",
"Stay_In_Current_City_Years",
"Marital_Status",
]
df[categ_cols].melt().groupby(["variable", "value"])[["value"]].count() / len(df)
# Observations:
#
# 1. 40% of purchases are made by customers aged 26-35, and 78% of purchases are made by customers aged 18-45 (40%: 26-35, 18%: 18-25, 20%: 36-45).
# 2. 75% of the purchase count comes from male customers and 25% from female customers.
# 3. Single customers contribute 60% of the purchase count and married customers 40%.
# 4. 35% of customers have stayed in the city for 1 year, 18% for 2 years and 17% for 3 years.
# 5. There are 20 product categories in total.
# 6. There are 21 different occupation types in the city.
# Checking how the data is spread basis distinct users
df2 = df.groupby(["User_ID"])["Age"].unique()
df2.value_counts() / len(df2)
# Observation:
# 1. 35% of the users are aged 26-35, and 73% of users are aged 18-45.
# 2. From the previous observation, users aged 26-35 account for 40% of the purchase count while making up only 35% of users, so we can infer they are more frequent customers.
df2 = df.groupby(["User_ID"])["Gender"].unique()
df2.value_counts() / len(df2)
# Observation:
# 1. We have 72% male users and 28% female users. Combining with previous observations we can see 72% of male users contributing to 75% of the purchase count and 28% of female users are contributing to 25% of the purchase count.
df2 = df.groupby(["User_ID"])["Marital_Status"].unique()
df2.value_counts() / len(df2)
# Observation:
# 1. 58% of the users are single and 42% are married. Combining this with the previous observation, single users contribute relatively more, since 58% of users account for 60% of the purchase count.
df2 = df.groupby(["User_ID"])["City_Category"].unique()
df2.value_counts() / len(df2)
# Observation:
#
# 1. 53% of the users belong to city category C, 29% to category B and 18% to category A. From the earlier observation, category B accounts for 42% of the purchase count and category C for 31%. Category B users clearly purchase more actively despite being only 29% of the users, while the 53% of users in category C contribute only 31% of the total purchase count.
# Checking the age group distribution in different city categories
pd.crosstab(
index=df["City_Category"], columns=df["Age"], margins=True, normalize="index"
)
# Observation:
#
# 1. We saw earlier that city categories B and A make up a smaller share of the user base yet contribute more to the purchase count. The results above show a large percentage of customers aged 26-35 in B (40%) and A (50%), which may explain why these city categories purchase more actively.
# Checking how genders are contributing towards total purchase amount
df2 = pd.DataFrame(df.groupby(["Gender"])[["Purchase"]].sum())
df2["percent"] = (df2["Purchase"] / df2["Purchase"].sum()) * 100
df2
# Observation:
# 1. Male customers (72% of the users) contribute more than 76% of the total purchase amount, whereas female customers (28% of the users) contribute about 23%.
# Checking how purchase values are spread among different age categories
df2 = pd.DataFrame(df.groupby(["Age"])[["Purchase"]].sum())
df2["percent"] = (df2["Purchase"] / df2["Purchase"].sum()) * 100
df2
# Observation:
# 1. We can see the net purchase amount spread is similar to the purchase count spread among the different age groups.
df2 = pd.DataFrame(df.groupby(["Marital_Status"])["Purchase"].sum())
df2["percent"] = (df2["Purchase"] / df2["Purchase"].sum()) * 100
df2
# Observations:
# 1. Single users are contributing 59% towards the total purchase amount in comparison to 41% by married users.
df2 = pd.DataFrame(df.groupby(["City_Category"])["Purchase"].sum())
df2["percent"] = (df2["Purchase"] / df2["Purchase"].sum()) * 100
df2
# Observations:
#
# 1. Each city category's contribution to the total purchase amount is similar to its contribution to the purchase count. Still, combining with the previous observation, city category C accounts for 31% of the purchase count but a slightly larger 32.65% of the purchase amount, so we can infer that category C customers buy higher-value products.
# Users with highest number of purchases
df.groupby(["User_ID"])["Purchase"].count().nlargest(10)
# Users with highest purchases amount
df.groupby(["User_ID"])["Purchase"].sum().nlargest(10)
# Observation:
# 1. The users with the highest number of purchases also contribute the most to the purchase amount. Still, a few users outside the top 10 by purchase count appear in the top 10 by purchase amount. For example, user 1004277, with a lower purchase count (979), has a much higher purchase amount than user 1001680, who has the highest purchase count.
df2 = pd.DataFrame(df.groupby(["Occupation"])[["Purchase"]].sum())
df2["percent"] = (df2["Purchase"] / df2["Purchase"].sum()) * 100
df2
# Observations:
#
# 1. Some occupations, such as 0, 4 and 7, contribute more towards the total purchase amount.
df2 = pd.DataFrame(df.groupby(["Product_Category"])[["Purchase"]].sum())
df2["percent"] = (df2["Purchase"] / df2["Purchase"].sum()) * 100
df2
# Observations:
#
# 1. 1, 8, 5 are among the highest yielding product categories and 19, 20, 13 are among the lowest in terms of their contribution to total amount.
df2 = pd.DataFrame(df.groupby(["Stay_In_Current_City_Years"])[["Purchase"]].sum())
df2["percent"] = (df2["Purchase"] / df2["Purchase"].sum()) * 100
df2
# Univariate Analysis:
# We can explore the distribution of the data for the quantitative attributes using histplot.
plt.figure(figsize=(10, 6))
sns.histplot(data=df, x="Purchase", kde=True)
plt.show()
# Observation:
#
# 1. Purchase values between 5000 and 10000 have the highest counts. From the initial observations, the mean and median are about 9264 and 8047 respectively. The plot also suggests outliers in the data.
plt.figure(figsize=(5, 4))
sns.boxplot(data=df, y="Purchase")
plt.show()
# Observation:
#
# We can see there are outliers in the data for purchase.
#
# Univariate analysis for qualitative variables:
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 10))
sns.countplot(data=df, x="Gender", ax=axs[0, 0])
sns.countplot(data=df, x="Occupation", ax=axs[0, 1])
sns.countplot(data=df, x="City_Category", ax=axs[1, 0])
sns.countplot(data=df, x="Marital_Status", ax=axs[1, 1])
plt.show()
# Observations:
#
# 1. The graphs above clearly show that purchases made by males far outnumber those made by females.
# 2. There are 21 occupation categories. Occupation categories 4, 0 and 7 have the highest numbers of purchases, and category 8 has the lowest.
# 3. Purchases are highest from city category B.
# 4. Single customers make more purchases than married customers.
plt.figure(figsize=(12, 5))
sns.countplot(data=df, x="Product_Category")
plt.show()
# Observations:
#
# 1. There are 20 product categories with product category 1, 5 and 8 having higher purchasing frequency.
# Bivariate Analysis:
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 5))
sns.histplot(data=df[df["Gender"] == "M"]["Purchase"], ax=axs[0]).set_title(
"Male Spending "
)
sns.histplot(data=df[df["Gender"] == "F"]["Purchase"], ax=axs[1]).set_title(
"Female Spending"
)
plt.show()
# Observations:
#
# 1. From the histplots above, the spending behaviour is very similar for males and females: for both, most purchases fall in the 5000-10000 value range. However, the purchase counts are higher for males.
attr = [
"Gender",
"Age",
"Occupation",
"City_Category",
"Stay_In_Current_City_Years",
"Marital_Status",
]
fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(18, 10))
fig.subplots_adjust(top=1.3)
count = 0
for row in range(3):
for col in range(2):
sns.boxplot(
data=df,
y="Purchase",
x=attr[count],
ax=axs[row, col],
)
axs[row, col].set_title(f"Purchase vs {attr[count]}")
count += 1
plt.show()
plt.figure(figsize=(8, 5))
sns.boxplot(data=df, y="Purchase", x="Product_Category")
plt.show()
# Observations:
#
# 1. The spending behaviour of males and females is similar, as seen in the histplots above; male purchase values sit in a slightly higher range than female ones.
# 2. Across the different age categories we see similar purchase behaviour: for all age groups most purchases fall between 5k and 12k, with some outliers in each.
# 3. Across different occupations we also see similar purchasing behaviour in terms of purchase values.
# 4. Similarly for city category, stay in current city years and marital status, customers spend mostly in the 5k to 12k range.
# 5. There is more variation among product categories: product category 10 has the costliest products, and a few categories show outliers.
# Multivariate analysis:
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(20, 6))
fig.subplots_adjust(top=1.5)
sns.boxplot(data=df, y="Purchase", x="Gender", hue="Age", ax=axs[0, 0])
sns.boxplot(data=df, y="Purchase", x="Gender", hue="City_Category", ax=axs[0, 1])
sns.boxplot(data=df, y="Purchase", x="Gender", hue="Marital_Status", ax=axs[1, 0])
sns.boxplot(
data=df, y="Purchase", x="Gender", hue="Stay_In_Current_City_Years", ax=axs[1, 1]
)
plt.show()
# Observations:
#
# 1. The purchasing pattern is very similar for males and females, even across different age groups.
# 2. The purchasing behaviour of males and females across city categories is also similar, although males from city category B tend to buy costlier products than females.
# 3. Male and female spending behaviour remains similar when marital status is taken into account.
# 4. Purchase values are similar for males and females across Stay_In_Current_City_Years, although males buy slightly higher-value products.
# Correlation between the numerical variables:
sns.heatmap(df.corr(), annot=True, cmap="Blues", linewidth=0.5)
# Observations:
#
# 1. From the correlation plot above, no pair of variables shows a significant correlation.
# Average amount spent per male and female customer:
avgamt_gender = df.groupby(["User_ID", "Gender"])[["Purchase"]].sum()
avgamt_gender = avgamt_gender.reset_index()
avgamt_gender
# Gender wise count in the entire data
avgamt_gender["Gender"].value_counts()
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 5))
sns.histplot(
data=avgamt_gender[avgamt_gender["Gender"] == "F"]["Purchase"], ax=axs[0]
).set_title("Females Avg Spend")
sns.histplot(
data=avgamt_gender[avgamt_gender["Gender"] == "M"]["Purchase"], ax=axs[1]
).set_title("Males Avg Spend")
# Observations:
#
# 1. The average amount spent by male customers is higher than that spent by female customers.
avgamt_gender.groupby(["Gender"])[["Purchase"]].mean()
avgamt_gender.groupby(["Gender"])["Purchase"].sum()
# Observations:
#
# 1. The average amount spent per male customer is about 925344 across the whole population, versus a much lower 712024 for female customers.
# 2. The total amount spent by males is around 4 billion, whereas for females it is about 1.2 billion.
avgamt_male = avgamt_gender[avgamt_gender["Gender"] == "M"]
avgamt_female = avgamt_gender[avgamt_gender["Gender"] == "F"]
# Drawing repeated samples (sample size = 1000, with replacement) of the average purchase amount for males and females
genders = ["M", "F"]
sample_size = 1000
num_repitions = 1000
male_means = []
female_means = []
for i in range(num_repitions):
male_mean = avgamt_male.sample(sample_size, replace=True)["Purchase"].mean()
female_mean = avgamt_female.sample(sample_size, replace=True)["Purchase"].mean()
male_means.append(male_mean)
female_means.append(female_mean)
fig, axis = plt.subplots(nrows=1, ncols=2, figsize=(20, 6))
axis[0].hist(male_means, bins=35)
axis[1].hist(female_means, bins=35)
axis[0].set_title("Male distribution of means, Sample size: 1000")
axis[1].set_title("Female distribution of means, Sample size: 1000")
plt.show()
# Observations:
#
# 1. The distribution of sample means looks approximately normal for both males and females, and the mean of the sample means is close to the population mean, as expected from the central limit theorem.
# Calculating 90% confidence interval for sample size 1000:
# Taking the values for z at 90%, 95% and 99% confidence interval as:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
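# These multipliers are the standard normal quantiles for two-sided intervals; a quick sanity check (assuming scipy is available in the environment):
from scipy.stats import norm

print(norm.ppf(0.95))   # ~1.645 -> 90% two-sided interval
print(norm.ppf(0.975))  # ~1.960 -> 95% two-sided interval
print(norm.ppf(0.995))  # ~2.576 -> 99% two-sided interval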
print(
"Population avg spend amount for Male: {:.2f}".format(
avgamt_male["Purchase"].mean()
)
)
print(
"Population avg spend amount for Female: {:.2f}\n".format(
avgamt_female["Purchase"].mean()
)
)
print("Sample avg spend amount for Male: {:.2f}".format(np.mean(male_means)))
print("Sample avg spend amount for Female: {:.2f}\n".format(np.mean(female_means)))
print("Sample std for Male: {:.2f}".format(pd.Series(male_means).std()))
print("Sample std for Female: {:.2f}\n".format(pd.Series(female_means).std()))
print(
"Sample std error for Male: {:.2f}".format(
pd.Series(male_means).std() / np.sqrt(1000)
)
)
print(
"Sample std error for Female: {:.2f}\n".format(
pd.Series(female_means).std() / np.sqrt(1000)
)
)
sample_mean_male = np.mean(male_means)
sample_mean_female = np.mean(female_means)
sample_std_male = pd.Series(male_means).std()
sample_std_female = pd.Series(female_means).std()
sample_std_error_male = sample_std_male / np.sqrt(1000)
sample_std_error_female = sample_std_female / np.sqrt(1000)
Upper_Limit_male = z90 * sample_std_error_male + sample_mean_male
Lower_Limit_male = sample_mean_male - z90 * sample_std_error_male
Upper_Limit_female = z90 * sample_std_error_female + sample_mean_female
Lower_Limit_female = sample_mean_female - z90 * sample_std_error_female
print("Male_CI: ", [Lower_Limit_male, Upper_Limit_male])
print("Female_CI: ", [Lower_Limit_female, Upper_Limit_female])
# Observation:
# Using the 90% confidence interval, we can say that:
# The average amount spent by male customers lies in the range 9,22,940.71 - 9,26,225.18
# The average amount spent by female customers lies in the range 7,10,425.64 - 7,13,064.55
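# The interval computation above is repeated below for several confidence levels and sample sizes; a small, hypothetical helper like this condenses it using only quantities already computed above:
def bootstrap_ci(means, z, n):
    # means: bootstrapped sample means, z: normal quantile for the level,
    # n: the sample size used for each draw (mirrors the arithmetic above)
    centre = np.mean(means)
    std_error = pd.Series(means).std() / np.sqrt(n)
    return centre - z * std_error, centre + z * std_error

print("Male 90% CI:", bootstrap_ci(male_means, z90, 1000))
print("Female 90% CI:", bootstrap_ci(female_means, z90, 1000))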
# Calculating 95% confidence interval for sample size 1000:
# Taking the values for z at 90%, 95% and 99% confidence interval as:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
print(
"Population avg spend amount for Male: {:.2f}".format(
avgamt_male["Purchase"].mean()
)
)
print(
"Population avg spend amount for Female: {:.2f}\n".format(
avgamt_female["Purchase"].mean()
)
)
print("Sample avg spend amount for Male: {:.2f}".format(np.mean(male_means)))
print("Sample avg spend amount for Female: {:.2f}\n".format(np.mean(female_means)))
print("Sample std for Male: {:.2f}".format(pd.Series(male_means).std()))
print("Sample std for Female: {:.2f}\n".format(pd.Series(female_means).std()))
print(
"Sample std error for Male: {:.2f}".format(
pd.Series(male_means).std() / np.sqrt(1000)
)
)
print(
"Sample std error for Female: {:.2f}\n".format(
pd.Series(female_means).std() / np.sqrt(1000)
)
)
sample_mean_male = np.mean(male_means)
sample_mean_female = np.mean(female_means)
sample_std_male = pd.Series(male_means).std()
sample_std_female = pd.Series(female_means).std()
sample_std_error_male = sample_std_male / np.sqrt(1000)
sample_std_error_female = sample_std_female / np.sqrt(1000)
Upper_Limit_male = z95 * sample_std_error_male + sample_mean_male
Lower_Limit_male = sample_mean_male - z95 * sample_std_error_male
Upper_Limit_female = z95 * sample_std_error_female + sample_mean_female
Lower_Limit_female = sample_mean_female - z95 * sample_std_error_female
print("Male_CI: ", [Lower_Limit_male, Upper_Limit_male])
print("Female_CI: ", [Lower_Limit_female, Upper_Limit_female])
# Observation:
# Using the 95% confidence interval, we can say that:
# The average amount spent by male customers lies in the range 9,22,626.24 - 9,26,539.65
# The average amount spent by female customers lies in the range 7,10,172.98 - 7,13,317.21
# Calculating 99% confidence interval for sample size 1000:
# Taking the values for z at 90%, 95% and 99% confidence interval as:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
print(
"Population avg spend amount for Male: {:.2f}".format(
avgamt_male["Purchase"].mean()
)
)
print(
"Population avg spend amount for Female: {:.2f}\n".format(
avgamt_female["Purchase"].mean()
)
)
print("Sample avg spend amount for Male: {:.2f}".format(np.mean(male_means)))
print("Sample avg spend amount for Female: {:.2f}\n".format(np.mean(female_means)))
print("Sample std for Male: {:.2f}".format(pd.Series(male_means).std()))
print("Sample std for Female: {:.2f}\n".format(pd.Series(female_means).std()))
print(
"Sample std error for Male: {:.2f}".format(
pd.Series(male_means).std() / np.sqrt(1000)
)
)
print(
"Sample std error for Female: {:.2f}\n".format(
pd.Series(female_means).std() / np.sqrt(1000)
)
)
sample_mean_male = np.mean(male_means)
sample_mean_female = np.mean(female_means)
sample_std_male = pd.Series(male_means).std()
sample_std_female = pd.Series(female_means).std()
sample_std_error_male = sample_std_male / np.sqrt(1000)
sample_std_error_female = sample_std_female / np.sqrt(1000)
Upper_Limit_male = z99 * sample_std_error_male + sample_mean_male
Lower_Limit_male = sample_mean_male - z99 * sample_std_error_male
Upper_Limit_female = z99 * sample_std_error_female + sample_mean_female
Lower_Limit_female = sample_mean_female - z99 * sample_std_error_female
print("Male_CI: ", [Lower_Limit_male, Upper_Limit_male])
print("Female_CI: ", [Lower_Limit_female, Upper_Limit_female])
# Observation:
# Using the 99% confidence interval, we can say that:
# The average amount spent by male customers lies in the range 9,22,011.28 - 9,27,154.61
# The average amount spent by female customers lies in the range 7,09,678.88 - 7,13,811.31
# Calculating 90% confidence interval for sample size 1500:
# Drawing repeated samples (sample size = 1500, with replacement) of the average purchase amount for males and females
genders = ["M", "F"]
sample_size = 1500
num_repitions = 1000
male_means = []
female_means = []
for i in range(num_repitions):
male_mean = avgamt_male.sample(sample_size, replace=True)["Purchase"].mean()
female_mean = avgamt_female.sample(sample_size, replace=True)["Purchase"].mean()
male_means.append(male_mean)
female_means.append(female_mean)
# Taking the values for z at 90%, 95% and 99% confidence interval as:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
print(
"Population avg spend amount for Male: {:.2f}".format(
avgamt_male["Purchase"].mean()
)
)
print(
"Population avg spend amount for Female: {:.2f}\n".format(
avgamt_female["Purchase"].mean()
)
)
print("Sample avg spend amount for Male: {:.2f}".format(np.mean(male_means)))
print("Sample avg spend amount for Female: {:.2f}\n".format(np.mean(female_means)))
print("Sample std for Male: {:.2f}".format(pd.Series(male_means).std()))
print("Sample std for Female: {:.2f}\n".format(pd.Series(female_means).std()))
print(
"Sample std error for Male: {:.2f}".format(
pd.Series(male_means).std() / np.sqrt(1500)
)
)
print(
"Sample std error for Female: {:.2f}\n".format(
pd.Series(female_means).std() / np.sqrt(1500)
)
)
sample_mean_male = np.mean(male_means)
sample_mean_female = np.mean(female_means)
sample_std_male = pd.Series(male_means).std()
sample_std_female = pd.Series(female_means).std()
sample_std_error_male = sample_std_male / np.sqrt(1500)
sample_std_error_female = sample_std_female / np.sqrt(1500)
Upper_Limit_male = z90 * sample_std_error_male + sample_mean_male
Lower_Limit_male = sample_mean_male - z90 * sample_std_error_male
Upper_Limit_female = z90 * sample_std_error_female + sample_mean_female
Lower_Limit_female = sample_mean_female - z90 * sample_std_error_female
print("Male_CI: ", [Lower_Limit_male, Upper_Limit_male])
print("Female_CI: ", [Lower_Limit_female, Upper_Limit_female])
# Observation:
# Using the 90% confidence interval, we can say that:
# The average amount spent by male customers lies in the range 9,24,177.41 - 9,26,318.90
# The average amount spent by female customers lies in the range 7,11,187.27 - 7,12,971.67
# With the larger sample size, the confidence interval becomes narrower around the population mean.
# Calculating 95% confidence interval for sample size 1500:
print(
"Population avg spend amount for Male: {:.2f}".format(
avgamt_male["Purchase"].mean()
)
)
print(
"Population avg spend amount for Female: {:.2f}\n".format(
avgamt_female["Purchase"].mean()
)
)
print("Sample avg spend amount for Male: {:.2f}".format(np.mean(male_means)))
print("Sample avg spend amount for Female: {:.2f}\n".format(np.mean(female_means)))
print("Sample std for Male: {:.2f}".format(pd.Series(male_means).std()))
print("Sample std for Female: {:.2f}\n".format(pd.Series(female_means).std()))
print(
"Sample std error for Male: {:.2f}".format(
pd.Series(male_means).std() / np.sqrt(1500)
)
)
print(
"Sample std error for Female: {:.2f}\n".format(
pd.Series(female_means).std() / np.sqrt(1500)
)
)
sample_mean_male = np.mean(male_means)
sample_mean_female = np.mean(female_means)
sample_std_male = pd.Series(male_means).std()
sample_std_female = pd.Series(female_means).std()
sample_std_error_male = sample_std_male / np.sqrt(1500)
sample_std_error_female = sample_std_female / np.sqrt(1500)
Upper_Limit_male = z95 * sample_std_error_male + sample_mean_male
Lower_Limit_male = sample_mean_male - z95 * sample_std_error_male
Upper_Limit_female = z95 * sample_std_error_female + sample_mean_female
Lower_Limit_female = sample_mean_female - z95 * sample_std_error_female
print("Male_CI: ", [Lower_Limit_male, Upper_Limit_male])
print("Female_CI: ", [Lower_Limit_female, Upper_Limit_female])
# Observation:
# Using the 95% confidence interval, we can say that:
# The average amount spent by male customers lies in the range 9,23,972.41 - 9,26,523.93
# The average amount spent by female customers lies in the range 7,11,016.42 - 7,13,142.51
# With the larger sample size, the confidence interval becomes narrower around the population mean.
# Calculating 99% confidence interval for sample size 1500:
print(
"Population avg spend amount for Male: {:.2f}".format(
avgamt_male["Purchase"].mean()
)
)
print(
"Population avg spend amount for Female: {:.2f}\n".format(
avgamt_female["Purchase"].mean()
)
)
print("Sample avg spend amount for Male: {:.2f}".format(np.mean(male_means)))
print("Sample avg spend amount for Female: {:.2f}\n".format(np.mean(female_means)))
print("Sample std for Male: {:.2f}".format(pd.Series(male_means).std()))
print("Sample std for Female: {:.2f}\n".format(pd.Series(female_means).std()))
print(
"Sample std error for Male: {:.2f}".format(
pd.Series(male_means).std() / np.sqrt(1500)
)
)
print(
"Sample std error for Female: {:.2f}\n".format(
pd.Series(female_means).std() / np.sqrt(1500)
)
)
sample_mean_male = np.mean(male_means)
sample_mean_female = np.mean(female_means)
sample_std_male = pd.Series(male_means).std()
sample_std_female = pd.Series(female_means).std()
sample_std_error_male = sample_std_male / np.sqrt(1500)
sample_std_error_female = sample_std_female / np.sqrt(1500)
Upper_Limit_male = z99 * sample_std_error_male + sample_mean_male
Lower_Limit_male = sample_mean_male - z99 * sample_std_error_male
Upper_Limit_female = z99 * sample_std_error_female + sample_mean_female
Lower_Limit_female = sample_mean_female - z99 * sample_std_error_female
print("Male_CI: ", [Lower_Limit_male, Upper_Limit_male])
print("Female_CI: ", [Lower_Limit_female, Upper_Limit_female])
# Observation:
# Using the 99% confidence interval, we can say that:
# The average amount spent by male customers lies in the range 923571.42 - 926924.89
# The average amount spent by female customers lies in the range 710682.32 - 713476.61
# With the larger sample size, the confidence interval becomes narrower around the population mean.
# CLT and Confidence interval considering marital status:
avg_Marital = df.groupby(["User_ID", "Marital_Status"])[["Purchase"]].sum()
avg_Marital = avg_Marital.reset_index()
avgamt_married = avg_Marital[avg_Marital["Marital_Status"] == 1]
avgamt_single = avg_Marital[avg_Marital["Marital_Status"] == 0]
sample_size = 1000
num_repitions = 1000
married_means = []
single_means = []
for i in range(num_repitions):
avg_married = (
avg_Marital[avg_Marital["Marital_Status"] == 1]
.sample(sample_size, replace=True)["Purchase"]
.mean()
)
avg_single = (
avg_Marital[avg_Marital["Marital_Status"] == 0]
.sample(sample_size, replace=True)["Purchase"]
.mean()
)
married_means.append(avg_married)
single_means.append(avg_single)
fig, axis = plt.subplots(nrows=1, ncols=2, figsize=(20, 6))
axis[0].hist(married_means, bins=35)
axis[1].hist(single_means, bins=35)
axis[0].set_title("Married distribution of means, Sample size: 1000")
axis[1].set_title("Unmarried distribution of means, Sample size: 1000")
plt.show()
# Observations:
# 1. The distribution of sample means looks approximately normal for both married and single customers, and the mean of the sample means is close to the population mean, as expected from the central limit theorem.
avg_Marital["Marital_Status"].value_counts()
# Calculating 90% confidence interval for avg expenses for married/single for sample size 1000:
# Taking the values for z at 90%, 95% and 99% confidence interval as:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
print(
"Population avg spend amount for Married: {:.2f}".format(
avgamt_married["Purchase"].mean()
)
)
print(
"Population avg spend amount for Single: {:.2f}\n".format(
avgamt_single["Purchase"].mean()
)
)
print("Sample avg spend amount for Married: {:.2f}".format(np.mean(married_means)))
print("Sample avg spend amount for Single: {:.2f}\n".format(np.mean(single_means)))
print("Sample std for Married: {:.2f}".format(pd.Series(married_means).std()))
print("Sample std for Single: {:.2f}\n".format(pd.Series(single_means).std()))
print(
"Sample std error for Married: {:.2f}".format(
pd.Series(married_means).std() / np.sqrt(1000)
)
)
print(
"Sample std error for Single: {:.2f}\n".format(
pd.Series(single_means).std() / np.sqrt(1000)
)
)
sample_mean_married = np.mean(married_means)
sample_mean_single = np.mean(single_means)
sample_std_married = pd.Series(married_means).std()
sample_std_single = pd.Series(single_means).std()
sample_std_error_married = sample_std_married / np.sqrt(1000)
sample_std_error_single = sample_std_single / np.sqrt(1000)
Upper_Limit_married = z90 * sample_std_error_married + sample_mean_married
Lower_Limit_married = sample_mean_married - z90 * sample_std_error_married
Upper_Limit_single = z90 * sample_std_error_single + sample_mean_single
Lower_Limit_single = sample_mean_single - z90 * sample_std_error_single
print("Married_CI: ", [Lower_Limit_married, Upper_Limit_married])
print("Single_CI: ", [Lower_Limit_single, Upper_Limit_single])
# Calculating 95% confidence interval for avg expenses for married/single for sample size 1000:
# Taking the values for z at 90%, 95% and 99% confidence interval as:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
print(
"Population avg spend amount for Married: {:.2f}".format(
avgamt_married["Purchase"].mean()
)
)
print(
"Population avg spend amount for Single: {:.2f}\n".format(
avgamt_single["Purchase"].mean()
)
)
print("Sample avg spend amount for Married: {:.2f}".format(np.mean(married_means)))
print("Sample avg spend amount for Single: {:.2f}\n".format(np.mean(single_means)))
print("Sample std for Married: {:.2f}".format(pd.Series(married_means).std()))
print("Sample std for Single: {:.2f}\n".format(pd.Series(single_means).std()))
print(
"Sample std error for Married: {:.2f}".format(
pd.Series(married_means).std() / np.sqrt(1000)
)
)
print(
"Sample std error for Single: {:.2f}\n".format(
pd.Series(single_means).std() / np.sqrt(1000)
)
)
sample_mean_married = np.mean(married_means)
sample_mean_single = np.mean(single_means)
sample_std_married = pd.Series(married_means).std()
sample_std_single = pd.Series(single_means).std()
sample_std_error_married = sample_std_married / np.sqrt(1000)
sample_std_error_single = sample_std_single / np.sqrt(1000)
Upper_Limit_married = z95 * sample_std_error_married + sample_mean_married
Lower_Limit_married = sample_mean_married - z95 * sample_std_error_married
Upper_Limit_single = z95 * sample_std_error_single + sample_mean_single
Lower_Limit_single = sample_mean_single - z95 * sample_std_error_single
print("Married_CI: ", [Lower_Limit_married, Upper_Limit_married])
print("Single_CI: ", [Lower_Limit_single, Upper_Limit_single])
# Calculating 99% confidence interval for avg expenses for married/single for sample size 1000:
# Taking the values for z at 90%, 95% and 99% confidence interval as:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
print(
"Population avg spend amount for Married: {:.2f}".format(
avgamt_married["Purchase"].mean()
)
)
print(
"Population avg spend amount for Single: {:.2f}\n".format(
avgamt_single["Purchase"].mean()
)
)
print("Sample avg spend amount for Married: {:.2f}".format(np.mean(married_means)))
print("Sample avg spend amount for Single: {:.2f}\n".format(np.mean(single_means)))
print("Sample std for Married: {:.2f}".format(pd.Series(married_means).std()))
print("Sample std for Single: {:.2f}\n".format(pd.Series(single_means).std()))
print(
"Sample std error for Married: {:.2f}".format(
pd.Series(married_means).std() / np.sqrt(1000)
)
)
print(
"Sample std error for Single: {:.2f}\n".format(
pd.Series(single_means).std() / np.sqrt(1000)
)
)
sample_mean_married = np.mean(married_means)
sample_mean_single = np.mean(single_means)
sample_std_married = pd.Series(married_means).std()
sample_std_single = pd.Series(single_means).std()
sample_std_error_married = sample_std_married / np.sqrt(1000)
sample_std_error_single = sample_std_single / np.sqrt(1000)
Upper_Limit_married = z99 * sample_std_error_married + sample_mean_married
Lower_Limit_married = sample_mean_married - z99 * sample_std_error_married
Upper_Limit_single = z99 * sample_std_error_single + sample_mean_single
Lower_Limit_single = sample_mean_single - z99 * sample_std_error_single
print("Married_CI: ", [Lower_Limit_married, Upper_Limit_married])
print("Single_CI: ", [Lower_Limit_single, Upper_Limit_single])
# Observation:
#
# For both married and single customers, a larger sample size brings the sample mean closer to the population mean, and a higher confidence level widens the interval.
avgamt_age = df.groupby(["User_ID", "Age"])[["Purchase"]].sum()
avgamt_age = avgamt_age.reset_index()
avgamt_age["Age"].value_counts()
sample_size = 200
num_repitions = 1000
all_sample_means = {}
age_intervals = ["26-35", "36-45", "18-25", "46-50", "51-55", "55+", "0-17"]
for i in age_intervals:
all_sample_means[i] = []
for i in age_intervals:
for j in range(num_repitions):
mean = (
avgamt_age[avgamt_age["Age"] == i]
.sample(sample_size, replace=True)["Purchase"]
.mean()
)
all_sample_means[i].append(mean)
fig, axis = plt.subplots(nrows=3, ncols=2, figsize=(20, 15))
sns.histplot(all_sample_means["26-35"], bins=35, ax=axis[0, 0])
sns.histplot(all_sample_means["36-45"], bins=35, ax=axis[0, 1])
sns.histplot(all_sample_means["18-25"], bins=35, ax=axis[1, 0])
sns.histplot(all_sample_means["46-50"], bins=35, ax=axis[1, 1])
sns.histplot(all_sample_means["51-55"], bins=35, ax=axis[2, 0])
sns.histplot(all_sample_means["55+"], bins=35, ax=axis[2, 1])
plt.show()
plt.figure(figsize=(10, 5))
sns.histplot(all_sample_means["0-17"], bins=35)
plt.show()
# Observations:
# 1. The distribution of sample means looks approximately normal for all age groups, and the mean of the sample means is close to the population mean, as expected from the central limit theorem.
# Calculating 90% confidence interval for avg expenses for different age groups for sample size 200:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
sample_size = 200
num_repitions = 1000
all_population_means = {}
all_sample_means = {}
age_intervals = ["26-35", "36-45", "18-25", "46-50", "51-55", "55+", "0-17"]
for i in age_intervals:
all_sample_means[i] = []
all_population_means[i] = []
population_mean = avgamt_age[avgamt_age["Age"] == i]["Purchase"].mean()
all_population_means[i].append(population_mean)
print("All age group population mean: \n", all_population_means)
print("\n")
for i in age_intervals:
for j in range(num_repitions):
mean = (
avgamt_age[avgamt_age["Age"] == i]
.sample(sample_size, replace=True)["Purchase"]
.mean()
)
all_sample_means[i].append(mean)
for val in ["26-35", "36-45", "18-25", "46-50", "51-55", "55+", "0-17"]:
new_df = avgamt_age[avgamt_age["Age"] == val]
std_error = z90 * new_df["Purchase"].std() / np.sqrt(len(new_df))
sample_mean = new_df["Purchase"].mean()
lower_lim = sample_mean - std_error
upper_lim = sample_mean + std_error
print(
"For age {} confidence interval of means: ({:.2f}, {:.2f})".format(
val, lower_lim, upper_lim
)
)
# Calculating 95% confidence interval for avg expenses for different age groups for sample size 200:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
sample_size = 200
num_repitions = 1000
all_means = {}
age_intervals = ["26-35", "36-45", "18-25", "46-50", "51-55", "55+", "0-17"]
for i in age_intervals:
all_means[i] = []
for i in age_intervals:
for j in range(num_repitions):
mean = (
avgamt_age[avgamt_age["Age"] == i]
.sample(sample_size, replace=True)["Purchase"]
.mean()
)
all_means[i].append(mean)
for val in ["26-35", "36-45", "18-25", "46-50", "51-55", "55+", "0-17"]:
new_df = avgamt_age[avgamt_age["Age"] == val]
std_error = z95 * new_df["Purchase"].std() / np.sqrt(len(new_df))
sample_mean = new_df["Purchase"].mean()
lower_lim = sample_mean - std_error
upper_lim = sample_mean + std_error
print(
"For age {} confidence interval of means: ({:.2f}, {:.2f})".format(
val, lower_lim, upper_lim
)
)
# Calculating 99% confidence interval for avg expenses for different age groups for sample size 200:
z90 = 1.645 # 90% Confidence Interval
z95 = 1.960 # 95% Confidence Interval
z99 = 2.576 # 99% Confidence Interval
sample_size = 200
num_repitions = 1000
all_means = {}
age_intervals = ["26-35", "36-45", "18-25", "46-50", "51-55", "55+", "0-17"]
for i in age_intervals:
all_means[i] = []
for i in age_intervals:
for j in range(num_repitions):
mean = (
avgamt_age[avgamt_age["Age"] == i]
.sample(sample_size, replace=True)["Purchase"]
.mean()
)
all_means[i].append(mean)
for val in ["26-35", "36-45", "18-25", "46-50", "51-55", "55+", "0-17"]:
new_df = avgamt_age[avgamt_age["Age"] == val]
std_error = z99 * new_df["Purchase"].std() / np.sqrt(len(new_df))
sample_mean = new_df["Purchase"].mean()
lower_lim = sample_mean - std_error
upper_lim = sample_mean + std_error
print(
"For age {} confidence interval of means: ({:.2f}, {:.2f})".format(
val, lower_lim, upper_lim
)
)
|
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import folium
df = pd.read_csv("/kaggle/input/missing-persons-clean/Missing_Persons_clean.csv")
df
x = df["Long"]
y = df["Lat"]
print(x.max(), x.min())
print(y.max(), y.min())
plt.scatter(x, y)
plt.xlabel("Longitude")
plt.ylabel("Latitude")
data = list(zip(x, y))
kmeans = KMeans(n_clusters=50)
kmeans.fit(data)
plt.scatter(x, y, c=kmeans.labels_)
plt.show()
# OH LOL I SEE NOW !!! LETS NARROW IT DOWN
texas_long = (x > -98) & (x < -95)
texas_lat = (y > 30) & (y < 33)
texas_area = df[texas_long & texas_lat]
texas_x = texas_area["Long"]
texas_y = texas_area["Lat"]
texas_area
plt.scatter(texas_x, texas_y)
data = list(zip(texas_x, texas_y))
fig = plt.gcf()
for i in range(1, 8):
kmeans = KMeans(n_clusters=i + 1)
kmeans.fit(data)
plt.figure(figsize=(2, 2))
plt.title(f"{i+1} clusters")
plt.scatter(texas_x, texas_y, c=kmeans.labels_)
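# To pick a cluster count less visually, the elbow method can be applied to the same Texas points; a minimal sketch using the KMeans inertia_ attribute:
inertias = []
cluster_range = range(2, 9)
for k in cluster_range:
    km = KMeans(n_clusters=k)
    km.fit(data)
    inertias.append(km.inertia_)  # within-cluster sum of squared distances
plt.figure()
plt.plot(list(cluster_range), inertias, marker="o")
plt.xlabel("Number of clusters")
plt.ylabel("Inertia")
plt.title("Elbow method on the Texas subset")
plt.show()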
plt.figure()
image = plt.imread("/kaggle/input/texas-map/data.PNG")
plt.subplot(1, 2, 1)
plt.imshow(image, extent=[-98, -95, 30, 33])
plt.title("Austin-Dallas-Tyler circle")
plt.subplot(1, 2, 2)
plt.imshow(image, extent=[-98, -95, 30, 33])
kmeans = KMeans(n_clusters=8)
kmeans.fit(data)
plt.scatter(texas_x, texas_y, c=kmeans.labels_)
plt.title("8 Clusters")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/empData/emp_data_sharaon.csv")
# no of rows
r, c = df.shape
r
# no of columns
c
df.describe()
df.isnull().sum()
list(df.columns)
# Dropping these columns as they either carry no useful values or have no relevance at this point
df.drop(
[
"Calm",
"Sweat",
"Aerobics",
"Network ID",
"Submit Date (UTC)",
"Tags",
"Start Date (UTC)",
"MyFitnessPal",
],
axis=1,
inplace=True,
)
df.head()
list(df.columns)
a = df["sleepTime"].unique()
print(sorted(a))
a = df["MyFitnessPal"].unique()
print(sorted(a))
autonomyAtWork_count = pd.DataFrame(df["autonomyAtWork"].value_counts())
autonomyAtWork_count
# avgSleepTime - converted this column into numerical values
avgSleepTime_dict = {
"less than 5 hours": 1,
"5-6 hours": 2,
"7-8 hours": 3,
"above 8 hours": 4,
}
# Create the mapped values in a new column
df["target_avgSleepTime"] = df["avgSleepTime"].map(avgSleepTime_dict)
# Review dataset
df["target_avgSleepTime"]
# sleepTime - converted this column into numerical values
sleepTime_dict = {
"I get good sleep half of the times": 1,
"I rarely get good sleep": 0,
"I regularly get high quality sleep": 2,
}
# Create the mapped values in a new column
df["target_sleepTime"] = df["sleepTime"].map(sleepTime_dict)
# Review dataset
df["target_sleepTime"]
a = df["claimToInsurance"].unique()
print((a))
# claimToInsurance - converted this column into numerical values
claimToInsurance_dict = {
"We have insurance?": 0,
"Never": 1,
"1-2 times": 2,
"I have claimed more than 3 times": 2,
}
# Create the mapped values in a new column
df["target_claimToInsurance"] = df["claimToInsurance"].map(claimToInsurance_dict)
# Review dataset
df["target_claimToInsurance"]
# Get all numeric columns
numeric_columns = df._get_numeric_data().columns.values.tolist()
print(numeric_columns)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
plt.figure(figsize=(16, 5))
sns.barplot(x=df["claimToInsurance"], y=df["awarenessToInsuranceCoverage"])
plt.title("Insurance Clain to Awareness Ratio")
plt.show()
plt.figure(figsize=(16, 5))
sns.barplot(x=df["repoWithManager"], y=df["qualityWork"])
plt.title("Job Satisfaction vs Relationship With Manager")
plt.show()
fig = px.histogram(df, x="repoWithManager", template="seaborn")
fig.update_layout(bargap=0.2)
fig.show()
fig = px.histogram(df, x="autonomyAtWork", template="seaborn")
fig.update_layout(bargap=0.2)
fig.show()
fig = px.histogram(df, x="habitBuilderToImproveHeath", template="seaborn")
fig.update_layout(bargap=0.2)
fig.show()
fig = px.histogram(df, x="autonomyAtWork", template="seaborn")
fig.update_layout(bargap=0.2)
fig.show()
df["Financial"] = df.apply(
lambda x: x["target_claimToInsurance"] + x["awarenessToInsuranceCoverage"], axis=1
)
df["Financial"]
df["claimToInsurance"]
df["awarenessToInsuranceCoverage"]
df["Social"]
df["Mental"]
df["Physical"]
plt.figure(figsize=(10, 6))
sns.heatmap(df.corr())
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 15))
sns.heatmap(df.corr(), annot=True)
plt.show()
plt.style.use("seaborn-pastel")
plt.rcParams["figure.figsize"] = (8, 6)
sns.countplot(
x=df["autonomyAtWork"], hue="claimToInsurance", data=df, palette="PuRd"
).set_title("autonomyAtWork Reported by claimToInsurance Field")
plt.style.use("seaborn-pastel")
plt.rcParams["figure.figsize"] = (8, 6)
sns.countplot(
x=df["autonomyAtWork"], hue="qualityWork", data=df, palette="cool"
).set_title("Autonomy At Work Reported by qualityWork")
plt.style.use("seaborn-pastel")
plt.rcParams["figure.figsize"] = (8, 6)
sns.countplot(
x=df["autonomyAtWork"], hue="passion", data=df, palette="Accent"
).set_title("autonomyAtWork Reported by happinessReason")
import pandas as pd
import pandas_profiling as pp
# forming dataframe and printing
data = pd.DataFrame(df)
print(data)
# forming ProfileReport and save
# as output.html file
profile = pp.ProfileReport(data)
profile.to_file("output.html")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df1 = pd.read_csv("/kaggle/input/empData/emp_data_sharaon.csv")
df1.head()
# Showing how many columns and rows there are in the dataset
df1.shape
# Descriptive Statistics: showing the count, mean, std, min/max of our dataset
df1.describe()
# basic operations
import numpy as np
import pandas as pd
import datetime as dt
import seaborn as sns
import io
# visualizations
import matplotlib.pyplot as plt
import plotly.express as px
pd.set_option("display.max_columns", None)
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
mean_squared_error,
precision_score,
classification_report,
average_precision_score,
)
from pandas.plotting import scatter_matrix
from sklearn.model_selection import train_test_split
sns.set_theme(style="darkgrid")
# Dropping these columns as they either carry no useful values or have no relevance at this point
df1.drop(
[
"Calm",
"Sweat",
"Aerobics",
"Network ID",
"Submit Date (UTC)",
"Tags",
"Start Date (UTC)",
"MyFitnessPal",
],
axis=1,
inplace=True,
)
# Use Pearson correlation to find pairwise correlation of all variables
pearson = df1.corr(method="pearson")
pearson
plt.figure(figsize=(16, 10))
sns.heatmap(
pearson,
xticklabels=pearson.columns,
yticklabels=pearson.columns,
cmap="RdBu_r",
annot=True,
linewidth=0.5,
)
plt.style.use("seaborn-pastel")
plt.rcParams["figure.figsize"] = (8, 6)
sns.countplot(
x=df1["Attrition"], hue="EducationField", data=df, palette="PuRd"
).set_title("Number of Attritions Reported by Education Field")
|
# # Exploring the Indonesia Trending YouTube Videos Dataset
# # Introduction
# The Indonesia Trending YouTube Videos dataset includes information about the videos that have been trending in Indonesia. This dataset is updated daily or twice a day and contains various features that can be useful for analyzing the trending videos on YouTube.
# # Dataset Description
# The dataset includes information about the following columns:
# * video_id: The unique identifier of the video
# * publish_time: The date and time the video was published
# * channel_id: The unique identifier of the channel that published the video
# * title: The title of the video
# * description: The description of the video
# * channel_name: The name of the channel that published the video
# * tags: The tags associated with the video
# * category_id: The category identifier number for Indonesia
# * live_status: Whether the video is live or not
# * local_title: The localized title of the video
# * local_description: The localized description of the video
# * duration: The duration of the video
# * dimension: The dimension of the video
# * definition: The definition of the video
# * caption: Whether the video has caption or not
# * license_status: Whether the video has a license or not
# * allowed_region: The region where the video is allowed to be viewed
# * blocked_region: The region where the video is blocked from being viewed
# * view: The number of views the video has received
# * like: The number of likes the video has received
# * dislike: The number of dislikes the video has received
# * favorite: The number of times the video has been added to a favorite list
# * comment: The number of comments the video has received
# * trending_time: The date and time the video became trending
# * title_sentiment: The sentiment score of the video title
# # Loading Dependencies
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# The category file is JSON, so read it with read_json rather than read_csv
category_info = pd.read_json(
    "/kaggle/input/indonesias-trending-youtube-video-statistics/category.json"
)
category_info
trending_indo = pd.read_csv(
"/kaggle/input/indonesias-trending-youtube-video-statistics/trending.csv"
)
trending_indo.head()
trending_indo.shape
# Check for missing values
print(trending_indo.isnull().sum())
trending_indo.info()
# # Comprehensive missing-value treatment
# Replace null values in 'description' and 'local_description' columns with empty string
trending_indo["description"].fillna(value="", inplace=True)
trending_indo["local_description"].fillna(value="", inplace=True)
# Replace null values in 'tags' column with string indicating no tags
trending_indo["tags"].fillna(value="no tags", inplace=True)
# Replace null values in 'allowed_region' and 'blocked_region' columns with string indicating all regions
trending_indo["allowed_region"].fillna(value="all regions", inplace=True)
trending_indo["blocked_region"].fillna(value="all regions", inplace=True)
# Replace null values in 'view', 'like', 'dislike', and 'comment' columns with mean or median value
trending_indo["view"].fillna(value=trending_indo["view"].mean(), inplace=True)
trending_indo["like"].fillna(value=trending_indo["like"].median(), inplace=True)
trending_indo["dislike"].fillna(value=trending_indo["dislike"].median(), inplace=True)
trending_indo["comment"].fillna(value=trending_indo["comment"].mean(), inplace=True)
# Replace null values in "favorite" column with the median
favorite_median = trending_indo["favorite"].median()
trending_indo["favorite"].fillna(favorite_median, inplace=True)
trending_indo = trending_indo.drop(columns=["favorite"])
# # Visualize the distribution of numerical columns
# # How many videos have view counts within a given range?
#
# Visualize the distribution of numerical columns
import plotly.express as px
fig = px.histogram(trending_indo, x="view", nbins=50)
fig.show()
# # How many comments are there in each category?
# Visualize the distribution of numerical columns
import plotly.express as px
fig = px.histogram(trending_indo, x="category_id", y="comment")
fig.show()
# # Visualize the distribution of category_id with views
# # Which category has the highest view counts?
#
import plotly.express as px
fig = px.box(trending_indo, x="category_id", y="view")
fig.show()
# # Visualize the distribution of category_id with like and dislikes
# # How many likes and dislikes are there per category?
#
import plotly.express as px
fig = px.scatter(trending_indo, x="like", y="dislike", color="category_id")
fig.show()
# # Correlation Matrix with Heatmap
# # What does the correlation matrix look like?
import plotly.express as px
# Exclude columns with missing values
trending_indo_no_null = trending_indo.dropna(axis=1)
# Calculate correlation matrix
corr = trending_indo_no_null.corr()
# Create heatmap
fig = px.imshow(corr, color_continuous_scale="RdBu_r")
fig.update_layout(
title="Correlation Matrix with Heatmap", xaxis_title="Column", yaxis_title="Column"
)
fig.update_traces(hoverongaps=False)
# Add colorbar and annotated values
fig.update_layout(coloraxis_colorbar=dict(title="Correlation"))
fig.update_layout(
annotations=[
dict(
text=str(round(corr.iloc[i, j], 2)),
x=j,
y=i,
xref="x1",
yref="y1",
showarrow=False,
font=dict(size=12),
)
for i in range(len(corr.columns))
for j in range(len(corr.columns))
]
)
fig.show()
# # How do the categories rank by number of videos, from most to fewest?
# Check the distribution of categorical columns
print(trending_indo["category_id"].value_counts())
# # Visualize the distribution of categorical columns
# # How many videos are there in each category?
# Visualize the distribution of categorical columns
import plotly.express as px
fig = px.histogram(trending_indo, x="category_id")
fig.update_layout(title="Distribution of Category IDs")
fig.show()
import plotly.express as px
pie_chart = px.pie(
trending_indo,
names=trending_indo["category_id"].value_counts().index,
values=trending_indo["category_id"].value_counts().values,
)
pie_chart.show()
# # Visualize the distribution of license status columns
# # How many trending videos carried a license (copyright) between 11 January 2021 and 13 April 2023?
import plotly.express as px
pie_chart = px.pie(
trending_indo,
names=trending_indo["license_status"].value_counts().index,
values=trending_indo["license_status"].value_counts().values,
)
pie_chart.show()
# # Drop columns that are not needed
#
# Drop columns that are not needed
trending_indo.drop(
["thumbnail_url", "thumbnail_width", "thumbnail_height"], axis=1, inplace=True
)
# # Fill missing values in 'tags' column with empty string
#
# Fill missing values in 'tags' column with empty string
trending_indo["tags"].fillna("", inplace=True)
# # Convert publish_time and trending_time columns to datetime format
#
# Convert publish_time and trending_time columns to datetime format
trending_indo["publish_time"] = pd.to_datetime(
trending_indo["publish_time"], infer_datetime_format=True
)
trending_indo["trending_time"] = pd.to_datetime(
trending_indo["trending_time"], infer_datetime_format=True
)
# Display the info of the cleaned dataset
trending_indo.info()
trending_indo
# # Trend Analysis
# # How many views do trending videos receive per week?
import plotly.express as px
# Convert the 'trending_time' column to datetime format
trending_indo["trending_time"] = pd.to_datetime(trending_indo["trending_time"])
# Group the dataframe by the week number of the 'trending_time' column and aggregate the sum of 'view'
trending_by_week = (
trending_indo.groupby(trending_indo["trending_time"].dt.week)["view"]
.sum()
.reset_index()
)
# Plot the trend of video views over time using Plotly
fig = px.line(
trending_by_week,
x="trending_time",
y="view",
title="Trend of Video Views over Time",
)
fig.update_xaxes(title="Week Number")
fig.update_yaxes(title="Total Views")
fig.show()
import plotly.express as px
# Create a scatter plot matrix
fig = px.scatter_matrix(
trending_indo, dimensions=["view", "like", "dislike", "comment"]
)
# Show the plot
fig.show()
# # Sentiment Analysis
import plotly.express as px
from textblob import TextBlob
# Define a function to get the sentiment polarity of a text
def get_sentiment(text):
blob = TextBlob(text)
sentiment = blob.sentiment.polarity
return sentiment
# Apply the sentiment analysis function to the 'title' column of the dataframe
trending_indo["title_sentiment"] = trending_indo["title"].apply(get_sentiment)
# Plot the distribution of sentiment polarity scores
fig = px.histogram(trending_indo, x="title_sentiment", nbins=20, opacity=0.7)
fig.update_layout(
title="Distribution of Sentiment Polarity Scores in Video Titles",
xaxis_title="Sentiment Polarity",
yaxis_title="Count",
)
fig.show()
# # Topic Modelling : Adjust the number of rows as per the requirement
import plotly.graph_objects as go
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import random
# Preprocess the text data
nlp = spacy.load("en_core_web_sm")
def preprocess(text):
doc = nlp(text.lower())
return " ".join(
[token.lemma_ for token in doc if not token.is_stop and token.is_alpha]
)
# Take a random sample of 1000 rows
trending_indo_sample = trending_indo.sample(n=1000, random_state=42)
# Preprocess the text data for the sample
trending_indo_sample["title_processed"] = trending_indo_sample["title"].apply(
preprocess
)
# Create the count vectorizer and fit on the preprocessed text data
vectorizer = CountVectorizer(max_df=0.95, min_df=2, stop_words="english")
doc_word = vectorizer.fit_transform(trending_indo_sample["title_processed"])
# Create the LDA model and fit on the document-word matrix
lda_model = LatentDirichletAllocation(n_components=10, random_state=42)
lda_model.fit(doc_word)
# Get the top 10 words for each topic
top_words_per_topic = []
for topic_idx, topic in enumerate(lda_model.components_):
top_words = [vectorizer.get_feature_names()[i] for i in topic.argsort()[:-11:-1]]
top_words_per_topic.append(", ".join(top_words))
# Create a dataframe to store the top words per topic
topics_df = pd.DataFrame({"Topic": range(10), "Top Words": top_words_per_topic})
# Create a bar chart to display the top words per topic
fig = go.Figure(go.Bar(x=topics_df["Top Words"], y=topics_df["Topic"], orientation="h"))
fig.update_layout(
title="Top Words per Topic", xaxis_title="Top Words", yaxis_title="Topic"
)
fig.show()
# # Recommendation Engine
from scipy.sparse import csr_matrix
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
# Get all unique users and videos
unique_users = trending_indo["channel_name"].unique()
unique_videos = trending_indo["video_id"].unique()
# Create a user-item matrix that includes all videos and users
user_item_matrix = pd.DataFrame(index=unique_users, columns=unique_videos, data=0)
# Fill in the user-item matrix with views
for _, row in trending_indo.iterrows():
user = row["channel_name"]
video = row["video_id"]
views = row["view"]
user_item_matrix.loc[user, video] = views
# Convert the user-item matrix to a sparse matrix
sparse_user_item = csr_matrix(user_item_matrix)
# Perform SVD on the sparse matrix
svd = TruncatedSVD(n_components=100)
latent_matrix = svd.fit_transform(sparse_user_item)
# Calculate the cosine similarity between all pairs of videos
cosine_sim = cosine_similarity(latent_matrix)
# Get the top 10 most similar videos for each video
similar_videos = {}
for i, row in enumerate(cosine_sim):
top_similar_indices = row.argsort()[-11:-1]
similar_videos[unique_videos[i]] = unique_videos[top_similar_indices].tolist()
# Example: Get the top 10 recommended videos for a given video
video_id = "zkQ2kHPMxoc"
top_recommended_videos = similar_videos[video_id]
print(top_recommended_videos)
# For the video with the video_id 'zkQ2kHPMxoc', the top 10 recommended videos based on the similarity of their latent factors are:
# ['CzKwwDvO4W8', 'RP0ZyAIMBPo', 'Ih4u7w1kGWU', 'UcfCD3ryHbk', 'uBY1AoiF5Vo', '9ydoDVrMoSM', 'tMpjQWGen3w', 'YfXpE6_v2Wc', 'l-fjxdTp_yQ', 'Kx4obiG1e5Q']
# # Engagement Analysis
import plotly.express as px
# Group by video_id and calculate the mean engagement metrics
engagement_metrics = ["view", "like", "dislike", "comment"]
engagement_means = trending_indo.groupby("video_id")[engagement_metrics].mean()
# Create a new column with the total engagement score
engagement_means["engagement_score"] = (
engagement_means["view"]
+ 2 * engagement_means["like"]
- engagement_means["dislike"]
+ 5 * engagement_means["comment"]
)
# Sort by engagement score
engagement_means = engagement_means.sort_values(by="engagement_score", ascending=False)
# Print the top 10 videos by engagement score
print(engagement_means.head(10))
# Create a histogram of engagement scores using Plotly
fig = px.histogram(
engagement_means, x="engagement_score", nbins=50, title="Engagement Scores"
)
fig.update_xaxes(title_text="Engagement Score")
fig.update_yaxes(title_text="Number of Videos")
fig.show()
# It seems like the video with the highest engagement score is "gQlMMD8auMs" with a total engagement score of 2.516049e+08. This video has a very high number of views and likes compared to the other videos. The histogram of the engagement scores shows that most videos have an engagement score between 0 and 5e+07, but there are a few videos with very high engagement scores.
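# The raw scores above are dominated by a long right tail; a log10 view (a minimal sketch
# reusing the engagement_means frame built above) makes the skew easier to read.
log_scores = np.log10(engagement_means["engagement_score"].clip(lower=1))
fig = px.histogram(x=log_scores, nbins=50, title="log10 of Engagement Scores")
fig.update_xaxes(title_text="log10(Engagement Score)")
fig.update_yaxes(title_text="Number of Videos")
fig.show()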
# # Network Analysis
import networkx as nx
# Create a directed graph
G = nx.DiGraph()
# Add edges between channels and videos
for i, row in trending_indo.iterrows():
G.add_edge(row["channel_name"], row["video_id"])
# Calculate the degree centrality of each node
degree_centrality = nx.degree_centrality(G)
# Print the top 10 nodes by degree centrality
top_nodes = sorted(degree_centrality, key=degree_centrality.get, reverse=True)[:10]
for node in top_nodes:
print(node, degree_centrality[node])
# Based on this analysis, we can see that the top nodes in the network are primarily TV channels and entertainment-related accounts, which suggests that the most popular videos on YouTube in Indonesia are related to television programming and entertainment. This information could be used by content creators and marketers to better understand the preferences and interests of the Indonesian audience on YouTube, and to develop content and marketing strategies that appeal to this audience. Additionally, further analysis could be done to investigate the relationship between the top nodes and other characteristics of the videos and their viewers, such as engagement metrics or demographics.
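# A small follow-up sketch (reusing top_nodes, degree_centrality and trending_indo from
# above): line the top channels' centrality up against their average view counts.
avg_views = trending_indo.groupby("channel_name")["view"].mean()
for node in top_nodes:
    if node in avg_views.index:
        print(
            "{}: degree centrality {:.4f}, average views {:.0f}".format(
                node, degree_centrality[node], avg_views[node]
            )
        )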
# # Tried Brand Analysis
import re
# Define the dictionary of brand names and variations
brands_dict = {
"TRANS7": ["TRANS7", "Trans7 Official"],
"RCTI": ["RCTI - LAYAR DRAMA INDONESIA", "RCTI Official"],
"Indosiar": ["Indosiar"],
"ANTV": ["ANTV Official", "ANTV"],
"NET.": ["NET. Official"],
"MNCTV": ["MNCTV Official"],
"KOMPASTV": ["KOMPASTV"],
}
# Define a function to match a channel name to a brand name in the dictionary
def match_brand(channel_name):
for brand, variations in brands_dict.items():
for variation in variations:
if re.search(variation, channel_name, re.IGNORECASE):
return brand
return "Other"
# Apply the match_brand function to the channel_name column to create a new brand column
trending_indo["brand"] = trending_indo["channel_name"].apply(match_brand)
import plotly.graph_objs as go
# Calculate the average engagement metrics for each brand
brand_metrics = trending_indo.groupby("brand")[["view", "like", "comment"]].mean()
# Create the bar chart trace
trace = go.Bar(x=brand_metrics.index, y=brand_metrics["view"], name="Average Views")
# Create the figure layout
layout = go.Layout(
title="Average Engagement Metrics by Brand",
xaxis={"title": "Brand"},
yaxis={"title": "Average Count"},
)
# Create the figure and add the trace
fig = go.Figure(data=[trace], layout=layout)
# Show the figure
fig.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={"figure.figsize": (6, 6)})
data = pd.read_csv("/kaggle/input/ecommerce-customer-data/data.csv")
data.head()
data.info()
sns.set_style("darkgrid")
sns.displot(data["Time on App"], kde=True, color="blue")
plt.show()
sns.set_style("darkgrid")
sns.displot(data["Yearly Amount Spent"], kde=True, color="blue")
plt.show()
sns.set_style("darkgrid")
sns.displot(data["Time on Website"], kde=True, color="blue")
plt.show()
sns.scatterplot(
x="Time on App", y="Yearly Amount Spent", hue="Time on Website", data=data
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import pymc3 as pm
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
np.random.seed(10)
random.seed(10)
data = pd.read_csv("/kaggle/input/ufcdataset/data.csv")
data.info()
df = data
df.loc[:, "r_winner"] = df.loc[:, "winner"]
df.loc[df["winner"] == "red", "r_winner"] = 1
df.loc[df["winner"] != "red", "r_winner"] = 0
df = df.drop(columns=["winner"])
# df.fillna('NA', inplace=True)
fields = ["B_Height", "R_Height", "B_Weight", "R_Weight", "winner"]
df = data[fields]
df.info()
# # Given all y's are present and only X's are missing, there is no need to handle missing values in the outcome; only the covariates would need attention
# df.to_csv("heightWeight.csv", index=False)
# df.iloc[1:1000,:].to_csv("heightWeight1000.csv", index=False)
round_1_stats_column_for_r = [
"R__Round1_Grappling_Reversals_Landed",
"R__Round1_Grappling_Standups_Landed",
"R__Round1_Grappling_Submissions_Attempts",
"R__Round1_Grappling_Takedowns_Attempts",
"R__Round1_Grappling_Takedowns_Landed",
"R__Round1_TIP_Back Control Time",
"R__Round1_TIP_Clinch Time",
"R__Round1_TIP_Control Time",
"R__Round1_TIP_Distance Time",
"R__Round1_TIP_Ground Control Time",
"R__Round1_TIP_Ground Time",
"R__Round1_TIP_Guard Control Time",
"R__Round1_TIP_Half Guard Control Time",
"R__Round1_TIP_Misc. Ground Control Time",
"R__Round1_TIP_Mount Control Time",
"R__Round1_TIP_Neutral Time",
"R__Round1_TIP_Side Control Time",
"R__Round1_TIP_Standing Time",
]
round_1_stats_column_for_b = [
"B__Round1_Grappling_Reversals_Landed",
"B__Round1_Grappling_Standups_Landed",
"B__Round1_Grappling_Submissions_Attempts",
"B__Round1_Grappling_Takedowns_Attempts",
"B__Round1_Grappling_Takedowns_Landed",
"B__Round1_TIP_Back Control Time",
"B__Round1_TIP_Clinch Time",
"B__Round1_TIP_Control Time",
"B__Round1_TIP_Distance Time",
"B__Round1_TIP_Ground Control Time",
"B__Round1_TIP_Ground Time",
"B__Round1_TIP_Guard Control Time",
"B__Round1_TIP_Half Guard Control Time",
"B__Round1_TIP_Misc. Ground Control Time",
"B__Round1_TIP_Mount Control Time",
"B__Round1_TIP_Neutral Time",
"B__Round1_TIP_Side Control Time",
"B__Round1_TIP_Standing Time",
]
assert len(round_1_stats_column_for_r) == len(round_1_stats_column_for_b)
df_1_round_sub = data[(data["winby"] == "SUB") & (data["Last_round"] == 1)]
df_1_round_sub = df_1_round_sub[
round_1_stats_column_for_r + round_1_stats_column_for_b + ["r_winner"]
]
covariates = df_1_round_sub.columns[df_1_round_sub.columns != "r_winner"].values
df_1_round_sub["r_winner"] = df_1_round_sub["r_winner"].astype("int32")
X = df_1_round_sub.loc[:, covariates]
X.insert(0, "intersect", 1) # Adding column of 1's for the intersect
y = df_1_round_sub.loc[:, "r_winner"]
standard_scaler = MinMaxScaler()
standard_scaler.fit(X)
X.shape[1]
trace = None
with pm.Model() as model:
X_data = pm.Data("X", standard_scaler.transform(X))
y_data = pm.Data("y", y.values)
priors_for_x = pm.Uniform("X_priors", 1, 2, observed=X_data)
# priors_for_covariates = []
# priors_for_covariates.append(pm.Uniform("intersect", 1, 100))
# for i,e in enumerate(round_1_stats_column_for_b):
# priors_for_covariates.append(pm.Uniform(e, 1, 100, shape=X))
beta = pm.Normal("beta", mu=0, tau=0.0001, shape=X.shape[1])
logit_p = pm.math.dot(X_data, beta)
likelihood = pm.Bernoulli("likelihood", logit_p=logit_p, observed=y_data)
trace = pm.sample(
5000,
initvals={
"X_priors": np.array([0.5] * X.shape[1] * X.shape[0]).reshape(X.shape),
"likelihood": np.array([1] * y.shape[0]),
"beta": np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
),
},
)
import arviz as az

az.summary(trace, var_names=["beta"])
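# A short interpretation sketch (assuming the sampling above succeeded and `trace` holds
# posterior draws): convert the posterior mean of each beta into an odds ratio.
beta_means = trace["beta"].mean(axis=0)
names = ["intercept"] + list(covariates)
for name, b in zip(names, beta_means):
    print("{:45s} odds ratio ~ {:.3f}".format(name, np.exp(b)))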
|
# # Built-in Data Types
# In programming, data type is an important concept.
# Variables can store data of different types, and different types can do different things.
# Python has the following data types built in by default, in these categories:
# Text Type:      str
# Numeric Types:  int, float, complex
# Sequence Types: list, tuple, range
# Mapping Type:   dict
# Set Types:      set, frozenset
# Boolean Type:   bool
# Binary Types:   bytes, bytearray, memoryview
# None Type:      NoneType
# # Getting the Data Type
# You can get the data type of any object by using the type() function.
a = 9
print(type(a))
# # Setting the Data Type
# Example                                        Data Type
# x = "Hello World"                              str
# x = 20                                         int
# x = 20.5                                       float
# x = 1j                                         complex
# x = ["apple", "banana", "cherry"]              list
# x = ("apple", "banana", "cherry")              tuple
# x = range(6)                                   range
# x = {"name": "John", "age": 36}                dict
# x = {"apple", "banana", "cherry"}              set
# x = frozenset({"apple", "banana", "cherry"})   frozenset
# x = True                                       bool
# x = b"Hello"                                   bytes
# x = bytearray(5)                               bytearray
# x = memoryview(bytes(5))                       memoryview
# x = None                                       NoneType
# # Setting the Specific Data Type
# If you want to specify the data type, you can use the following constructor functions:
# Example                                          Data Type
# x = str("Hello World")                           str
# x = int(20)                                      int
# x = float(20.5)                                  float
# x = complex(1j)                                  complex
# x = list(("apple", "banana", "cherry"))          list
# x = tuple(("apple", "banana", "cherry"))         tuple
# x = range(6)                                     range
# x = dict(name="John", age=36)                    dict
# x = set(("apple", "banana", "cherry"))           set
# x = frozenset(("apple", "banana", "cherry"))     frozenset
# x = bool(5)                                      bool
# x = bytes(5)                                     bytes
# x = bytearray(5)                                 bytearray
# x = memoryview(bytes(5))                         memoryview
# # Python Numbers
# There are three numeric types in Python:
# int, float, complex. Variables of numeric types are created when you assign a value to them:
a = 5 # int
b = 3.5 # float
c = 5j  # complex
# In Python we use the type() function to verify the type of any object.
#
print(type(a))
print(type(b))
print(type(c))
# Int - Integer
# An int, or integer, is a whole number, positive or negative, without decimals, of unlimited length.
# integers
a = 2
b = 5321697243628210
c = -732015
print(type(a))
print(type(b))
print(type(c))
# # Float
# A float is a positive or negative number containing one or more decimals.
a = 2.30
b = 3.0
c = -60.75
print(type(a))
print(type(b))
print(type(c))
# Floats can also be scientific numbers with an "e" to indicate the power of 10.
# floats
a = 75e3
b = 60e3
c = -90.2e500
print(type(a))
print(type(b))
print(type(c))
# # Complex
# Complex numbers are written with a "j" as the imaginary part:
a = 1 + 3j
b = 3j
c = -3j
print(type(a))
print(type(b))
# # Type Conversion
# You can convert from one type to another with the int(), float(), and complex() methods:
a = 3 # int
b = 4.6 # float
c = 3j # complex
# convert from int to float:
a = float(a)
# convert from float to int:
b = int(b)
# convert from int to complex:
c = complex(c)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
# Note: You cannot convert complex numbers into another number type.
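# A quick demonstration of the note above: converting a complex number raises a TypeError.
try:
    int(3 + 5j)
except TypeError as err:
    print("Cannot convert complex to int:", err)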
# # Random Numbers
# Python does not have a random() function to make a random number, but it has a built-in module called random that can be used to make random numbers:
import random
print(random.randrange(1, 10))
# # Specify a Variable Type
# There may be times when you want to specify a type for a variable. This can be done with casting.
# Python is an object-oriented language, and as such it uses classes to define data types, including its primitive types.
# Casting in Python is therefore done using constructor functions:
# int()   - constructs an integer from an integer literal, a float literal (by removing all decimals),
#           or a string literal (provided the string represents a whole number)
# float() - constructs a float from an integer literal, a float literal,
#           or a string literal (provided the string represents a float or an integer)
# str()   - constructs a string from a wide variety of data types, including strings,
#           integer literals and float literals
# integers
a = int(3)  # a will be 3
b = int(3.1)  # b will be 3
c = int("6")  # c will be 6
print(a)
print(b)
print(c)
# floats
a = float(3)  # a will be 3.0
b = float(2.3)  # b will be 2.3
c = float("4")  # c will be 4.0
d = float("5.2")  # d will be 5.2
print(a)
print(b)
print(c)
print(d)
# strings
a = str("c3") # x will be 'c1'
b = str(5) # y will be '5'
c = str(4.0) # z will be '4.0'
print(a)
print(b)
print(c)
# # Strings
# Strings in Python are surrounded by either single quotation marks or double quotation marks.
# 'merhaba' is the same as "merhaba".
# You can display a string literal with the print() function:
print("Yıldız")
print("Yıldız")
# # Assigning a String to a Variable
# Assigning a string to a variable is done with the variable name followed by an equal sign and the string:
a = "Yıldız"
print(a)
# # Multiline Strings
# You can assign a multiline string to a variable by using three double quotes:
a = """Milleti kurtaranlar
yalnız ve ancak
öğretmenlerdir."""
print(a)
# Or three single quotes:
a = '''Milleti kurtaranlar
yalnız ve ancak
öğretmenlerdir.'''
print(a)
# # Strings Are Arrays
# Like many other popular programming languages, strings in Python are arrays of bytes representing unicode characters.
# However, Python does not have a character data type; a single character is simply a string with a length of 1.
# Square brackets can be used to access elements of the string.
# Get the character at position 1 (remember that the first character has position 0):
a = "Selam, Selam"
print(a[1])
# # Looping Through a String
# Since strings are arrays, we can loop through the characters in a string with a for loop.
# Loop through the letters in the word "telefon":
for x in "telefon":
print(x)
# # String Length
# To get the length of a string, use the len() function.
# The len() function returns the length of a string:
a = "Mavi, Mavi"
print(len(a))
# # Check String
# To check if a certain phrase or character is present in a string, we can use the keyword in.
# Check if "Sevgi" is present in the following text:
txt = "Sevgi ile büyüyen her insan iyi bir insan olur."
print("Sevgi" in txt)
# Use it in an if statement:
txt = "Sevgi ile büyüyen her insan iyi bir insan olur"
if "Sevgi" in txt:
print("Evet, 'Sevgi' mevcut.")
# # Check if NOT
# To check if a certain phrase or character is NOT present in a string, we can use the keyword not in.
# Check that "özgür" is NOT present in the following text:
txt = "Sevgi ile büyüyen her insan iyi bir insan olur"
print("özgür" not in txt)
# Use it in an if statement:
txt = "Sevgi ile büyüyen her insan iyi bir insan olur"
if "pahalı" not in txt:
print("Hayır, 'özgür' mevcut değil.")
|
import pandas as pd
df = pd.read_csv("/kaggle/input/preprocess/dataset.csv")
df = df.dropna(subset=["Sentence"])
df.Sentence = [str(text) for text in df.Sentence]
df.shape
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df["Subreddit"] = label_encoder.fit_transform(df["Subreddit"])
df["Subreddit"].unique()
labels = list(label_encoder.classes_)
print(df.sample(2))
import numpy as np
import math
import nltk
from nltk.tokenize import word_tokenize
from gensim.models import word2vec
from numpy import zeros
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.optimizers import Adam
from keras.layers import BatchNormalization, Flatten, Conv1D, MaxPooling1D
from keras.layers import Dropout
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# Don't Show Warning Messages
import warnings
warnings.filterwarnings("ignore")
# Split the data across multiple GPUs
strategy = tf.distribute.MirroredStrategy()
# create the padded vectors
sen = df["Sentence"].astype(str)
# The Tokenizer builds a word index over the corpus
t = Tokenizer()
t.fit_on_texts(sen)
vocab_size = len(t.word_index) + 1
print(vocab_size)
# integer encode the documents
# assign each word a unique integer
encoded_docs = t.texts_to_sequences(sen)
# pad documents to a max length of 256 tokens
max_length = 256
padded_docs_combined = pad_sequences(encoded_docs, maxlen=max_length, padding="post")
from sklearn.model_selection import train_test_split
X = padded_docs_combined
y = df["Subreddit"].values
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=42, stratify=df[["Subreddit"]]
)
num_classes = len(np.unique(y_train))  # 6 subreddit classes in this dataset
class_weights = {}
for class_id in range(num_classes):
num_positive = np.sum(y_train == class_id)
num_negative = len(y_train) - num_positive
if num_positive == 0:
class_weight = 1.0
else:
class_weight = num_negative / num_positive
class_weights[class_id] = class_weight
class_weights
import gensim
w2v_model = gensim.models.Word2Vec.load("/kaggle/input/train-word2vec/Word2Vec.model")
with strategy.scope():
    # create an embedding matrix for words that are in our combined train and test dataframes
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
# check if the word is in the word2vec vocab
if word in w2v_model.wv:
embedding_vector = w2v_model.wv[word]
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
print(embedding_matrix.shape)
def Create_Model():
with strategy.scope():
model = Sequential()
e = Embedding(
vocab_size,
w2v_model.vector_size,
weights=[embedding_matrix],
input_length=max_length,
trainable=True,
)
model.add(e)
model.add(Conv1D(512, 5, activation="relu"))
model.add(MaxPooling1D(pool_size=3, strides=2))
model.add(Dropout(0.25))
model.add(Conv1D(128, 5, activation="relu"))
model.add(MaxPooling1D(pool_size=3, strides=2))
model.add(Dropout(0.25))
model.add(Conv1D(64, 5, activation="relu"))
model.add(MaxPooling1D(pool_size=3, strides=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation="sigmoid"))
return model
class_weights[1]
# Train one-vs-all CNN classifiers with early stopping, checkpointing and class weights
CNN_classifiers = []
num_classes = 6
for c in range(num_classes):
print("\n", labels[c])
model = Create_Model()
with strategy.scope():
# Compile the model
Adam_new = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(optimizer=Adam_new, loss="binary_crossentropy", metrics=["acc"])
# Train the model
early_stopping = EarlyStopping(monitor="val_loss", patience=3, mode="min")
filename = labels[c] + "_cnn.h5"
save_best = ModelCheckpoint(
filename, save_best_only=True, monitor="val_loss", mode="min"
)
history = model.fit(
X_train,
y_train == c,
validation_split=0.1,
epochs=25,
verbose=1,
callbacks=[early_stopping, save_best],
class_weight={0: 1, 1: class_weights[c]},
)
CNN_classifiers.append(model)
del model, history
from sklearn.metrics import classification_report
# Predict class probabilities for each class
class_probs = [
cnn_classifier.predict(X_test)[:, 0] for cnn_classifier in CNN_classifiers
]
# Choose class with highest probability as prediction
y_pred = np.argmax(class_probs, axis=0)
# Compute classification report for the one-vs-all CNN classifiers
class_reports = []
num_classes = 6
for c in range(num_classes):
y_true = (y_test == c).astype(int)
y_pred_c = class_probs[c] >= 0.5
target_names = ["Non-" + labels[c], labels[c]]
report = classification_report(y_true, y_pred_c, target_names=target_names)
class_reports.append(report)
overall_report = classification_report(y_test, y_pred, target_names=labels)
# Print individual and overall classification reports
for c in range(num_classes):
print(f"{labels[c]} report:")
print(class_reports[c], "\n")
print("Overall report:")
print(overall_report)
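# A compact sketch to complement the per-class reports above: an overall confusion matrix
# for the argmax predictions, labelled with the subreddit names.
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred, labels=np.arange(len(labels)))
print(pd.DataFrame(cm, index=labels, columns=labels))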
|
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
import seaborn as sns
from sklearn.metrics import mean_squared_error
iris = load_iris()
data = sns.load_dataset("iris")
data
x = data.iloc[:, :-1]
y = iris.target
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2, random_state=42)
m = DecisionTreeClassifier(max_depth=3)  # pre-pruning: limit tree depth while growing
m.fit(xtrain, ytrain)
p = m.predict(xtest)
print(p)
import matplotlib.pyplot as plt
plt.scatter(p, ytest)
plt.show()
print(mean_squared_error(p, ytest))
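# For a classifier, accuracy is a more natural summary than the squared error above;
# a minimal check using the same predictions.
from sklearn.metrics import accuracy_score

print("Accuracy:", accuracy_score(ytest, p))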
plt.figure(figsize=(20, 10))
plot_tree(
m, filled=True, feature_names=iris.feature_names, class_names=iris.target_names
)
plt.show()
|
# Introduction
#
# In the telecommunication industry, customers tend to change operators if not provided with attractive schemes and offers. It is very important for any telecom operator to prevent the present customers from churning to other operators. As a data scientist, your task in this case study would be to build an ML model which can predict if the customer will churn or not in a particular month based on the past data.
# Objectives
#
# - The main goal of the case study is to build ML models to predict churn. The predictive model that you're going to build will serve the following purposes:
# - It will be used to predict whether a high-value customer will churn or not, in near future (i.e. churn phase). By knowing this, the company can take action steps such as providing special plans, discounts on recharge etc.
# - It will be used to identify important variables that are strong predictors of churn. These variables may also indicate why customers choose to switch to other networks.
# - Even though overall accuracy will be your primary evaluation metric, you should also mention other metrics like precision, recall, etc. for the different models that can be used for evaluation purposes based on different business objectives. For example, in this problem statement, one business goal can be to build an ML model that identifies customers who'll definitely churn with more accuracy as compared to the ones who'll not churn. Make sure you mention which metric can be used in such scenarios.
# - Recommend strategies to manage customer churn based on your observations.
#
# Reading data
import pandas as pd
pd.set_option("display.max_columns", None)
import re
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
telecom_df = pd.read_csv("../input/telecom-churn-case-study-hackathon-c46/train.csv")
telecom_df_test = pd.read_csv(
"../input/telecom-churn-case-study-hackathon-c46/test.csv"
)
telecom_df_sample = pd.read_csv(
"../input/telecom-churn-case-study-hackathon-c46/sample (2).csv"
)
data_dict = pd.read_csv(
"../input/telecom-churn-case-study-hackathon-c46/data_dictionary.csv"
)
telecom_df.head()
data_dict = dict(
zip(
map(lambda x: re.sub(r"\W", "", x).lower(), data_dict["Acronyms"]),
data_dict["Description"],
)
)
data_dict
print("Shape of data is ", telecom_df.shape)
print("Duplicate percentage", 1 - len(telecom_df.drop_duplicates()) / len(telecom_df))
# Checking High amount of missing values in columns
columns = telecom_df.columns
na_count = telecom_df.isna().sum()
na_count = na_count[columns[na_count != 0]]
print("Data having more then 70 % NA values.")
columns_na = []
for i, j in zip(na_count.index, na_count):
if j / len(telecom_df) > 0.5:
print(f"{i:30} {j:5} {round(j*100/len(telecom_df), 2):20}")
columns_na.append(i)
# We can see that the recharge-related and 2g/3g plan columns have a very high share of missing values (over 70%)
# # EDA and data cleaning
def plot_hist(df, col):
    try:
        # Plot the column's distribution, filling NaN with a sentinel so missing values show up
        df[col].fillna(-100).plot.hist()
        plt.show()
    except:
        print("Cannot plot histogram for", col)
def print_col_def(col):
mean = ""
for word in col.split("_"):
try:
mean += data_dict[word] + " "
except:
pass
print(mean.strip())
for col in telecom_df.columns:
print(f"{col:30}", end="")
print_col_def(col)
# Checking id
col = "id"
print_col_def("id")
print(list(set(telecom_df[col]))[:5])
# The id column is just a row index, so drop it
telecom_df.drop("id", axis=1, inplace=True)
# Checking for columns with very few distinct values (near-constant columns)
dup = []
for col in telecom_df.columns:
if len(set(telecom_df[col][:4000])) < 100:
duplicate = round(100 * len(set(telecom_df[col])) / len(telecom_df), 2)
if duplicate <= 1 and col != "churn_probability":
dup.append(col)
print("Columns having all duplicate values ", dup)
# Droping columns with high duplicate values in it
telecom_df.drop(dup, axis=1, inplace=True)
telecom_df.shape
# # Feature Engineering
# ## High value customer
# High-value customers are those whose recharge-related metrics are above the 75th percentile of all customers
high_value_cust_col = []
for col in telecom_df.columns:
if "rech" in col or "arpu" in col:
high_value_cust_col.append(col)
print("High value customer columns", high_value_cust_col)
# Taking columns which have recharge information in them
avg_df = pd.DataFrame(telecom_df[high_value_cust_col])
avg_df.fillna(0, inplace=True)
# Taking the average over months 6, 7 and 8
avg_df["avg_arpu"] = (avg_df["arpu_6"] + avg_df["arpu_7"] + avg_df["arpu_8"]) / 3
avg_df["avg_total_rech_amt"] = (
avg_df["total_rech_amt_6"] + avg_df["total_rech_amt_7"] + avg_df["total_rech_amt_8"]
) / 3
avg_df["avg_max_rech_amt"] = (
avg_df["max_rech_amt_6"] + avg_df["max_rech_amt_7"] + avg_df["max_rech_amt_8"]
) / 3
avg_df["avg_total_rech_data"] = (
avg_df["total_rech_data_6"]
+ avg_df["total_rech_data_8"]
+ avg_df["total_rech_data_8"]
) / 3
avg_df["avg_max_rech_data_6"] = (
avg_df["max_rech_data_6"] + avg_df["max_rech_data_7"] + avg_df["max_rech_data_8"]
) / 3
avg_df["avg_arpu"] = (avg_df["arpu_6"] + avg_df["arpu_7"] + avg_df["arpu_8"]) / 3
avg_df["avg_arpu"] = (avg_df["arpu_6"] + avg_df["arpu_7"] + avg_df["arpu_8"]) / 3
avg_df["avg_arpu"] = (avg_df["arpu_6"] + avg_df["arpu_7"] + avg_df["arpu_8"]) / 3
avg_df["avg_arpu"] = (avg_df["arpu_6"] + avg_df["arpu_7"] + avg_df["arpu_8"]) / 3
avg_df["avg_arpu"] = (avg_df["arpu_6"] + avg_df["arpu_7"] + avg_df["arpu_8"]) / 3
# Dropping the rest of the columns
avg_df.drop(high_value_cust_col, axis=1, inplace=True)
high_val_cust_data = avg_df.describe().T[["min", "25%", "50%", "75%", "max"]]
high_val_cust_data
# Keeping customers whose recharge metrics are at or above the 75th percentile
high_value_cust = pd.DataFrame(avg_df)
for col in high_val_cust_data.index:
high_value_cust = high_value_cust[
high_value_cust[col] >= high_val_cust_data.T[col]["75%"]
]
high_value_cust_telecom_df = telecom_df.loc[high_value_cust.index]
# We got all the high value customers
high_value_cust["churn_probability"] = telecom_df.loc[high_value_cust.index][
"churn_probability"
]
high_value_cust.head()
sns.countplot(x=high_value_cust.churn_probability)
plt.show()
high_value_cust.churn_probability.value_counts()
# ### Out of 4767 high-value customers, 140 are likely to churn
# # EDA
# Churn probability
sns.countplot(x=high_value_cust.churn_probability)
high_value_cust.columns
# EDA for high_value_cust
sns.displot(high_value_cust, x="avg_arpu")
# Hard to read the distribution because of a few extreme values
# Re-plotting with the high values capped
# EDA for high_value_cust
sns.displot(high_value_cust[high_value_cust["avg_arpu"] < 2000], x="avg_arpu", bins=20)
# Average revenue per user is 500 for most of the high value customers
sns.displot(high_value_cust, x="avg_total_rech_amt")
# Hard to read the distribution because of a few extreme values
# Re-plotting with the high values capped
# EDA for high_value_cust
sns.displot(
high_value_cust[high_value_cust["avg_total_rech_amt"] < 2000],
x="avg_total_rech_amt",
bins=20,
)
# Average Recharge Amount in local currency is 500 to 800
sns.displot(high_value_cust, x="avg_max_rech_amt")
# EDA for high_value_cust
sns.displot(
high_value_cust[high_value_cust["avg_max_rech_amt"] < 1000],
x="avg_max_rech_amt",
bins=10,
)
# Average Maximum Recharge Amount in local currency is 200 to 300
sns.displot(high_value_cust, x="avg_total_rech_data")
# EDA for high_value_cust
sns.displot(
high_value_cust[high_value_cust["avg_total_rech_data"] < 10],
x="avg_total_rech_data",
bins=10,
)
# Average Recharge Mobile internet done is 2-3
# # Data preparation
zero_filled_df = telecom_df.fillna(0)
set(zero_filled_df.dtypes)
zero_filled_df.shape
# # Removing outliers
zero_filled_df.describe()
# Capping values above the 90th percentile
dict_high = {}
def remove_extream_values(dataframe):
for col in dataframe.columns:
high_value = dataframe[col].quantile(0.9)
dict_high[col] = high_value
dataframe[col][zero_filled_df[col] > high_value] = high_value
return dataframe
removed_outliers_df = remove_extream_values(zero_filled_df)
# ## Splitting data into X and target
X = zero_filled_df.iloc[:, :-1]
y = zero_filled_df.iloc[:, -1:]
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2, stratify=y)
train_x.shape, test_x.shape
# ### Normalizing data
from sklearn.preprocessing import StandardScaler
def standraize_data(dataframe):
scaller = StandardScaler()
scaller.fit(dataframe)
transformed_data = scaller.transform(dataframe)
return scaller, transformed_data
scaller, train_norm_x = standraize_data(train_x)
test_norm_x = scaller.transform(test_x)
np.max(train_norm_x[:, 0]), np.max(train_x.iloc[0, :])
# ### Applying PCA
from sklearn.decomposition import PCA
def apply_pca(train_data, comp):
pca = PCA(n_components=comp)
pca.fit(train_data)
pca_samples = pca.transform(train_data)
return pca, pca_samples
pca_obj, train_pca_comp_x = apply_pca(train_norm_x, 20)
test_pca_comp_x = pca_obj.transform(test_norm_x)
# #### Plotting PCA variance
PC_values = np.arange(pca_obj.n_components_) + 1
plt.plot(PC_values, pca_obj.explained_variance_ratio_, linewidth=2)
plt.xlabel("Principal Component")
plt.ylabel("Proportion of Variance Explained")
plt.xticks(PC_values)
plt.grid()
plt.show()
# From the plot above, the first few components (around 4) explain most of the variance in the data
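# A quick numeric check of the statement above (using the pca_obj fitted earlier):
# the cumulative share of variance explained by the leading components.
cum_var = np.cumsum(pca_obj.explained_variance_ratio_)
for n in [2, 4, 10, 20]:
    print("First {:2d} components explain {:.1%} of the variance".format(n, cum_var[n - 1]))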
# ### Visualizing Classes in PCA
sns.scatterplot(
x=test_pca_comp_x[:, 0],
y=test_pca_comp_x[:, 1],
hue=np.ravel(test_y.values),
alpha=0.4,
)
# ##### We can see that the churned customers sit mostly toward the left of the plot
# ## Creating model
# ##### Checking with Logistic regression
from sklearn.metrics import precision_score, recall_score
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(train_pca_comp_x, train_y)
print("Train score: ", lr.score(train_pca_comp_x, train_y))
print("Test score:", lr.score(test_pca_comp_x, test_y))
print("Test precision score:", precision_score(lr.predict(test_pca_comp_x), test_y))
print("Test recall score:", recall_score(lr.predict(test_pca_comp_x), test_y))
# Logistic regression gives us about 90-91% accuracy on train and test data, which can serve as a baseline for our models.
# ##### Checking with Decision Tree
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(train_pca_comp_x, train_y)
print("Train score: ", dt.score(train_pca_comp_x, train_y))
print("Test score:", dt.score(test_pca_comp_x, test_y))
print("Test precision score:", precision_score(dt.predict(test_pca_comp_x), test_y))
print("Test recall score:", recall_score(dt.predict(test_pca_comp_x), test_y))
# The decision tree overfits the training data with 100% accuracy, but test accuracy is 88%, which is lower than logistic regression.
# Let's try to reduce the overfitting
from sklearn.model_selection import GridSearchCV
# ##### Checking with Decision tree tuning
criterion = ["gini", "entropy"]
max_depth = [2, 4, 6, 8, 10, 12]
parameters = dict(criterion=criterion, max_depth=max_depth)
clf_GS = GridSearchCV(dt, parameters)
clf_GS.fit(train_pca_comp_x, train_y)
dt = clf_GS.best_estimator_
dt.fit(train_pca_comp_x, train_y)
print("Train score: ", dt.score(train_pca_comp_x, train_y))
print("Test score:", dt.score(test_pca_comp_x, test_y))
print("Test precision score:", precision_score(dt.predict(test_pca_comp_x), test_y))
print("Test recall score:", recall_score(dt.predict(test_pca_comp_x), test_y))
# Here the model fits the training data with 92% accuracy and the test data with 91.9%, which is better than both logistic regression and the overfitted decision tree
# ##### Checking with Random forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc.fit(train_pca_comp_x, train_y)
print("Train score: ", rfc.score(train_pca_comp_x, train_y))
print("Test score:", rfc.score(test_pca_comp_x, test_y))
print("Test precision score:", precision_score(rfc.predict(test_pca_comp_x), test_y))
print("Test recall score:", recall_score(rfc.predict(test_pca_comp_x), test_y))
# The random forest reaches 100% accuracy on the training data and 92% on the test data, the highest so far.
# But we will try to reduce the overfitting
# ##### Checking with Random forest tuning
rfc = RandomForestClassifier(n_estimators=300, max_depth=100)
rfc.fit(train_pca_comp_x, train_y)
print("Train score: ", rfc.score(train_pca_comp_x, train_y))
print("Test score:", rfc.score(test_pca_comp_x, test_y))
print("Test precision score:", precision_score(rfc.predict(test_pca_comp_x), test_y))
print("Test recall score:", recall_score(rfc.predict(test_pca_comp_x), test_y))
# This is the best model so far, so we use it for the final predictions.
# ### Making predictions for the test data
test_df = telecom_df_test[zero_filled_df.columns[:-1]]
test_df.shape
zero_filled_test_df = test_df.fillna(0)
for col in zero_filled_test_df.columns:
zero_filled_test_df[col][zero_filled_test_df[col] > dict_high[col]] = dict_high[col]
zero_filled_test_df.describe()
zero_filled_test_df.shape
norm_test_df = scaller.transform(zero_filled_test_df)
test_pca_df = pca_obj.transform(norm_test_df)
predictions = rfc.predict(test_pca_df)
telecom_df_sample["churn_probability"] = predictions
telecom_df_sample["churn_probability"].value_counts()
telecom_df_sample.to_csv("/kaggle/working/submit.csv", index=False)
|
# Import the Dependencies
#
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Load The data file
from google.colab import files
heart_data = files.upload()
heart_data = pd.read_csv("heart_disease_data.csv")
# Print the first row
heart_data.head(1)
# Print Last 5 rows
#
heart_data.tail()
# Number of rows and columns
heart_data.shape
# Getting Some Info about the data
heart_data.info()
# Checking for Missing values
heart_data.isnull().sum()
# Statistical Measures about the data
heart_data.describe()
# Checking the distribution of target value
heart_data["target"].value_counts()
# Spliting the features and target
x = heart_data.drop(columns="target", axis=1)
y = heart_data["target"]
print(x)
print(y)
# Splitting the data into training data and test data
#
X_train, X_test, Y_train, Y_test = train_test_split(
x, y, test_size=0.2, stratify=y, random_state=2
)
print(x.shape, X_train.shape, X_test.shape)
print(y.shape, Y_train.shape, Y_test.shape)
# Model Training
# Logistic Regression
model = LogisticRegression()
model.fit(X_train, Y_train)
# Model Evaluation
# Accuracy Score
# Accuracy on the training data
X_train_prediction = model.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
print(training_data_accuracy)
# Accuracy on the test data
X_test_prediction = model.predict(X_test)
test_data_accuracy = accuracy_score(X_test_prediction, Y_test)
print(test_data_accuracy)
# Building a predictive system
input_data = (61, 1, 2, 150, 243, 1, 1, 137, 1, 1, 1, 0, 2)
input_data_numpy_array = np.asarray(input_data)
input_data_reshaped = input_data_numpy_array.reshape(1, -1)
prediction = model.predict(input_data_reshaped)
print(prediction)
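# A small readability step (assumption: in this dataset target 1 indicates the presence
# of heart disease and 0 its absence): turn the raw prediction into a message.
if prediction[0] == 1:
    print("The model predicts that this person has heart disease.")
else:
    print("The model predicts that this person does not have heart disease.")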
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
dataset = pd.read_csv(
"/kaggle/input/internet-connection-dataset/internet_connection_data.csv"
)
dataset.head()
print(dataset.isna().sum())
print()
print(dataset.isnull().sum())
print(len(dataset.columns))
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(dataset.iloc[:, -1])
datasetChoosen = dataset.iloc[0:47032, 0:44]
datasetChoosen.head()
X = pd.DataFrame(datasetChoosen, columns=datasetChoosen.columns)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=0.2)
sc_X = StandardScaler()
X_trainscaled = sc_X.fit_transform(X_train)
X_testscaled = sc_X.transform(X_test)
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(10, input_dim=X_train.shape[1], activation="relu"))
model.add(Dense(10, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Train and evaluate on the scaled features computed above
model.fit(X_trainscaled, y_train, epochs=100, batch_size=32, verbose=0)
_, accuracy = model.evaluate(X_testscaled, y_test, verbose=0)
print("Accuracy: %.2f" % (accuracy * 100))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### Imports
# TENSORFLOW #
import tensorflow as tf
from tensorflow.keras import layers, losses, optimizers
from tensorflow.keras.layers import (
Conv2D,
Dense,
Flatten,
MaxPooling2D,
Dropout,
BatchNormalization,
)
from tensorflow.keras.models import Sequential
import tensorflow_datasets as tfds
from tensorflow_addons import image
import tensorflow_addons as tfa
# Other
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import tabulate as tbl
# Load a dataset
(train, valid, test), metadata = tfds.load(
"cassava", # Plants
split=["train", "test", "validation"],
as_supervised=True, # Get labels
shuffle_files=True,
with_info=True,
)
# Set sizes
train_size = metadata.splits["train"].num_examples
valid_size = metadata.splits["validation"].num_examples
test_size = metadata.splits["test"].num_examples
# Check details
print(
f"Training set size: {train_size} | Validation set size: {valid_size} | Testing set size: {test_size}"
)
# Convert numbers into labels
get_label_name = metadata.features["label"].int2str
# Show an image
for image, label in train.take(1):
# Plot the image
plt.imshow(image)
# Set the label as the title
plt.title(get_label_name(label))
# Before beginning the project, it's important to understand the distribution of at least the training set's labels, as it should be relatively similar in the validation and testing sets.
# Find the number of classes
num_classes = metadata.features["label"].num_classes
# Find the class labels
class_names = metadata.features["label"].names
# Check
print(
f"There are {num_classes} classes in the Cassava dataset. The labels are: {class_names}."
)
# Hold the list of training labels
training_labels = []
# Access all the labels
for images, labels in train.take(-1):
training_labels.append(get_label_name(labels.numpy()))
# Create a counter
label_counts = Counter(training_labels)
# Ordered keys
ordered_labels = sorted(label_counts.keys())
# Display breakdown of training labels
plt.figure(figsize=(12, 10))  # create the figure before plotting, not right before show()
sns.barplot(
    x=[label_counts[k] for k in ordered_labels],
    y=ordered_labels,
    orient="h",
    color="darkblue",
)
# Titles
plt.xlabel("Counts")
plt.ylabel("Label Class")
plt.title("Labels in the Training Set")
# Show
plt.show()
# The dataset is imbalanced, but deep learning models are typically robust enough that we can skip cross-validation and instead rely on hold-out sets (the train, validation, and test triplet) plus large amounts of training data. We will keep this tactic for the rest of the notebook. (That's not to say cross-validation can't be used here; the point is simply not to.)
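# Optional alternative for the imbalance (not used in this notebook): weight each class
# inversely to its frequency and pass the mapping to model.fit(..., class_weight=...).
# A minimal sketch built from the label counts gathered above (assumes every class
# appears at least once in the training labels):
total_labels = sum(label_counts.values())
class_weight_dict = {
    i: total_labels / (num_classes * label_counts[name])
    for i, name in enumerate(class_names)
}
print(class_weight_dict)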
# Image size
img_size = 224
# Normalize function
def normalize(image, label):
image = tf.cast(image, tf.float32)
image = tf.image.resize(image, [img_size, img_size])
# Normalize
image = image / 255.0
return image, label
# Calculate rotation angles
angle_vals = np.array([5.0, 15.0, 25.0, 45.0, 120.0, 165.0, 250.0]) # Base angles
# Augmentation
def augmenter(image, label):
# Normalize - call this first
image, label = normalize(image, label)
# Rotate
rotation = (np.random.choice(angle_vals) * np.pi / 180.0).astype(np.float32)
image = tfa.image.rotate(image, angles=rotation, interpolation="NEAREST")
# Brightness
image = tf.image.random_brightness(image, 0.5)
# Return image and label
return image, label
# Show an example of an augmented image
for image, label in train.take(1):
img, lbl = augmenter(image, label)
plt.imshow(img)
plt.title(get_label_name(lbl))
# ### Create Data Pipelines
# Batch size
batch = 32
# Build the pipelines
num_training_examples = train_size // 100
# Training
training_batch = (
train.shuffle(num_training_examples // 4).map(augmenter).batch(batch).prefetch(1)
)
# Validation
validation_batch = (
valid.shuffle(num_training_examples // 4).map(augmenter).batch(batch).prefetch(1)
)
# Test
testing_batch = (
test.shuffle(num_training_examples // 4).map(augmenter).batch(batch).prefetch(1)
)
# ## Build a Basic Model
# This will be a very simple sequential feed forward model. We will use the following layers:
# - Dense
# - Conv2D
# - MaxPooling2D
# - Flatten
#
# The order of layers matters: Conv2D > MaxPooling2D > Flatten > Dense.
#
# Between each layer, the size of the tensor has to be tracked.
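# For example, with 224x224 inputs, 'same'-padded convolutions and three 2x2 poolings,
# the spatial size shrinks 224 -> 112 -> 56 -> 28, so Flatten sees 28 * 28 * 64 = 50176
# features before the final Dense layer (a quick check against model.summary() below).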
# Create a sequential model
model = Sequential(
[
Conv2D(16, 3, padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D(), # MaxPooling2D
Conv2D(32, 3, padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D(), # MaxPooling2D
Conv2D(64, 3, padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D(), # MaxPooling2D
Flatten(), # Flatten
Dense(num_classes, activation="softmax"),
]
)
# Compile the model
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Model build
model.build((None, 224, 224, 3))
# Summary
model.summary()
# ### Model Training
# Epochs
EPOCHS = 20
# Stop training when there is no improvement in the validation loss for 5 consecutive epochs
early_stopping = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5)
# Fit the model
history = model.fit(
training_batch,
validation_data=validation_batch,
epochs=EPOCHS,
callbacks=[early_stopping],
)
# Visualization of accuracies and losses
training_accuracy = history.history["accuracy"]
validation_accuracy = history.history["val_accuracy"]
training_loss = history.history["loss"]
validation_loss = history.history["val_loss"]
epochs_range = range(len(training_accuracy))
plt.figure(figsize=(18, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, training_accuracy, label="Training Accuracy")
plt.plot(epochs_range, validation_accuracy, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epochs_range, training_loss, label="Training Loss")
plt.plot(epochs_range, validation_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.show()
# This is some pretty serious overfitting. I'll perform a quick inference exercise to see how bad things actually are, but the model will need to be tweaked in order to improve performance.
# ### Inference
for image_batch, label_batch in testing_batch.take(1):
ps = model.predict(image_batch)
images = image_batch.numpy().squeeze()
labels = label_batch.numpy()
plt.figure(figsize=(10, 15))
for n in range(30):
plt.subplot(6, 5, n + 1)
plt.imshow(images[n], cmap=plt.cm.binary)
color = "green" if np.argmax(ps[n]) == labels[n] else "red"
plt.title(class_names[np.argmax(ps[n])], color=color)
plt.axis("off")
# ## Model 2
# This version of the model will use the same layers with the addition of a dropout layer, but there will be a slight change in architecture.
# Second model
model2 = Sequential(
[
Conv2D(16, 3, padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D(), # MaxPooling2D
Conv2D(32, 3, padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D(), # MaxPooling2D
Conv2D(64, 3, padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D(), # MaxPooling2D
Flatten(), # Flatten
Dense(128, activation="relu"), # Dense
Dropout(0.2), # Dropout layer
Dense(num_classes, activation="softmax"),
]
)
# Compile the model
model2.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Model build
model2.build((None, 224, 224, 3))
# Summary
model2.summary()
# Fit the model
history2 = model2.fit(
training_batch,
validation_data=validation_batch,
epochs=EPOCHS,
callbacks=[early_stopping],
)
# Visualization of accuracies and losses
training_accuracy = history2.history["accuracy"]
validation_accuracy = history2.history["val_accuracy"]
training_loss = history2.history["loss"]
validation_loss = history2.history["val_loss"]
epochs_range = range(len(training_accuracy))
plt.figure(figsize=(18, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, training_accuracy, label="Training Accuracy")
plt.plot(epochs_range, validation_accuracy, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epochs_range, training_loss, label="Training Loss")
plt.plot(epochs_range, validation_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.show()
# Again, there is evidence of overfitting. So, clearly simple adjustments to architecture are not enough, though admittedly the performance seems somewhat better on this model than the previous one.
# ### Inference
for image_batch, label_batch in testing_batch.take(1):
ps = model2.predict(image_batch)
images = image_batch.numpy().squeeze()
labels = label_batch.numpy()
plt.figure(figsize=(10, 15))
for n in range(30):
plt.subplot(6, 5, n + 1)
plt.imshow(images[n], cmap=plt.cm.binary)
color = "green" if np.argmax(ps[n]) == labels[n] else "red"
plt.title(class_names[np.argmax(ps[n])], color=color)
plt.axis("off")
# ## Model 3
# This version will include BatchNormalization and remove the Dropout layer. Also, the kernel sizes are increased to 5x5 and the filter counts are larger.
# Third model
model3 = Sequential(
[
Conv2D(64, (5, 5), padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D((2, 2)), # MaxPooling2D
BatchNormalization(), # BatchNormalization
Conv2D(64, (5, 5), padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D((2, 2)), # MaxPooling2D
BatchNormalization(), # BatchNormalization
Conv2D(128, (5, 5), padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D((2, 2)), # MaxPooling2D
BatchNormalization(), # BatchNormalization
Conv2D(256, (5, 5), padding="same", activation="relu"), # Convolutional2D layer
MaxPooling2D((2, 2)), # MaxPooling2D
Flatten(), # Flatten
Dense(1024, activation="relu"), # Dense
Dense(num_classes, activation="softmax"),
]
)
# Compile the model
model3.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Model build
model3.build((None, 224, 224, 3))
# Summary
model3.summary()
# Epochs
e = 20
# Fit the model - removing early stopping
history3 = model3.fit(training_batch, validation_data=validation_batch, epochs=e)
# Visualization of accuracies and losses
training_accuracy = history3.history["accuracy"]
validation_accuracy = history3.history["val_accuracy"]
training_loss = history3.history["loss"]
validation_loss = history3.history["val_loss"]
epochs_range = range(len(training_accuracy))
plt.figure(figsize=(18, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, training_accuracy, label="Training Accuracy")
plt.plot(epochs_range, validation_accuracy, label="Validation Accuracy")
plt.legend(loc="lower right")
plt.title("Training and Validation Accuracy")
plt.subplot(1, 2, 2)
plt.plot(epochs_range, training_loss, label="Training Loss")
plt.plot(epochs_range, validation_loss, label="Validation Loss")
plt.legend(loc="upper right")
plt.title("Training and Validation Loss")
plt.show()
# These results are a definite improvement over the previous two models, but around the 11th or 12th epoch you begin to see overfitting. So, while I will make no further adjustments to the model, there is room for improvement:
# - Adjusting the learning rate (a minimal sketch follows below)
# - Changing the optimizer
# - Changing the hidden layer sizes
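# A minimal sketch of the first tweak (the value is hypothetical, not tuned here): build an
# Adam optimizer with an explicit, smaller learning rate that could replace the string
# "adam" in model3.compile(...).
tuned_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
print(tuned_optimizer.get_config()["learning_rate"])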
# Anyway, now I will save the model and evaluate it on the test data.
# Save the model
saved_model_path = "./model3.h5"
model3.save(saved_model_path)
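# A quick check that the saved file can be restored (a sketch: reloads the HDF5 file
# written above into a fresh Keras model object).
reloaded_model = tf.keras.models.load_model(saved_model_path)
reloaded_model.summary()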
# Evaluate
results = model3.evaluate(testing_batch)
# Dictionary
results_dict = dict(zip(model3.metrics_names, results))
# print results
for k, v in results_dict.items():
print(f"{k}: {v}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
import tensorflow as tf
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.impute import KNNImputer
from keras.regularizers import l2
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from statsmodels.stats.outliers_influence import variance_inflation_factor
train = pd.read_csv("/kaggle/input/credit-score-classification/train.csv")
test = pd.read_csv("/kaggle/input/credit-score-classification/test.csv")
train.head()
test.head()
# shape of train and test
print(
f"Total no of rows in train_df:{train.shape[0]} and columns in train_df {train.shape[1]}"
)
# shape of train and test
print(
f"Total no of rows in test_df:{test.shape[0]} and columns in test_df {test.shape[1]}"
)
# basic information
train.info()
# test dataset basic info
test.info()
# checking null values:
train.isnull().sum()
# checking null values in %age:
train.isnull().mean() * 100
def plot_nas(df: pd.DataFrame):
if df.isnull().sum().sum() != 0:
na_df = (df.isnull().sum() / len(df)) * 100
na_df = na_df.drop(na_df[na_df == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({"Missing Ratio %": na_df})
        missing_data.plot(kind="barh", color="grey", figsize=(10, 6))
        plt.show()
else:
print("No NAs found")
plot_nas(train)
plot_nas(test)
plt.figure(figsize=(10, 6))
sns.heatmap(train.isna(), cmap="Blues")
plt.figure(figsize=(10, 6))
sns.heatmap(test.isna(), cmap="Blues")
# basic description
train.describe().T
# basic description of object columns:
train.describe(include="object").T
# basic description
test.describe().T
# basic description of object columns:
test.describe(include="object").T
train.columns
cat_cols = train.select_dtypes(include=["object"]).columns
cat_cols
train.nunique()
test.nunique()
for i in train.columns:
print(f" unique values = {i}")
print(train[i].value_counts())
print("------------------------------------------------------")
for i in test.columns:
print(f" unique values = {i}")
print(test[i].value_counts())
print("------------------------------------------------------")
# Distribution of data in target feature
plt.figure(figsize=(10, 5))
# plot pie chart
plt.subplot(1, 2, 1)
label = train.Credit_Score.value_counts().index
label_count = train.Credit_Score.value_counts().values
plt.pie(
data=train, x=label_count, labels=label, autopct="%1.1f%%", shadow=True, radius=1
)
#
plt.subplot(1, 2, 2)
sns.countplot(x="Credit_Score", data=train)
plt.title("Target feature distribution in Train data")
### Understanding the distribution of the column - Monthly_Inhand_Salary
sns.distplot(
train["Monthly_Inhand_Salary"],
label="Skewness: %.2f" % (train["Monthly_Inhand_Salary"].skew()),
)
plt.legend(loc="best")
plt.title("Customer Monthly Salary Distribution")
# count plot for Occupation
plt.figure(figsize=(18, 6))
sns.countplot(data=train, x="Occupation")
plt.xticks(rotation=90)
plt.show()
# count plot for Occupation (test data)
plt.figure(figsize=(18, 6))
sns.countplot(data=test, x="Occupation")
plt.xticks(rotation=90)
plt.show()
# count plot for Credit_Score
plt.figure(figsize=(18, 6))
sns.countplot(data=train, x="Credit_Score")
plt.xticks(rotation=90)
plt.show()
# count plot for Payment_Behaviour
plt.figure(figsize=(18, 6))
sns.countplot(data=train, x="Payment_Behaviour")
plt.xticks(rotation=90)
plt.show()
# count plot for Payment_of_Min_Amount
plt.figure(figsize=(18, 6))
sns.countplot(data=train, x="Payment_of_Min_Amount")
plt.xticks(rotation=90)
plt.show()
for col in [
"Month",
"Occupation",
"Credit_Mix",
"Payment_of_Min_Amount",
"Payment_Behaviour",
]:
plt.figure(figsize=(18, 6))
sns.countplot(x=col, data=train, palette="mako", hue="Credit_Score")
plt.xticks(rotation=90)
plt.show()
sns.catplot(x="Credit_Score", col="Occupation", data=train, kind="count", col_wrap=4)
sns.catplot(x="Credit_Score", col="Credit_Mix", data=train, kind="count", col_wrap=3)
### Bar graph showing the value counts of the column - Payment_of_Min_Amount
min_amount_count = train["Payment_of_Min_Amount"].value_counts(dropna=False)
min_amount_count
sns.set(rc={"figure.figsize": (5, 5)})
sns.barplot(x=min_amount_count.index, y=min_amount_count.values, alpha=0.8)
plt.title("Bar graph showing the value counts of the column - Payment_of_Min_Amount")
plt.ylabel("Number of Occurrences", fontsize=12)
plt.xlabel("Payment of Minimum Amount", fontsize=12)
plt.show()
### Monthly Inhand Salary distribution by Credit Score
grid = sns.FacetGrid(train, col="Credit_Score")
grid.map(sns.distplot, "Monthly_Inhand_Salary")
### Merging the above graphs into one
sns.kdeplot(
train["Monthly_Inhand_Salary"][train["Credit_Score"] == "Good"],
label="Credit Score = Good",
)
sns.kdeplot(
train["Monthly_Inhand_Salary"][train["Credit_Score"] == "Poor"],
label="Credit Score = Poor",
)
sns.kdeplot(
train["Monthly_Inhand_Salary"][train["Credit_Score"] == "Standard"],
label="Credit Score = Standard",
)
plt.xlabel("Monthly Inhand Salary")
plt.legend()
plt.title("Customer Monthly Inhand Salary by Credit Score")
### Understanding the distribution of the column - Interest_Rate
sns.distplot(
train["Interest_Rate"], label="Skewness: %.2f" % (train["Interest_Rate"].skew())
)
plt.legend(loc="best")
plt.title("Customers Interest Rate Distribution")
# # Data Cleaning
train = train.applymap(
lambda x: x if x is np.NaN or not isinstance(x, str) else str(x).strip('_ ,"')
).replace(["", "nan", "!@9#%8", "#F%$D@*&8"], np.NaN)
test = test.applymap(
lambda x: x if x is np.NaN or not isinstance(x, str) else str(x).strip('_ ,"')
).replace(["", "nan", "!@9#%8", "#F%$D@*&8"], np.NaN)
train["ID"] = train.ID.apply(lambda x: int(x, 16))
test["ID"] = test.ID.apply(lambda x: int(x, 16))
train["Customer_ID"] = train.Customer_ID.apply(lambda x: int(x[4:], 16))
test["Customer_ID"] = test.Customer_ID.apply(lambda x: int(x[4:], 16))
train["Month"] = pd.to_datetime(train.Month, format="%B").dt.month
test["Month"] = pd.to_datetime(test.Month, format="%B").dt.month
train["Age"] = train.Age.astype(int)
test["Age"] = test.Age.astype(int)
train["SSN"] = train.SSN.apply(
lambda x: x if x is np.NaN else int(str(x).replace("-", ""))
).astype(float)
test["SSN"] = test.SSN.apply(
lambda x: x if x is np.NaN else int(str(x).replace("-", ""))
).astype(float)
train["Annual_Income"] = train.Annual_Income.astype(float)
train["Num_of_Loan"] = train.Num_of_Loan.astype(int)
train["Num_of_Delayed_Payment"] = train.Num_of_Delayed_Payment.astype(float)
train["Changed_Credit_Limit"] = train.Changed_Credit_Limit.astype(float)
train["Outstanding_Debt"] = train.Outstanding_Debt.astype(float)
train["Amount_invested_monthly"] = train.Amount_invested_monthly.astype(float)
train["Monthly_Balance"] = train.Monthly_Balance.astype(float)
train["Interest_Rate"] = train["Interest_Rate"].astype("float64")
test["Annual_Income"] = test.Annual_Income.astype(float)
test["Num_of_Loan"] = test.Num_of_Loan.astype(int)
test["Num_of_Delayed_Payment"] = test.Num_of_Delayed_Payment.astype(float)
test["Changed_Credit_Limit"] = test.Changed_Credit_Limit.astype(float)
test["Outstanding_Debt"] = test.Outstanding_Debt.astype(float)
test["Amount_invested_monthly"] = test.Amount_invested_monthly.astype(float)
test["Monthly_Balance"] = test.Monthly_Balance.astype(float)
train["Interest_Rate"] = train["Interest_Rate"].astype("float64")
def Month_Converter(x):
if pd.notnull(x):
num1 = int(x.split(" ")[0])
num2 = int(x.split(" ")[3])
return (num1 * 12) + num2
else:
return x
train["Credit_History_Age"] = train.Credit_History_Age.apply(
lambda x: Month_Converter(x)
).astype(float)
test["Credit_History_Age"] = test.Credit_History_Age.apply(
lambda x: Month_Converter(x)
).astype(float)
train["Amount_invested_monthly"] = train["Amount_invested_monthly"].replace(
"__10000__", 10000.00
)
train["Monthly_Balance"] = train["Monthly_Balance"].replace(
"__-333333333333333333333333333__", 0
)
train["Num_of_Delayed_Payment"] = train["Num_of_Delayed_Payment"].replace(
r"_$", "", regex=True
)
train["Annual_Income"] = train["Annual_Income"].replace(r"_$", "", regex=True)
train["Age"] = train["Age"].replace(r"_$", "", regex=True)
train["Outstanding_Debt"] = train["Outstanding_Debt"].replace(r"_$", "", regex=True)
train["Occupation"] = train["Occupation"].replace("_______", np.nan)
train["Num_of_Loan"] = train["Num_of_Loan"].replace(r"_$", "", regex=True)
train["Credit_Mix"] = train["Credit_Mix"].replace("_", np.nan)
train["Changed_Credit_Limit"] = train["Changed_Credit_Limit"].replace("_", 0)
test["Amount_invested_monthly"] = test["Amount_invested_monthly"].replace(
"__10000__", 10000.00
)
test["Monthly_Balance"] = test["Monthly_Balance"].replace(
"__-333333333333333333333333333__", 0
)
test["Num_of_Delayed_Payment"] = test["Num_of_Delayed_Payment"].replace(
r"_$", "", regex=True
)
test["Annual_Income"] = test["Annual_Income"].replace(r"_$", "", regex=True)
test["Age"] = test["Age"].replace(r"_$", "", regex=True)
test["Outstanding_Debt"] = test["Outstanding_Debt"].replace(r"_$", "", regex=True)
test["Occupation"] = test["Occupation"].replace("_______", np.nan)
test["Num_of_Loan"] = test["Num_of_Loan"].replace(r"_$", "", regex=True)
test["Credit_Mix"] = test["Credit_Mix"].replace("_", np.nan)
test["Changed_Credit_Limit"] = test["Changed_Credit_Limit"].replace("_", 0)
train.Age.replace(-500, np.median(train.Age), inplace=True)
for i in train.Age.values:
if i > 118:
train.Age.replace(i, np.median(train.Age), inplace=True)
test.Age.replace(-500, np.median(test.Age), inplace=True)
for i in test.Age.values:
if i > 118:
test.Age.replace(i, np.median(test.Age), inplace=True)
train.Num_of_Loan.replace(-100, np.median(train.Num_of_Loan), inplace=True)
for i in train.Num_of_Loan.values:
if i > 10:
train.Num_of_Loan.replace(i, np.median(train.Num_of_Loan), inplace=True)
test.Num_of_Loan.replace(-100, np.median(test.Num_of_Loan), inplace=True)
for i in test.Num_of_Loan.values:
if i > 10:
test.Num_of_Loan.replace(i, np.median(test.Num_of_Loan), inplace=True)
for i in train.Interest_Rate:
if i > 20:
train.Interest_Rate.replace(i, np.median(train.Interest_Rate), inplace=True)
for i in test.Interest_Rate:
if i > 20:
test.Interest_Rate.replace(i, np.median(test.Interest_Rate), inplace=True)
for i in train.Num_Bank_Accounts:
if i > 100:
train.Num_Bank_Accounts.replace(
i, np.median(train.Num_Bank_Accounts), inplace=True
)
for i in test.Num_Bank_Accounts:
if i > 100:
test.Num_Bank_Accounts.replace(
i, np.median(test.Num_Bank_Accounts), inplace=True
)
for i in train.Num_Credit_Card:
if i > 50:
train.Num_Credit_Card.replace(i, np.median(train.Num_Credit_Card), inplace=True)
for i in test.Num_Credit_Card:
if i > 50:
test.Num_Credit_Card.replace(i, np.median(test.Num_Credit_Card), inplace=True)
imp = KNNImputer(n_neighbors=3)
def filling_na(df, column, type_=None):
"""
    This function fills null values so the data can be used properly.
    Parameters:
    df: DataFrame containing the column to fill
    column: name of the column whose missing values will be filled
    type_: pass "num" for numeric columns (KNN imputation); otherwise random sampling from existing values is used
"""
np.random.seed(7)
if type_ == "num":
# filling_list = df[column].dropna()
# df[column] = df[column].fillna(
# pd.Series(np.random.choice(filling_list, size=len(df.index)))
# )
df[column] = imp.fit_transform(df[column].values.reshape(-1, 1))
else:
filling_list = df[column].dropna().unique()
df[column] = df[column].fillna(
pd.Series(np.random.choice(filling_list, size=len(df.index)))
)
return df[column]
train["Monthly_Inhand_Salary"] = filling_na(train, "Monthly_Inhand_Salary", "num")
train["Num_Credit_Inquiries"] = filling_na(train, "Num_Credit_Inquiries", "num")
train["Amount_invested_monthly"] = filling_na(train, "Amount_invested_monthly", "num")
train["Num_of_Delayed_Payment"] = filling_na(train, "Num_of_Delayed_Payment", "num")
train["Monthly_Balance"] = filling_na(train, "Monthly_Balance", "num")
train["Type_of_Loan"] = filling_na(train, "Type_of_Loan")
train["Credit_History_Age"] = filling_na(train, "Credit_History_Age")
train["Occupation"] = filling_na(train, "Occupation")
test["Monthly_Inhand_Salary"] = filling_na(test, "Monthly_Inhand_Salary", "num")
test["Num_Credit_Inquiries"] = filling_na(test, "Num_Credit_Inquiries", "num")
test["Amount_invested_monthly"] = filling_na(test, "Amount_invested_monthly", "num")
test["Num_of_Delayed_Payment"] = filling_na(test, "Num_of_Delayed_Payment", "num")
test["Monthly_Balance"] = filling_na(test, "Monthly_Balance", "num")
test["Type_of_Loan"] = filling_na(test, "Type_of_Loan")
test["Credit_History_Age"] = filling_na(test, "Credit_History_Age")
test["Occupation"] = filling_na(test, "Occupation")
train.drop(
[
"Name",
"Credit_History_Age",
"ID",
"Customer_ID",
"SSN",
],
axis=1,
inplace=True,
)
test.drop_duplicates(subset="ID", inplace=True)
test.drop(
[
"Name",
"Credit_History_Age",
"ID",
"Customer_ID",
"SSN",
],
axis=1,
inplace=True,
)
train.Type_of_Loan = train.Type_of_Loan.str.replace("and", "")
train.Type_of_Loan = train.Type_of_Loan.str.replace(" ", "")
test.Type_of_Loan = test.Type_of_Loan.str.replace("and", "")
test.Type_of_Loan = test.Type_of_Loan.str.replace(" ", "")
cat_values = []
loan_cat = train.Type_of_Loan.unique()
for i in loan_cat:
for j in i.split(","):
cat_values.append(j)
loan_types = set([x.strip(" ") for x in set(cat_values)])
loan_types = list(loan_types)
loan_types
index_values = ~train["Type_of_Loan"].isnull().values
loan_type_data = list(train["Type_of_Loan"][index_values])
loan_type_dict = dict()
for value in loan_type_data:
values = value.split(",")
for each_value in values:
loan_type = each_value.strip(" ")
if "and" in loan_type:
loan_type = loan_type[4:]
if loan_type in loan_type_dict:
loan_type_dict[loan_type] += 1
else:
loan_type_dict[loan_type] = 1
loan_type_dict
### Bar graph showing the counts of the column - Type_of_Loan
sns.set(rc={"figure.figsize": (15, 10)})
sns.barplot(x=list(loan_type_dict.keys()), y=list(loan_type_dict.values()))
plt.title("Bar graph showing the counts of the column - Type_of_Loan")
plt.ylabel("Count", fontsize=12)
plt.xlabel("Type_of_Loan", fontsize=12)
plt.show()
train.describe().T
train.describe(include="O").T
# # Data Visualization
plt.figure(figsize=(10, 7))
sns.countplot(data=train, x="Credit_Score")
plt.title("Customers Credit Scores", size=27, fontweight="bold")
plt.xlabel("Credit Score", size=27, fontweight="bold")
plt.ylabel("Count", size=27, fontweight="bold")
plt.show()
px.bar(
data_frame=train.groupby(by=["Credit_Score"]).size().reset_index(name="counts"),
x="Credit_Score",
y="counts",
barmode="group",
)
plt.figure(figsize=(10, 7))
sns.lineplot(data=train, x="Occupation", y="Annual_Income", hue="Credit_Score")
plt.xticks(rotation=45)
plt.title("Annual Income Salary for Customers Occupation", size=27)
plt.xlabel("Occupation", size=27)
plt.ylabel("Annual Income", size=27)
plt.show()
px.line(data_frame=train, x="Occupation", y="Annual_Income", color="Credit_Score")
px.line_3d(data_frame=train, x="Occupation", y="Annual_Income", z="Credit_Score")
plt.figure(figsize=(10, 7))
sns.lineplot(data=train, x="Month", y="Monthly_Inhand_Salary", hue="Credit_Score")
plt.title("Annual Income Salary for Customers Occupation", size=27, fontweight="bold")
plt.xlabel("Month", size=27, fontweight="bold")
plt.ylabel("Monthly Inhand Salary", size=27, fontweight="bold")
plt.show()
px.line(data_frame=train, x="Month", y="Monthly_Inhand_Salary", color="Credit_Score")
plt.figure(figsize=(10, 7))
sns.lineplot(
data=train, x="Occupation", y="Credit_Utilization_Ratio", hue="Credit_Score"
)
plt.xticks(rotation=45)
plt.title("Credit Card Usage Ratio According to Occupation", size=27, fontweight="bold")
plt.xlabel("Occupation", size=27, fontweight="bold")
plt.ylabel("Credit Card Utiliztion Ratio", size=27, fontweight="bold")
plt.show()
plt.figure(figsize=(10, 7))
sns.lineplot(
data=train, x="Payment_Behaviour", y="Amount_invested_monthly", hue="Credit_Score"
)
plt.xticks(rotation=45)
plt.title(
"Payment Behaviour of The Customer and The Amounts They Invest",
size=27,
fontweight="bold",
)
plt.xlabel("Payment Behaviour", size=27, fontweight="bold")
plt.ylabel("Amount Invested Monthly", size=27, fontweight="bold")
plt.show()
plt.figure(figsize=(10, 7))
sns.lineplot(data=train, x="Payment_Behaviour", y="Outstanding_Debt")
plt.xticks(rotation=45)
plt.title(
"Payment Behaviour of The Customer and Their Debt", size=27, fontweight="bold"
)
plt.xlabel("Payment Behaviour", size=27, fontweight="bold")
plt.ylabel("Outstanding Debt", size=27, fontweight="bold")
plt.show()
px.line(
data_frame=train,
x="Payment_Behaviour",
y="Outstanding_Debt",
)
plt.figure(figsize=(10, 7))
sns.countplot(data=train, x="Credit_Mix", hue="Credit_Score")
# plt.xticks(rotation=45)
plt.title("Credit Mix", size=27, fontweight="bold")
plt.xlabel("Credit Mix Categories", size=27, fontweight="bold")
plt.ylabel("Count", size=27, fontweight="bold")
plt.show()
px.bar(
data_frame=train.groupby(by=["Credit_Mix", "Credit_Score"])
.size()
.reset_index(name="counts"),
x="Credit_Mix",
y="counts",
color="Credit_Score",
barmode="group",
)
plt.figure(figsize=(10, 7))
sns.countplot(data=train, x="Payment_of_Min_Amount", hue="Credit_Score")
plt.title("Credit Score for Payment of Minimum Amounts", size=27, fontweight="bold")
plt.xlabel("Payment of Minimum Amounts", size=27, fontweight="bold")
plt.ylabel("Count", size=27, fontweight="bold")
plt.show()
px.bar(
data_frame=train.groupby(by=["Payment_of_Min_Amount", "Credit_Score"])
.size()
.reset_index(name="counts"),
x="Payment_of_Min_Amount",
y="counts",
color="Credit_Score",
barmode="group",
)
plt.figure(figsize=(10, 7))
sns.lineplot(
data=train, x="Delay_from_due_date", y="Monthly_Inhand_Salary", hue="Credit_Score"
)
plt.title(
"Delay of Payment According to Monthly Inhand Salary", size=27, fontweight="bold"
)
plt.xlabel("Delay from Due Date", size=27, fontweight="bold")
plt.ylabel("Monthly Inhand Salary", size=27, fontweight="bold")
plt.show()
train["Age_Group"] = pd.cut(
train.Age,
bins=[14, 25, 30, 45, 55, 95, 120],
labels=["14-25", "25-30", "30-45", "45-55", "55-95", "95-120"],
)
age_groups = (
    train.groupby(["Age_Group", "Credit_Score"])[
        ["Outstanding_Debt", "Annual_Income", "Num_Bank_Accounts", "Num_Credit_Card"]
    ]
    .sum()
    .reset_index()
)
age_groups
g = sns.catplot(
data=age_groups,
x="Age_Group",
y="Outstanding_Debt",
height=7,
aspect=1,
col="Credit_Score",
kind="bar",
ci=None,
)
g.set_axis_labels("Age Group", "Outstanding Debt", size=27, fontweight="bold")
plt.show()
g = sns.catplot(
data=age_groups,
x="Age_Group",
y="Annual_Income",
height=7,
aspect=1,
col="Credit_Score",
kind="bar",
ci=None,
)
g.set_axis_labels("Age Group", "Annual Income", size=27, fontweight="bold")
plt.show()
g = sns.relplot(
data=train,
x="Num_Bank_Accounts",
y="Num_Credit_Card",
col="Credit_Score",
height=7,
aspect=1,
)
g.set_axis_labels(
"Number of Bank Accounts", "Number of Credit Card", size=27, fontweight="bold"
)
plt.show()
sns.catplot(x="Credit_Score", col="Credit_Mix", data=train, kind="count", col_wrap=3)
### Bar graph showing the value counts of the column - Payment_of_Min_Amount
min_amount_count = train["Payment_of_Min_Amount"].value_counts(dropna=False)
min_amount_count
sns.set(rc={"figure.figsize": (5, 5)})
sns.barplot(x=min_amount_count.index, y=min_amount_count.values, alpha=0.8)
plt.title("Bar graph showing the value counts of the column - Payment_of_Min_Amount")
plt.ylabel("Number of Occurrences", fontsize=12)
plt.xlabel("Payment of Minimum Amount", fontsize=12)
plt.show()
from statsmodels.stats.outliers_influence import variance_inflation_factor
def calu_vif(dataset):
vif = pd.DataFrame()
vif["feature"] = dataset.columns
vif["Vif_values"] = [
variance_inflation_factor(dataset.values, i) for i in range(dataset.shape[1])
]
return vif
train.dropna(inplace=True)
num = train.select_dtypes(include=["int", "float"])
num_cols = num.columns
num_cols
calu_vif(num)
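# A possible follow-up (not applied here): flag features whose VIF exceeds a common
# rule-of-thumb threshold such as 10 as candidates for removal. A minimal sketch:
vif_table = calu_vif(num)
high_vif_features = vif_table.loc[vif_table["Vif_values"] > 10, "feature"].tolist()
print("Features with VIF > 10:", high_vif_features)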
plt.figure(figsize=(15, 6))
sns.heatmap(num.corr(), annot=True, cmap="Blues")
plt.show()
def box_plot(df_c, num_cols):
plt.figure(figsize=(20, 15))
for i in range(len(num_cols)):
if i == 16:
break
else:
plt.subplot(4, 4, i + 1)
l = num_cols[i]
sns.boxplot(df_c[l], palette="flare")
box_plot(train, num_cols)
df_num_clean = train[num_cols].copy()
cols = num_cols
scaler = preprocessing.RobustScaler()
robust_df_ = scaler.fit_transform(df_num_clean)
robust_df_ = pd.DataFrame(robust_df_, columns=cols)
scaler = preprocessing.StandardScaler()
standard_df = scaler.fit_transform(df_num_clean)
standard_df = pd.DataFrame(standard_df, columns=cols)
scaler = preprocessing.MinMaxScaler()
minmax_df = scaler.fit_transform(df_num_clean)
minmax_df = pd.DataFrame(minmax_df, columns=cols)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(20, 5))
ax1.set_title("Before Scaling")
sns.kdeplot(df_num_clean["Age"], ax=ax1, color="b")
ax2.set_title("After Robust Scaling")
sns.kdeplot(robust_df_["Age"], ax=ax2, color="g")
ax3.set_title("After Standard Scaling")
sns.kdeplot(standard_df["Age"], ax=ax3, color="b")
ax4.set_title("After Min-Max Scaling")
sns.kdeplot(minmax_df["Age"], ax=ax4, color="g")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # import Header files
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv(
"/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv"
)
data.head()
data = data.drop(["CustomerID"], axis=1)
data.head()
data.info()
data.shape
data.isnull().sum()
# # EDA
sns.kdeplot(data=data)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
data["Gender"] = le.fit_transform(data["Gender"])
data.head()
from sklearn.cluster import KMeans
km = KMeans(n_clusters=4)
y_pred = km.fit_predict(data)  # fit_predict both fits the model and returns cluster labels
y_pred
wcss = []
for i in range(1, 11):
clustering = KMeans(n_clusters=i, init="k-means++", random_state=42)
clustering.fit(data)
wcss.append(clustering.inertia_)
ks = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sns.lineplot(x=ks, y=wcss)
# Refit at a chosen k instead of reusing the k=10 model left over from the elbow loop
best_k = 4  # pick this after inspecting the elbow plot above; 4 matches the earlier KMeans
km_best = KMeans(n_clusters=best_k, init="k-means++", random_state=42).fit(data)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))
sns.scatterplot(ax=axes[0], data=data, x="Age", y="Annual Income (k$)").set_title(
    "Without clustering"
)
sns.scatterplot(
    ax=axes[1], data=data, x="Age", y="Annual Income (k$)", hue=km_best.labels_
).set_title("K-Means clusters (k chosen via the elbow method)")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Load data and the first look
fn = "/kaggle/input/pubmed-papers/pubmed_landscape_data.csv"
df = pd.read_csv(fn)
df
print(df.info())
print()
print(df["Year"].value_counts())
print()
print(df["Labels"].value_counts())
print()
print(df["Journal"].value_counts())
print()
# # Select papers with the selected word in the title
str2find = "medulloblastoma"
mask = df["Title"].apply(lambda x: str2find in str(x).lower())
print(mask.sum())
df[mask].head(20)
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure(figsize=(20, 10))
c = 0
n_x_subplots = 2
plt.suptitle(str2find + " in the title " + " n=" + str(mask.sum()), fontsize=20)
c += 1
fig.add_subplot(1, n_x_subplots, c)
sns.scatterplot(x=df[mask]["x"], y=df[mask]["y"], hue=df[mask]["Labels"])
# plt.show()
# plt.figure(figsize = (20,10 ) )
c += 1
fig.add_subplot(1, n_x_subplots, c)
sns.scatterplot(x=df[mask]["x"], y=df[mask]["y"], hue=df[mask]["Year"])
plt.show()
text1 = " ".join(str(title).lower() for title in df[mask].Title)
from wordcloud import WordCloud, STOPWORDS
# print(STOPWORDS)
word_cloud1 = WordCloud(
background_color="white",
stopwords=[str2find, "for"] + list(STOPWORDS),
width=2048,
height=1080,
).generate(text1)
# word_cloud1 = WordCloud(collocations = False, background_color = 'white',
# width = 2048, height = 1080).generate(text1)
# saving the image
word_cloud1.to_file("got.png")
plt.figure(figsize=(20, 10))
plt.imshow(word_cloud1, interpolation="bilinear")
plt.axis("off")
plt.show()
for col in ["Labels", "Year", "Journal"]:
print(df[mask][col].value_counts())
print()
|
import numpy as np
import pandas as pd
# # Introduction
# Goal:
# * evaluate spelling similarity of two string
# Step:
# * initial state : the word we're transforming
# * operators : delete, switch (transpose), replace, insert (note that a replace costs the same as a delete + an insert; only delete, insert, and replace are used below)
# * goal state : the word we're trying to get to
# * path cost : what we want to minimize --- the number of edits
# More application :
# * DNA, spell correction and more
# Detail introduction :
# * https://web.stanford.edu/class/cs124/lec/med.pdf
# # Algorithms
# ### Dynamic Programming (Levenshtein)
# $$\text{Initialization}$$
# \begin{align}
# D[0,0] &= 0 \\
# D[i,0] &= D[i-1,0] + del\_cost(source[i]) \tag{1}\\
# D[0,j] &= D[0,j-1] + ins\_cost(target[j]) \\
# \end{align}
# $$\text{Per Cell Operations}$$
# \begin{align}
# \\
# D[i,j] =min
# \begin{cases}
# D[i-1,j] + del\_cost\\
# D[i,j-1] + ins\_cost\\
# D[i-1,j-1] + \left\{\begin{matrix}
# rep\_cost; & if src[i]\neq tar[j]\\
# 0 ; & if src[i]=tar[j]
# \end{matrix}\right.
# \end{cases}
# \tag{2}
# \end{align}
# If we set the costs to 1, 1, 2 (delete, insert, replace), the metric is called the Levenshtein distance
def dp_solver(word, target, del_cost=1, ins_cost=1, rep_cost=2):
# initialize
m = len(word)
n = len(target)
D = np.zeros((m + 1, n + 1))
for i in range(m + 1):
D[i, 0] = i
for j in range(n + 1):
D[0, j] = j
for i in range(1, m + 1):
for j in range(1, n + 1):
if word[i - 1] != target[j - 1]:
rp = rep_cost
else:
rp = 0
D[i, j] = min(
[D[i - 1, j] + del_cost, D[i, j - 1] + ins_cost, D[i - 1, j - 1] + rp]
)
min_dis = D[m, n]
return D, min_dis
word = "intention"
target = "execution"
matrix, min_dis = dp_solver(word, target)
print("minimum distance edit number :", min_dis)
pd.DataFrame(
matrix, columns=["#"] + [c for c in target], index=["#"] + [c for c in word]
)
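# Optional cross-check (assumes the nltk package is installed): NLTK's edit_distance
# with substitution_cost=2 computes the same Levenshtein variant and should agree
# with min_dis above (8).
from nltk.metrics.distance import edit_distance
print(edit_distance(word, target, substitution_cost=2))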
#
# What the DP table alone doesn't tell us is the character-level alignment:
#
# * I N T E & N T I O N
# * & E X E C U T I O N
# * I -> & : delete (cost : 1)
# * N -> E : replace (cost : 2)
# * T -> X : replace (cost : 2)
# * & -> C : insert (cost : 1)
# * N -> U : replace (cost : 2)
# The total (1 + 2 + 2 + 1 + 2 = 8) matches the minimum distance; use BackTrace to recover this alignment.
# ### BackTrace
# * We often need to align each charactor of the two strings to each other
# * Every time we enter a cell, remember where we came from
# * When we reach the end, trace back the path from the bottom-right cell D[m, n] to read off the alignment
# $$\text{Base Conditions}$$
# \begin{align}
# D[i,0] &= i \ \ \ D[0,j] = j
# \end{align}
# $$\text{Recurrence Relation}$$
# \begin{align}
# \\
# D[i,j] =min
# \begin{cases}
# D[i-1,j] + del\_cost\\
# D[i,j-1] + ins\_cost\\
# D[i-1,j-1] + \left\{\begin{matrix}
# rep\_cost; & if src[i]\neq tar[j]\\
# 0 ; & if src[i]=tar[j]
# \end{matrix}\right.
# \end{cases}
# \tag{2}
# \end{align}
# \begin{align}
# \\
# ptr[i,j] =
# \begin{cases}
# LEFT(insert)\\
# DOWN(delete)\\
# DIAG(replace)
# \end{cases}
# \end{align}
def BackTraceSolver(src, tar, del_cost=1, ins_cost=1, rep_cost=2):
m = len(src)
n = len(tar)
D = np.zeros((m + 1, n + 1))
for i in range(m + 1):
D[i, 0] = i
for j in range(n + 1):
D[0, j] = j
prt = {}
for i in range(1, m + 1):
for j in range(1, n + 1):
if src[i - 1] != tar[j - 1]:
rp = rep_cost
else:
rp = 0
search = {}
search[(i - 1, j)] = D[i - 1, j] + del_cost
search[(i, j - 1)] = D[i, j - 1] + ins_cost
search[(i - 1, j - 1)] = D[i - 1, j - 1] + rp
D[i, j] = min(search.values())
re_search = {val: key for key, val in search.items()}
if search[(i - 1, j)] != search[(i, j - 1)] != search[(i - 1, j - 1)]:
d_i, d_j = re_search[D[i, j]]
# record path
prt[(i, j)] = (d_i, d_j)
else:
# record path
prt[(i, j)] = (i - 1, j - 1)
# trace back from last point
trace_back = []
last_pt = (m, n)
while True:
try:
prt[last_pt]
        except KeyError:  # reached a border cell with no recorded predecessor
trace_back.append(last_pt)
break
trace_back.append(last_pt)
last_pt = prt[last_pt]
min_dis = D[m, n]
return D, min_dis, trace_back
src = "intention"
tar = "execution"
matrix, min_ids, trace_back = BackTraceSolver(src, tar)
trace_back
trace_matrix = matrix.copy()
for item in trace_back:
i, j = item
trace_matrix[i][j] = 1e-7
df = pd.DataFrame(
    trace_matrix, columns=["#"] + [c for c in tar], index=["#"] + [c for c in src]
)
df
# * Show the result by checking the table and print
print("i->#") # delete
print("n->e") # replace
print("t->x") # replace
print("e->e,c") # insert
print("n->u") # replace
print("tion->tion") # same
# heatmap
# * left : insert
# * up : delete
# * diag : replace
import seaborn as sns
sns.heatmap(data=df)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import warnings
from tqdm.auto import tqdm
tqdm.pandas()
warnings.filterwarnings("ignore")
dataset = pd.read_csv(
"../input/cnn-articles-after-basic-cleaning/CNN_Articels_clean_2/CNN_Articels_clean.csv"
)
dataset = dataset[["Article text", "Category"]]
dataset = dataset[dataset.Category != "vr"]
dataset = dataset[dataset.Category != "travel"]
dataset = dataset[dataset.Category != "style"]
dataset["Category"] = dataset["Category"].astype("category")
# Category to number mapping
dataset["Category_code"] = dataset.Category
dataset["Category_code"] = dataset.Category.cat.codes
# rename() returns a new DataFrame, so assign the result back
dataset = dataset.rename(columns={"Article text": "Article"})
dataset["Category"] = dataset["Category_code"]
dataset = dataset.drop("Category_code", axis=1)
# **preprocessing**
# RE - library for regular expression
import re
import num2words
# NLTK - library for symbolic and statistical natural language processing (NLP)
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
# nltk.download('omw-1.4')
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('wordnet')
def clean_text(web_text):
# Lowercasing
web_text = str(web_text)
text_clean = web_text.lower()
moneyChar = ["$", "€", "£", "¥", "₣", "₹"]
money = ["dollar", "euro", "pound", "yen", "franc", "rupee"]
text_clean = "".join(
[money[moneyChar.index(i)] if i in moneyChar else i for i in text_clean]
)
tokens = word_tokenize(text_clean)
text_clean = " ".join(
[num2words.num2words(i) if i.isdigit() else i for i in tokens]
)
text_clean = re.sub(r"[^a-z]", " ", text_clean)
tokens = word_tokenize(text_clean)
stop_words = set(nltk.corpus.stopwords.words("english"))
tokensWSW = [word for word in tokens if word not in stop_words]
wordnet_lemmatizer = WordNetLemmatizer()
lemmatized_list = []
for word in tokensWSW:
lemmatized_list.append(wordnet_lemmatizer.lemmatize(word))
return " ".join(lemmatized_list)
dataset["Article"] = dataset["Article"].progress_apply(clean_text)
# sklearn - library for machine learning
# train_test_split - Split arrays or metrics into random train and test subsets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
dataset["Article"], dataset["Category"], test_size=0.15, random_state=8
)
# **max features 1600**
# TfidfVectorizer - Convert a collection of raw documents to a matrix of TF-IDF features
# Represent words as Vectors
from sklearn.feature_extraction.text import TfidfVectorizer
# Parameter election
ngram_range = (1, 2)
min_df = 10
max_df = 1.0
max_features = 1600
tfidf = TfidfVectorizer(
encoding="utf-8",
ngram_range=ngram_range,
stop_words=None,
lowercase=False,
max_df=max_df,
min_df=min_df,
max_features=max_features,
norm="l2",
sublinear_tf=True,
)
# Transform documents to a matrix in train
features_train = tfidf.fit_transform(X_train).toarray()
labels_train = y_train
print(features_train.shape)
# Transform documents to a matrix in test
features_test = tfidf.transform(X_test).toarray()
labels_test = y_test
print(features_test.shape)
# **model SVM**
# sklearn - library for machine learning
# SVM - support-vector machines - supervised learning models for classification
from sklearn import svm
# sklearn.metrics - for evaluating the quality of a model’s predictions
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# seaborn - library for statistical data visualization
import seaborn as sns
# Default hyperparameters
svc = svm.SVC(random_state=8)
print(svc.get_params())
# Fit the SVC model (default hyperparameters)
svc.fit(features_train, labels_train)
# Find the prediction on the test
svc_predict = svc.predict(features_test)
print(
"Accuracy score on train: ",
accuracy_score(labels_train, svc.predict(features_train)),
)
# Find the prediction on the test
print(
"Accuracy score on test: ", accuracy_score(labels_test, svc.predict(features_test))
)
# Classification report
print("Classification report: ")
print(classification_report(labels_test, svc_predict))
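# A minimal inference sketch (the article text below is hypothetical): run a new document
# through the same cleaning + TF-IDF transform used above and predict its category code.
new_article = "The central bank raised interest rates again amid growing inflation concerns."
new_features = tfidf.transform([clean_text(new_article)]).toarray()
print("Predicted category code:", svc.predict(new_features)[0])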
# **model random forest**
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=0)
clf.fit(features_train, labels_train)
# Find the prediction on the test
clf_predict = clf.predict(features_test)
print(
"Accuracy score on train: ",
accuracy_score(labels_train, clf.predict(features_train)),
)
# Find the prediction on the test
print("Accuracy score on test: ", accuracy_score(labels_test, clf_predict))
# Classification report
print("Classification report: ")
print(classification_report(labels_test, clf_predict))
# **DecisionTree**
from sklearn import tree
dtc = tree.DecisionTreeClassifier()
dtc.fit(features_train, labels_train)
# Find the prediction on the test
dtc_predict = dtc.predict(features_test)
print(
"Accuracy score on train: ",
accuracy_score(labels_train, dtc.predict(features_train)),
)
# Find the prediction on the test
print("Accuracy score on test: ", accuracy_score(labels_test, dtc_predict))
# Classification report
print("Classification report: ")
print(classification_report(labels_test, dtc_predict))
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
# Train the model on your training data
knn.fit(features_train, labels_train)
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Predict labels using the trained KNN model
y_pred = knn.predict(features_test)
# Calculate accuracy, precision, recall, and F1 score
accuracy = accuracy_score(labels_test, y_pred)
precision = precision_score(labels_test, y_pred, average="macro")
recall = recall_score(labels_test, y_pred, average="macro")
f1 = f1_score(labels_test, y_pred, average="macro")
print("Accuracy:", accuracy)
print("Precision:", precision)
print("Recall:", recall)
print("F1 score:", f1)
print(
"Accuracy score on train: ",
accuracy_score(labels_train, knn.predict(features_train)),
)
# Find the prediction on the test
print("Accuracy score on test: ", accuracy_score(labels_test, y_pred))
# Classification report
print("Classification report: ")
print(classification_report(labels_test, y_pred))
|
# # About Data
# * 30000 audio samples of spoken digits (0-9): 60 speaker folders with 500 files each (30000 = 10 digits x 60 speakers x 50 repetitions)
# * "audioMNIST_meta.txt" : meta information such as gender or age of each speaker
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import joblib as jl
import librosa
from IPython.display import Audio
# ### Get data and play it.
def get_data(digit=0, person=1, index=0, target_sr=16000):
if person < 10:
file = f"../input/audio-mnist/data/0{person}/{digit}_0{person}_{index}.wav"
else:
file = f"../input/audio-mnist/data/{person}/{digit}_{person}_{index}.wav"
data, sr = librosa.load(file) # sr=22050
# down sampling to 16000Hz
down_d = librosa.resample(data, orig_sr=sr, target_sr=target_sr)
return down_d, target_sr
# ##### Down-sampling from 22050 Hz to 16000 Hz, which is sufficient for the frequency range of human speech.
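# (At sr = 16000 Hz the Nyquist limit is 16000 / 2 = 8000 Hz, which still covers the bulk of speech energy.)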
def play_audio(digit=0, person=1, index=0):
data, sr = get_data(digit=digit, person=person, index=index)
plt.figure(figsize=(5, 2))
plt.plot(np.linspace(0, len(data) / sr, len(data)), data)
plt.title(f"digit: {digit}, person: {person}, index{index}")
plt.xlabel(f"[Sec] ({len(data)} samples)")
plt.show()
return display(Audio(data=data, rate=sr))
for i in range(5, 7):
play_audio(digit=i)
# ### digit `0~9`, person `1~60`, index `0~49`; the audio lengths differ between files.
# ### Check all data length to fit model input.
# %%time
# import joblib as jl
# def data_info(digit):
# len_list = []
# for person in range(1,61):
# for index in range(50):
# data,sr = get_data(digit=digit,person=person,index=index)
# len_list.append(len(data))
# return len_list
# len_info = jl.Parallel(n_jobs=-1)(jl.delayed(data_info)(x) for x in range(10))
# jl.dump(len_info, 'len_info.jl')
len_info = jl.load("/kaggle/input/audiomnist2/len_info.jl")
def array_info(x):
print(f"shape: {x.shape}")
print(f"mean: {np.mean(x):.1f}, median: {np.median(x)}")
print(f"min: {np.min(x)}, max: {np.max(x)}")
print(f"percentile 75: {np.percentile(x, 75)}")
print(f"percentile 90: {np.percentile(x, 90)}")
len_info = np.array(len_info)
array_info(len_info)
# ##### Modify get_data() to return a fixed length of 12000 samples, based on the length statistics above.
# Modified get_data()
def get_data(digit=0, person=1, index=0, target_sr=16000):
if person < 10:
file = f"../input/audio-mnist/data/0{person}/{digit}_0{person}_{index}.wav"
else:
file = f"../input/audio-mnist/data/{person}/{digit}_{person}_{index}.wav"
data, sr = librosa.load(file) # sr=22050
    # down sampling to 16000 Hz
down_d = librosa.resample(data, orig_sr=sr, target_sr=target_sr)
# fixed length of all data to 12000 samples
fix_len_d = librosa.util.fix_length(down_d, size=12000)
return fix_len_d, target_sr
# # Feature Comparison
# * Mel spectrogram, Spectrogram, Periodogram, MFCC
# Mel spectrogram
def mel_data(digit=0, person=1, index=0):
data, sr = get_data(digit=digit, person=person, index=index)
data = librosa.feature.melspectrogram(y=data, sr=sr, n_mels=256)
return data, sr
fig, ax = plt.subplots(5, 2, figsize=(10, 20))
for i, ax in enumerate(ax.flat):
data, sr = mel_data(digit=i, person=1, index=0)
librosa.display.specshow(
librosa.power_to_db(data, ref=np.max), sr=sr, y_axis="mel", x_axis="time", ax=ax
)
ax.set_title(f"d: {i}, p: {1}, i: {0}, {data.shape}")
plt.tight_layout()
plt.show()
# Spectrogram
def spec_data(digit=0, person=1, index=0):
data, sr = get_data(digit=digit, person=person, index=index)
data = np.abs(librosa.stft(data))
return data, sr
fig, ax = plt.subplots(5, 2, figsize=(10, 20))
for i, ax in enumerate(ax.flat):
data, sr = spec_data(digit=i, person=1, index=0)
librosa.display.specshow(
librosa.amplitude_to_db(data, ref=np.max),
sr=sr,
y_axis="mel",
x_axis="time",
ax=ax,
)
ax.set_title(f"d: {i}, p: {1}, i: {0}, {data.shape}")
plt.tight_layout()
plt.show()
# Periodogram
def peri_data(digit=0, person=1, index=0):
data, sr = get_data(digit=digit, person=person, index=index)
fft = np.fft.fft(data, len(data))
peri = 2 * np.abs(fft)[: len(data) // 2] / len(data)
return peri, sr
fig, ax = plt.subplots(5, 2, figsize=(10, 20))
for i, ax in enumerate(ax.flat):
data, sr = peri_data(digit=i, person=1, index=0)
ax.plot(np.linspace(0, sr, len(data)), np.log10(data))
ax.set_xlabel("Frequncy [Hz]")
ax.set_title(f"d: {i}, p: {1}, i: {0}, {data.shape}")
plt.tight_layout()
plt.show()
# comparing spectrogram with mel
fig, ax = plt.subplots(10, 2, figsize=(10, 40))
for i in range(10):
data, sr = spec_data(digit=i, person=1, index=0)
librosa.display.specshow(
librosa.amplitude_to_db(data, ref=np.max),
sr=sr,
y_axis="mel",
x_axis="time",
ax=ax[i][0],
)
ax[i][0].set_title(f"spec d: {i}, p: {1}, i: {0}, {data.shape}")
data, sr = mel_data(digit=i, person=1, index=0)
librosa.display.specshow(
librosa.power_to_db(data, ref=np.max),
sr=sr,
y_axis="mel",
x_axis="time",
ax=ax[i][1],
)
ax[i][1].set_title(f"mel d: {i}, p: {1}, i: {0}, {data.shape}")
plt.tight_layout()
plt.show()
# # Mfcc dataset
def mfcc_data(digit=0, person=1, index=0):
data, sr = get_data(digit=digit, person=person, index=index)
data = librosa.feature.mfcc(y=data, sr=sr, n_mfcc=40)
return data, sr
fig, axs = plt.subplots(5, 2, figsize=(10, 20))
for i, ax in enumerate(axs.flat):
data, sr = mfcc_data(digit=i)
librosa.display.specshow(data, ax=ax)
ax.set_title(f"d:{i}, p:1, i:0, {data.shape}")
plt.tight_layout()
plt.show()
# %%time
# #1min 39s
# def norm(x):
# return (x-np.mean(x))/np.std(x)
# def make_ds(digit):
# ds_list = []
# for p in range(1,61):
# for i in range(50):
# data,sr = mfcc_data(digit=digit,person=p,index=i)
# data = norm(data)
# ds_list.append([data,digit,p,i])
# return ds_list
# mfcc_list = jl.Parallel(n_jobs=-1)(jl.delayed(make_ds)(x) for x in range(10))
# jl.dump(mfcc_list,'mfcc_list.jl')
mfcc_list = jl.load("/kaggle/input/audiomnist3/mfcc_list.jl")
# Convert to DataFrame for analysis convenience
df_mfcc = pd.DataFrame(
np.array(mfcc_list, dtype="object").reshape(-1, 4),
columns=["mfcc", "cls", "p", "i"],
)
# check data saving order to confirm the Y's order on train_test_split
df_mfcc["num"] = df_mfcc.cls * 10000 + df_mfcc.p * 100 + df_mfcc.i
for x in range(len(df_mfcc) - 1):  # check the full frame, not just the first 6000 rows
if df_mfcc.num[x] >= df_mfcc.num[x + 1]:
print("error")
plt.plot(df_mfcc.index, df_mfcc.num)
X_mfcc = np.array(df_mfcc.mfcc.tolist())
Y_mfcc = np.array((df_mfcc.cls).tolist(), dtype="int")
print(type(X_mfcc[0][0][0]), X_mfcc.shape, type(Y_mfcc[0]), Y_mfcc.shape)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X_mfcc, Y_mfcc, test_size=0.3, shuffle=True, random_state=42
)
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
# check distribution if data is split uniformly.
unique, cnt = np.unique(y_train, return_counts=True)
ax = sns.barplot(x=unique, y=cnt)
for p in ax.patches:
ax.text(
        p.get_x() + p.get_width() / 2,  # x coordinate
        p.get_y() + p.get_height(),  # y coordinate
        f"{p.get_height():.0f}",  # value
        ha="center",
    )  # center alignment
plt.show()
# # MFCC modeling
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPool1D, Dense, Flatten
import keras.backend as K
K.clear_session()
model = Sequential(
[
Conv1D(32, 3, activation="relu", input_shape=(40, 24)),
MaxPool1D(2),
Conv1D(64, 3, activation="relu"),
MaxPool1D(2),
Flatten(),
Dense(128, activation="relu"),
Dense(10, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy", metrics=["accuracy"], optimizer="adam"
)
model.summary()
from sklearn.model_selection import KFold
from tensorflow.keras.callbacks import (
ModelCheckpoint,
EarlyStopping,
ReduceLROnPlateau,
CSVLogger,
)
# import keras.backend as K
# K.clear_session()
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
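# Note: the same compiled model instance is reused across all folds below, so its
# weights carry over from one fold to the next rather than being re-initialized per fold.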
for i, (trn_idx, val_idx) in enumerate(kfold.split(x_train)):
print(f"Fold {i+1}")
x_trn, y_trn = x_train[trn_idx], y_train[trn_idx]
x_val, y_val = x_train[val_idx], y_train[val_idx]
cp = ModelCheckpoint(
filepath=f"mfcc_w_{i+1}.h5",
save_weights_only=True,
verbose=0,
save_best_only=True,
)
es = EarlyStopping(
monitor="val_loss", patience=10, verbose=1, restore_best_weights=True
)
cl = CSVLogger("mfcc_training.log") # 로그 파일명을 지정합니다.
rlr = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5, min_lr=1e-6)
# Fit the model
history = model.fit(
x_trn,
y_trn,
validation_data=(x_val, y_val),
epochs=100,
batch_size=32,
verbose=0,
workers=-1,
callbacks=[cp, es, cl, rlr],
) # , initial_epoch=97)
model.evaluate(x_test, y_test)
from sklearn.metrics import classification_report, confusion_matrix
pred_list = []
for i in range(5):
model.load_weights(f"mfcc_w_{i+1}.h5")
model.evaluate(x_test, y_test)
pred = model.predict(x_test)
pred_list.append(pred)
pred_sum = np.sum(np.array(pred_list), axis=0)
pred_y = np.argmax(pred_sum, axis=1)
print(classification_report(y_test, pred_y))
plt.title("Confusion Matix for MFCC model Prediction")
cm = confusion_matrix(y_test, pred_y)
sns.heatmap(cm, annot=True, center=0, cmap="coolwarm", fmt="g", cbar=True)
plt.show()
# # Mel dataset
# %%time
# # Make dataset as list and save it by joblib
# def make_ds(digit):
# ds_list = []
# for p in range(1,61):
# for i in range(50):
# data,sr = mel_data(digit=digit, person=p, index=i)
# data = norm(data)
# ds_list.append([data, digit, p, i])
# return ds_list
# mel_list = jl.Parallel(n_jobs=-1)(
# jl.delayed(make_ds)(digit) for digit in range(10))
# jl.dump(mel_list, 'mel_list.jl')
mel_list = jl.load("/kaggle/input/audiomnist3/mel_list.jl")
df_mel = pd.DataFrame(
np.array(mel_list, dtype="object").reshape(-1, 4), columns=["mel", "cls", "p", "i"]
)
X_mel = np.array(df_mel.mel.tolist())
Y_mel = np.array((df_mel.cls).tolist(), dtype="int")
print(type(X_mel[0][0][0]), X_mel.shape, type(Y_mel[0]), Y_mel.shape)
from sklearn.model_selection import train_test_split
x_train1, x_test1, y_train1, y_test1 = train_test_split(
X_mel, Y_mel, test_size=0.3, shuffle=True, random_state=42
)
print(x_train1.shape, y_train1.shape, x_test1.shape, y_test1.shape)
# # Mel Modeling
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPool1D, Dense, Flatten
import keras.backend as K
K.clear_session()
model = Sequential(
[
Conv1D(32, 3, activation="relu", input_shape=(256, 24)),
MaxPool1D(2),
Conv1D(64, 3, activation="relu"),
MaxPool1D(2),
Flatten(),
Dense(128, activation="relu"),
Dense(10, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy", metrics=["accuracy"], optimizer="adam"
)
model.summary()
from sklearn.model_selection import KFold
from tensorflow.keras.callbacks import (
ModelCheckpoint,
EarlyStopping,
ReduceLROnPlateau,
CSVLogger,
)
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
for i, (trn_idx, val_idx) in enumerate(kfold.split(x_train1)):
print(f"Fold {i+1}")
x_trn, y_trn = x_train1[trn_idx], y_train1[trn_idx]
x_val, y_val = x_train1[val_idx], y_train1[val_idx]
cp = ModelCheckpoint(
filepath=f"mel_w_{i+1}.h5",
save_weights_only=True,
verbose=0,
save_best_only=True,
)
es = EarlyStopping(
monitor="val_loss", patience=10, verbose=1, restore_best_weights=True
)
cl = CSVLogger("mel_training.log") # 로그 파일명을 지정합니다.
rlr = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5, min_lr=1e-6)
# Fit the model
history = model.fit(
x_trn,
y_trn,
validation_data=(x_val, y_val),
epochs=100,
batch_size=32,
verbose=0,
workers=-1,
callbacks=[cp, es, cl, rlr],
) # , initial_epoch=97)
model.evaluate(x_test1, y_test1)
from sklearn.metrics import classification_report, confusion_matrix
pred_list1 = []
for i in range(5):
model.load_weights(f"mel_w_{i+1}.h5")
model.evaluate(x_test1, y_test1)
pred1 = model.predict(x_test1)
pred_list1.append(pred1)
pred_sum1 = np.sum(np.array(pred_list1), axis=0)
pred_y1 = np.argmax(pred_sum1, axis=1)
print(classification_report(y_test1, pred_y1))
plt.title("Confusion Matix for Mel model Prediction")
cm = confusion_matrix(y_test1, pred_y1)
sns.heatmap(cm, annot=True, center=0, cmap="coolwarm", fmt="g", cbar=True)
plt.show()
# # Model comparison
from sklearn.metrics import accuracy_score, multilabel_confusion_matrix, f1_score
# Model ensemble
pred_sum2 = pred_sum + pred_sum1
pred_y2 = np.argmax(pred_sum2, axis=1)
cm2 = confusion_matrix(y_test, pred_y2)
print(classification_report(y_test, pred_y2))
sns.heatmap(cm2, center=0, cmap="coolwarm", annot=True, fmt="g")
plt.title("Confusion Matix for Mel+Mfcc Ensemble model Prediction")
plt.show()
# Model comparison
for p_data, m_name in [(pred_y, "mfcc"), (pred_y1, "mel"), (pred_y2, "ens")]:
print(f"accuracy score {m_name}: {accuracy_score(y_test, p_data):.4f}")
print(f" f1 score {m_name}: {f1_score(y_test, p_data,average='weighted'):.4f}")
|
# ## Kernel description
# Adding new features to the `train_meta` dataframe:
# 1) Statistical, such as the total number of impulses or the average relative time within one event and so on;
# 2) Predictions of different models as features (***work in progress***).
#
# The Polars library was used for feature engineering because it allows processing all 660 batches and 131,953,924 events many times faster than Pandas.
#
# This Kernel has separate functions that you can use and modify to create your own features.
#
# The resulting feature table is shown at the end of the notebook.
# Please don't hesitate to leave your comments on this Kernel: use the features table for your models and share the results.
# ## Updates
# **Ver. 2:** Removed the use of Pandas functions to create features (now Polars only). Separate functions added for feature engineering. Added new features in the `train_meta` data as well.
# ## Sources
# For this Kernel, [[日本語/Eng]🧊: FeatureEngineering](https://www.kaggle.com/code/utm529fg/eng-featureengineering) kernel was used, as well as separate articles about Polars library and feature engineering:
# 1) [📊 Построение и отбор признаков. Часть 1: feature engineering (RUS)](https://proglib.io/p/postroenie-i-otbor-priznakov-chast-1-feature-engineering-2021-09-15)
# 2) [Polars: Pandas DataFrame but Much Faster](https://towardsdatascience.com/pandas-dataframe-but-much-faster-f475d6be4cd4)
# 3) [Polars: calm](https://calmcode.io/polars/calm.html)
# 4) [Polars - User Guide](https://pola-rs.github.io/polars-book/user-guide/coming_from_pandas.html)
# ## Import libraries
# List all installed packages and package versions
#!pip freeze
#!pip install --upgrade polars
import numpy as np
import os
import pandas as pd
import polars as pl
from tqdm.notebook import tqdm
# Check existing paths:
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Will try to use Polars as one of the fastest libraries because it uses parallelization and cache-efficient algorithms to speed up analytics tasks.
#
# Let's create LazyFrames from all existing inputs:
INPUT_DIR = "/kaggle/input/icecube-neutrinos-in-deep-ice"
train_meta = pl.scan_parquet(f"{INPUT_DIR}/train_meta.parquet")
sensor_geometry = pl.scan_csv(f"{INPUT_DIR}/sensor_geometry.csv")
batches_dict = {}
for i in range(1, train_meta.collect()["batch_id"].max() + 1):
key = str("train_batch_" + str(i))
batches_dict[key] = pl.scan_parquet(f"{INPUT_DIR}/train/batch_{i}.parquet")
print(type(train_meta))
print(type(sensor_geometry))
print(type(batches_dict["train_batch_1"]))
# Will add new features to the `train_meta` data:
def add_stat_features(meta):
"""
    add new statistical features to the selected dataframe (intended to be the 'train_meta' dataframe):
1) n_events_per_batch - number of events in each batch
2) pulse_count - count of pulses detected (last_pulse_index - first_pulse_index + 1)
Parameters:
-----------
meta : LazyFrame
Returns:
-----------
meta : LazyFrame
"""
return meta.with_columns(
[
pl.col("event_id")
.count()
.over("batch_id")
.cast(pl.Int64)
.alias("n_events_per_batch"),
(pl.col("last_pulse_index") - pl.col("first_pulse_index") + 1).alias(
"pulse_count"
),
]
)
batches_dict["train_batch_1"].fetch(5)
def add_cols_to_sensor_geometry(sensor):
"""
add new columns for groupby.sum() function
Parameters:
-----------
sensor : LazyFrame
Returns:
-----------
sensor : polars DataFrame
"""
sensor = sensor.with_columns(
[
(pl.col("sensor_id") * 0).alias("sensor_count"),
(pl.col("sensor_id") * 0.0).alias("charge_sum"),
(pl.col("sensor_id") * 0).alias("time_sum"),
(pl.col("sensor_id") * 0).alias("auxiliary_sum"),
]
)
sensor = sensor.collect()
return sensor
add_cols_to_sensor_geometry(sensor_geometry).head()
# Not enough memory to execute this code
# def add_time_mean(train_meta, batches_dict):
# batches = []
# for batch_name, batch in tqdm(batches_dict.items()):
# batch_id = int(batch_name.split("_")[-1])
# batch_df = batch.select(['sensor_id', 'time', 'event_id']).collect()
# batch_len = len(batch_df)
# batch = batch_df.with_columns((pl.Series([batch_id] * batch_len)).alias('batch_id'))
# batches.append(batch)
# all_batches = pl.concat(batches)
# time_mean = all_batches.groupby('event_id').agg(
# pl.col('time').mean().alias('time_mean'))
# train_meta_with_time_mean = train_meta.join(
# time_mean, on='event_id', how='inner')
# return train_meta_with_time_mean
# ***If you know how to reduce memory usage for above code, please feel free to share your ideas in the comments of this notebook.***
# add_time_mean(train_meta, batches_dict, "train_batch_1")
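# One possible lower-memory direction (a sketch only, not verified on the full dataset,
# and assuming this Polars version exposes LazyFrame.groupby as used elsewhere in this
# notebook; newer Polars renames it group_by): aggregate each batch to one small row per
# event_id before concatenating, so the full pulse tables are never held in memory
# together. Event ids do not repeat across batches, so per-batch means equal the
# per-event means.
def add_time_mean_low_memory(train_meta, batches_dict):
    per_batch_means = []
    for batch_name, batch in tqdm(batches_dict.items()):
        # collect only a small (event_id, time_mean) table for this batch
        batch_mean = (
            batch.select(["event_id", "time"])
            .groupby("event_id")
            .agg(pl.col("time").mean().alias("time_mean"))
            .collect()
        )
        per_batch_means.append(batch_mean)
    time_mean = pl.concat(per_batch_means)
    # join the small aggregate back onto the meta LazyFrame
    return train_meta.join(time_mean.lazy(), on="event_id", how="inner")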
def create_batch_features(batch_dict):
for key in tqdm(batch_dict):
batch = batch_dict[key].collect()
# count detected sensor
batch_tmp = batch["sensor_id"].value_counts()
# cast and join
batch_tmp = batch_tmp.with_columns(
[pl.col("sensor_id").cast(pl.Int64), pl.col("counts").cast(pl.Int64)]
)
return batch_tmp
# %%time
# create_batch_features(batches_dict).fetch()
train_meta.pipe(add_stat_features).fetch(5)
# for key in tqdm(batches_dict):
# batch = batches_dict[key].collect()
# # count detected sensor
# batch_tmp = batch['sensor_id'].value_counts()
# # cast and join
# batch_tmp = batch_tmp.with_columns(
# [pl.col('sensor_id').cast(pl.Int64),
# pl.col('counts').cast(pl.Int64)])
# sensor_geometry = sensor_geometry.collect().to_pandas()
# sensor_geometry = sensor_geometry.merge(batch_tmp, on='sensor_id')
# sensor_geometry = sensor_geometry.rename(columns={'counts':'sensor_count'})
# sensor_geometry.head()
|
# # **SIMPLE LINEAR REGRESSION USING ORDINARY LEAST SQUARES**
# # [Kaggle](https://www.kaggle.com/code/avd1729/simple-linear-regression-using-ols)
# # **Setting up the environment**
import numpy as np
import pandas as pd
data = pd.read_csv("/kaggle/input/salary-data-simple-linear-regression/Salary_Data.csv")
# # **Building a regression model**
data.info()
import statsmodels.api as sm
X = sm.add_constant(data["YearsExperience"])
X.head(5)
y = data["Salary"]
# # **Train-Test Split**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, random_state=42
)
salary_lm = sm.OLS(y_train, X_train).fit()
# # **Model Diagnostics**
salary_lm.params
salary_lm.summary2()
# # **Residual analysis**
import matplotlib.pyplot as plt
import seaborn as sn
# # **1.Check for Normal Distribution of Residual**
from scipy import stats
resid = salary_lm.resid
probplot = sm.ProbPlot(np.array(resid))
plt.figure(figsize=(10, 8))
probplot.ppplot(line="45")
plt.show()
# > We find that the residuals do not follow a normal distribution; this may be due to outliers or insufficient data
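# A quick numeric check (a sketch complementing the plot above): the Shapiro-Wilk test
# returns a p-value for the null hypothesis that the residuals are normally distributed;
# a small p-value supports the visual impression of non-normality.
from scipy import stats
shapiro_stat, shapiro_p = stats.shapiro(resid)
print(f"Shapiro-Wilk statistic: {shapiro_stat:.4f}, p-value: {shapiro_p:.4f}")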
# # **2.Test for Homoscedasticity**
def get_standarized_values(vals):
return (vals - vals.mean()) / vals.std()
plt.scatter(
get_standarized_values(salary_lm.fittedvalues), get_standarized_values(resid)
)
plt.xlabel("Standarized predicted values")
plt.ylabel("Standarized Residuals")
plt.show()
# > It can be observed that the residuals are random and have no funnel shape, which means the residuals have constant variance (homoscedasticity)
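# A complementary numeric check (sketch): the Breusch-Pagan test's p-value quantifies the
# homoscedasticity assessment made visually above; a large p-value is consistent with
# constant residual variance. X_train already contains the constant column required by the test.
from statsmodels.stats.diagnostic import het_breuschpagan
bp_lm, bp_lm_pvalue, bp_f, bp_f_pvalue = het_breuschpagan(resid, X_train)
print(f"Breusch-Pagan LM p-value: {bp_lm_pvalue:.4f}")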
# # **Outlier Analysis**
# # **1.Z-Score**
from scipy.stats import zscore
data["z_score_salary"] = zscore(data.Salary)
data[(data.z_score_salary > 3.0) | (data.z_score_salary < -3.0)]
# > There are no observations that are outliers as per Z-Score
# # **2.Cook's Distance**
data_influence = salary_lm.get_influence()
(c, p) = data_influence.cooks_distance
plt.stem(np.arange(len(X_train)), np.round(c, 3), markerfmt=",")
plt.xlabel("Row index")
plt.ylabel("Cooks Distance")
# > It can be observed that 2 of the observations have a Cook's distance exceeding 1 and hence they are outliers
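# A small sketch to list which training rows have Cook's distance above 1, so the
# candidate outliers can be identified programmatically (the commented-out cell below
# drops two rows by index in the same spirit).
high_influence_idx = X_train.index[c > 1.0]
print("Observations with Cook's distance > 1:", list(high_influence_idx))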
"""
X_train = X_train.drop([7,19] , axis=0)
y_train = y_train.drop([7,19] , axis=0)
"""
# # **3.Leverage values**
from statsmodels.graphics.regressionplots import influence_plot
fig, ax = plt.subplots(figsize=(10, 8))
influence_plot(salary_lm, ax=ax)
plt.show()
# > The size of the circle is proportional to the product of the residual and leverage values; the larger the circle,
# the larger the residual and hence the larger the influence of the observation
# # **Prediction**
y_pred = salary_lm.predict(X_test)
# # **Finding R-Squared and RMSE**
from sklearn.metrics import r2_score, mean_squared_error
r2_score(y_test, y_pred)
# r2_score(y_pred, y_test) gives a slightly different value because the argument order matters
np.sqrt(mean_squared_error(y_test, y_pred))
|
# Movie Genre Classification
# <div id = "tb" style = "height: 50px;
# width: 800px;
# background-color: #813EEC;">
# <h1 style="padding: 10px;
# color:white;">
# Table of Contents
#
#
# Introduction
# Feature Selection
# Bag of Words Model
# Random Forest Model
#
# <div id = "eda" style = "height: 50px;
# width: 800px;
# background-color: #813EEC;">
# <h1 style="padding: 10px;
# color:white;">
# 1.Introduction
#
# My aim is to classify the genre as drama or comedy based on the plot summaries of movies
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
from nltk.corpus import stopwords
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report
df = pd.read_csv("/kaggle/input/wikipedia-movie-plots/wiki_movie_plots_deduped.csv")
df.head(10)
# **I am going to extract the data of only the comedy and drama genres**
# for index, row in df.iterrows():
# if row["Genre"] == 'comedy' or row["Genre"] =='drama':
# print(row["Title"])
print(df.shape)
new_df = df[(df.Genre == "comedy") | (df.Genre == "drama")]
new_df.head(10)
class_names = ["drama", "comedy"]
label_count = new_df.Genre.value_counts()
sns.barplot(x=label_count.index, y=label_count)
plt.title("Distribution of drama/comedy", fontsize=14)
comedyList = []
for index, row in new_df.iterrows():
if row["Genre"] == "comedy":
comedyList.append(1)
else:
comedyList.append(0)
# print(comedyList)
dramaList = []
for val in comedyList:
if val == 1:
dramaList.append(0)
else:
dramaList.append(1)
# print(dramaList)
new_df.insert(6, "Comedy", comedyList)
new_df.insert(7, "Drama", dramaList)
new_df = new_df.drop("Genre", axis=1)
new_df.head()
# **This is the data of all the movies with either comedy or drama**
comedy_df = new_df[new_df["Comedy"] == 1]
stop_words = stopwords.words("english")
comedy_df["Plot"] = comedy_df["Plot"].apply(
lambda x: " ".join([word for word in x.split() if word not in (stop_words)])
)
comedy_df.head()
drama_df = new_df[new_df["Comedy"] == 0]
drama_df["Plot"] = drama_df["Plot"].apply(
lambda x: " ".join([word for word in x.split() if word not in (stop_words)])
)
drama_df.head()
new_df["Plot"] = new_df["Plot"].apply(
lambda x: " ".join([word for word in x.split() if word not in (stop_words)])
)
# # Wordcloud of Comedy plots
comedy_plots = " ".join(Plot for Plot in comedy_df.Plot)
wordcloud = WordCloud(
background_color="white",
max_words=300,
width=800,
height=400,
).generate(comedy_plots)
plt.figure(figsize=(20, 10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# # WordCloud of Drama plots
drama_plots = " ".join(Plot for Plot in drama_df.Plot)
wordcloud = WordCloud(
background_color="white",
max_words=300,
width=800,
height=400,
).generate(drama_plots)
plt.figure(figsize=(20, 10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
#
# <div id = "fs" style = "height: 50px;
# width: 800px;
# background-color: #813EEC;">
# <h1 style="padding: 10px;
# color:white;">
# 2.Feature Selection
#
#
X = new_df["Plot"]
y = new_df["Comedy"]
#
# <div id = "bw" style = "height: 50px;
# width: 800px;
# background-color: #813EEC;">
# <h1 style="padding: 10px;
# color:white;">
# 3.Bag of words model
#
#
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, random_state=22
)
count_vectorizer = CountVectorizer(stop_words="english")
count_train = count_vectorizer.fit_transform(X_train.values)
count_test = count_vectorizer.transform(X_test.values)
# print(count_vectorizer.get_feature_names())
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(stop_words="english", max_df=0.7)
tfidf_train = tfidf_vectorizer.fit_transform(X_train)
tfidf_test = tfidf_vectorizer.transform(X_test)
# print(tfidf_train)
count_df = pd.DataFrame(count_train.A, columns=count_vectorizer.get_feature_names_out())
tfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vectorizer.get_feature_names_out())
print(count_df)
print(tfidf_df)
difference = set(count_df.columns) - set(tfidf_df.columns)
print(difference)
print(count_df.equals(tfidf_df))
#
# <div id = "nb" style = "height: 50px;
# width: 800px;
# background-color: #813EEC;">
# <h1 style="padding: 10px;
# color:white;">
# 4. Using Random Forest Classifier for prediction
#
#
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=300)
model.fit(count_train, y_train)
preds = model.predict(count_test)
print(classification_report(y_test, preds))
# metrics.confusion_matrix(y_test,preds,labels=[0,1])
plt.figure(figsize=(8, 6))
sns.heatmap(metrics.confusion_matrix(y_test, preds), annot=True, fmt="", cmap="Blues")
plt.xlabel("Predicted Labels")
plt.ylabel("Real Labels")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Identifying patient by ID
# For check
# patients = pd.read_csv('/kaggle/input/in-hospital-mortality-prediction/data01.csv')
# columns = ['group','ID','outcome','age', 'gendera', 'BMI', 'hypertensive',
# 'atrialfibrillation', 'CHD with no MI', 'diabetes', 'deficiencyanemias',
# 'depression', 'Hyperlipemia', 'Renal failure', 'COPD', 'heart rate',
# 'Systolic blood pressure', 'Diastolic blood pressure',
# 'Respiratory rate', 'temperature', 'SP O2', 'Urine output',
# 'hematocrit', 'RBC', 'MCH', 'MCHC', 'MCV', 'RDW', 'Leucocyte',
# 'Platelets', 'Neutrophils', 'Basophils', 'Lymphocyte', 'PT', 'INR',
# 'NT-proBNP', 'Creatine kinase', 'Creatinine', 'Urea nitrogen',
# 'glucose', 'Blood potassium', 'Blood sodium', 'Blood calcium',
#'Chloride', 'Anion gap', 'Magnesium ion', 'PH', 'Bicarbonate',
#'Lactic acid', 'PCO2', 'EF']
# print(patients.index[patients.ID == 182755])
# patients.set_index("ID", inplace = True)
# result = patients.loc[182755]
# print(result)
# print(result.values.tolist())
# Insert dataset
patients = pd.read_csv("/kaggle/input/in-hospital-mortality-prediction/data01.csv")
# Show data types
patients.dtypes
# Convert all data types to float64
patients = patients.astype("float64")
patients.dtypes
# Dropping NA field
patients.dropna(inplace=True)
patients
# Show all data parameters
patients.describe()
# Reveal NA field
np.isnan(patients)
# Use to identify unique data - remove # as needed
# patients['age'].unique()
# Define values, dropped "group", "ID" and "outcome" for X
xcolumns = [
"age",
"gendera",
"BMI",
"hypertensive",
"atrialfibrillation",
"CHD with no MI",
"diabetes",
"deficiencyanemias",
"depression",
"Hyperlipemia",
"Renal failure",
"COPD",
"heart rate",
"Systolic blood pressure",
"Diastolic blood pressure",
"Respiratory rate",
"temperature",
"SP O2",
"Urine output",
"hematocrit",
"RBC",
"MCH",
"MCHC",
"MCV",
"RDW",
"Leucocyte",
"Platelets",
"Neutrophils",
"Basophils",
"Lymphocyte",
"PT",
"INR",
"NT-proBNP",
"Creatine kinase",
"Creatinine",
"Urea nitrogen",
"glucose",
"Blood potassium",
"Blood sodium",
"Blood calcium",
"Chloride",
"Anion gap",
"Magnesium ion",
"PH",
"Bicarbonate",
"Lactic acid",
"PCO2",
"EF",
]
df = patients[xcolumns]
# df.head()
yvalues = patients["outcome"]
# Find row number from patient ID post NA filtering and return data row as list for ML code
exist = patients.index[patients.ID == 109787]
if exist.size > 0:
print(exist)
print(df.loc[exist, :].values.tolist())
else:
print("Patient ID does not exist")
# Count Y value
yvalues.value_counts()
# Modeling
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost.sklearn import XGBClassifier
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
# from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
X = df
y = yvalues
# X.shape
# y.shape
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
X = my_imputer.fit_transform(X)
from sklearn.model_selection import train_test_split
# split data into training and testing
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20
)  # 20% of the data is reserved for testing; the algorithm will never see it.
print(x_train, y_train)
def find_best_model(X, y):
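    # Note: each model below is fit and scored on the same X and y passed in,
    # so the printed scores are training accuracies rather than held-out performance.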
# Logistic Regression
logreg = LogisticRegression(max_iter=500)
logreg.fit(X, y)
logreg_acc = round(logreg.score(X, y) * 100, 2)
# Decision Tree
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X, y)
decision_tree_acc = round(decision_tree.score(X, y) * 100, 2)
# Random Forest
random_forest = RandomForestClassifier()
random_forest.fit(X, y)
random_forest_acc = round(random_forest.score(X, y) * 100, 2)
# XGBoost
xgb = XGBClassifier()
xgb.fit(X, y)
xgb_acc = round(xgb.score(X, y) * 100, 2)
# GBM
gbm = GradientBoostingClassifier()
gbm.fit(X, y)
gbm_acc = round(gbm.score(X, y) * 100, 2)
# LightGBM
lgbm = LGBMClassifier()
lgbm.fit(X, y)
lgbm_acc = round(lgbm.score(X, y) * 100, 2)
# Catboost
catb = CatBoostClassifier(verbose=0)
catb.fit(X, y)
catb_acc = round(catb.score(X, y) * 100, 2)
# Histogram-based Gradient Boosting Classification Tree
hgb = HistGradientBoostingClassifier()
hgb.fit(X, y)
hgb_acc = round(hgb.score(X, y) * 100, 2)
model_df = pd.DataFrame(
{
"Model": [
"Logistic Regression",
"Decision Tree",
"Random Forest",
"XGBoost",
"GBM",
"LightGBM",
"Catboost",
"HistBoost",
],
"Score": [
logreg_acc,
decision_tree_acc,
random_forest_acc,
xgb_acc,
gbm_acc,
lgbm_acc,
catb_acc,
hgb_acc,
],
}
)
print(model_df.sort_values("Score", ascending=False).reset_index(drop=True))
find_best_model(x_train, y_train)
random_forest = RandomForestClassifier()
random_forest.fit(x_train, y_train)
samples = [
78.0,
2.0,
37.85143414,
1.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
76.38461538,
95.44444444,
60.25925926,
21.75,
36.12037037,
94.38461538,
1766.0,
34.1625,
4.2175,
26.0125,
32.125,
81.0,
19.0625,
4.8375,
172.25,
70.9,
0.5,
17.9,
14.2,
1.2,
24440.0,
24.0,
1.3,
32.72727273,
88.0,
3.481818182,
142.8181818,
8.32,
107.4545455,
12.54545455,
2.01,
7.333333333,
26.36363636,
0.75,
52.0,
55.0,
]
samples = pd.Series(data=samples)
sample_X = np.array(samples).reshape(1, -1)
answer = random_forest.predict_proba(sample_X)
treatment = random_forest.predict(sample_X)
print(treatment)
if treatment == 0:
    print(f"With {answer[0][0]*100:.1f}% estimated probability, you are LIKELY to leave hospital ALIVE.")
else:
    print(f"With {answer[0][1]*100:.1f}% estimated probability, you are UNLIKELY to leave hospital ALIVE.")
# DATA ANALYTICS =>
# import matplotlib.pyplot as plt
# plt.style.use('seaborn-whitegrid')
## Set Matplotlib defaults
# plt.rc('figure', autolayout=True)
# plt.rc('axes', labelweight='bold', labelsize='large',
# titleweight='bold', titlesize=18, titlepad=10)
# plt.rc('animation', html='html5')
## Setup feedback system
# from learntools.core import binder
# binder.bind(globals())
# from learntools.deep_learning_intro.ex6 import *
# import seaborn as sns
# fig, ax=plt.subplots(1,figsize=(8,6))
# sns.scatterplot(x='atrialfibrillation' ,y='outcome', data=patients)
# plt.title("Blood sodium vs Blood Calcium")
# plt.show()
# fig, ax=plt.subplots(1,figsize=(8,6))
# sns.countplot(x='Anion gap' ,hue='atrialfibrillation', data=patients)
# plt.title("treatment vs Gender")
# plt.show()
# from statsmodels.stats.proportion import proportions_ztest
##perform one proportion z-test
# proportions_ztest(count=, nobs=1177, value=0.64)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv(
"/kaggle/input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv"
)
df
# Exploring and cleaning the data
# null values
# duplicate values
# datatypes
# rename columns/make columns consistent - in EDAP-2
# add or remove columns - may need to perform addition, subtraction, multiplication and division.
# Split strings at commas in a column and form two distinct columns
# averaging and filling data at null values
df.head()
df.shape
df.describe()
# check for datatypes of different columns.
df.dtypes
# Year should be numeric
# Date should be datetime
# Object datatype means String
df["Year"] = pd.to_numeric(df["Year"])
# when we want all the rows that donot have any null value in a particular column.
df[df["Medal"].notnull()].head()
# count() counts every entry here (True and False alike), hence it cannot be used to count null values.
df.isnull().count()
# isnull gives boolean values
df.isnull()
# To identify null values you have to use sum function.
df.isnull().sum()
# dropna drops all rows with null values
df.dropna().head()
# Use fillna()
# To fill null values with the average of the remaining values in the same column, use the code below.
# (This may show an error after a kernel restart; otherwise the code works.)
average = np.round(np.mean(df.Age), 1)
df["Age"] = df["Age"].fillna(average)
print("Average age: ", average)
df.head()
df.isnull().sum()
# Use fillna()
# To fill null values with the average of the remaining values in the same column, use the code below.
# (This may show an error after a kernel restart; otherwise the code works.)
average = np.round(np.mean(df.Weight), 1)
df["Weight"] = df["Weight"].fillna(average)
print("Average Weight: ", average)
df.head()
# Use fillna()
# To fill null values with the average of the remaining values in the same column, use the code below.
# (This may show an error after a kernel restart; otherwise the code works.)
average = np.round(np.mean(df.Height), 1)
df["Height"] = df["Height"].fillna(average)
print("Average Height: ", average)
df.head()
df.isnull().sum()
df.head()
# Duplicate rows(Get boolean values)
# True boolean value indicates a duplicate row.
df.duplicated()
# Duplicate rows. Will return duplicate rows only. Wont return the original/first row.
df.loc[df.duplicated()].head()
# How many rows are duplicated
df.duplicated().sum()
# Drop duplicated rows, keeping the first occurrence
df = df.drop_duplicates(keep="first")
df.head()
df.shape
# count also counts null values. Hence cannot be used for null values.
df.isnull().sum()
# Changing the column name
df = df.rename({"Weight": "Weight(in kg)", "Height": "Height(in cm)"}, axis=1)
df.head()
df1 = df["Sport"].value_counts()
df1.head(20)
# Queries -
# Athletics - relation with age, weight and height and optimal characteristics for a gold medal.
# Women and Men - trend in participation of women and Men in olympics with time.
# Football - Which team has won the highest number of Golds in Olympic Football event.
# ATHLETICS RELATIONSHIP WITH HEIGHT, AGE AND WEIGHT
df3 = df[(df.Sport == "Athletics") & (df.Medal == "Gold")][
["Year", "Age", "Height(in cm)", "Weight(in kg)"]
]
df3
# OLYMPIC YEAR-WISE MEAN CHARACTERISTICS OF PEOPLE WHO WON A GOLD MEDAL IN ATHLETICS
df4 = df3.groupby("Year")[["Age", "Height(in cm)", "Weight(in kg)"]].mean()
df4.head()
# PLOTTING YEAR TO MEAN AGE, MEAN HEIGHT AND MEAN WEIGHT FOR GOLD MEDALISTS IN OLYMPICS
plt.figure(figsize=(15, 5))
sns.lineplot(x="Year", y="Age", data=df4)
sns.lineplot(x="Year", y="Weight(in kg)", data=df4)
sns.lineplot(x="Year", y="Height(in cm)", data=df4)
plt.legend(labels=["Age", "Weight(in kg)", "Height(in cm)"])
plt.ylabel("Age, weight, Height")
plt.show()
# Women participation in olympics.
df6 = df[df.Sex == "F"].groupby("Year")[["Sex"]].count()
df6.head()
# Plotting participation of Men in olympics with time
df7 = df[df.Sex == "M"].groupby("Year")[["Sex"]].count()
df7.head(10)
# Plotting the participation of men and women in olympics with time
plt.figure(figsize=(15, 5))
sns.lineplot(x="Year", y="Sex", data=df6)
sns.lineplot(x="Year", y="Sex", data=df7)
plt.show()
# Which team has won the highest number of Golds in Olympics Football Events.
df8 = df[(df.Sport == "Football") & (df.Medal == "Gold")]
df8.head()
df9 = df8["Team"].value_counts().reset_index()
df9["Team"] = (df9["Team"] / 17).round(0)
df9.head()
plt.pie(data=df9, x="Team", radius=2, labels="index", autopct="%.0f%%")
plt.show()
|
# Import libraries
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as implt
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Helper functions
def plot_loss_curve(history):
loss = history.history["loss"]
val_loss = history.history["val_loss"]
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
epochs = range(len(loss))
# Plot loss
plt.plot(epochs, loss, label="training_loss")
plt.plot(epochs, val_loss, label="val_loss")
plt.title("Loss")
plt.xlabel("Epochs")
plt.legend()
# Plot accuracy
plt.figure()
plt.plot(epochs, accuracy, label="training_accuracy")
plt.plot(epochs, val_accuracy, label="val_accuracy")
plt.title("Accuracy")
plt.xlabel("Epochs")
plt.legend()
# Create a variable to store our data folder's path
ROOT = "/kaggle/input/cats-in-the-wild-image-classification"
# Inspect our data folder
for dir_name, folder_names, file_names in os.walk(ROOT):
print(
f"There is {len(folder_names)} folders and {len(file_names)} files in {dir_name}"
)
# Create train, validation and test directory
train_dir = ROOT + "/train/"
valid_dir = ROOT + "/valid/"
test_dir = ROOT + "/test/"
# Create train, validation and test datasets
tf.random.set_seed(42)
IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32
print("Create train dataset...")
train_data = image_dataset_from_directory(
directory=train_dir,
image_size=IMAGE_SHAPE,
batch_size=BATCH_SIZE,
label_mode="categorical",
)
print("Create validation dataset...")
valid_data = image_dataset_from_directory(
directory=valid_dir,
image_size=IMAGE_SHAPE,
batch_size=BATCH_SIZE,
label_mode="categorical",
)
print("Create test dataset...")
test_data = image_dataset_from_directory(
directory=test_dir,
image_size=IMAGE_SHAPE,
batch_size=BATCH_SIZE,
label_mode="categorical",
)
# Inspect our train dataset
train_data
# Inspect our validation dataset
valid_data
# Inspect our labels
train_data.class_names
# Get a single batch for example
for image, label in train_data.take(1):
print(image, label)
# Create a model with transfer learning, with EfficientNetB0 and feature extraction
# Set the random seed
tf.random.set_seed(42)
# Create base model
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False
# Create inputs
inputs = tf.keras.layers.Input(shape=(224, 224, 3), name="input_layer")
# Pass inputs to the base model
x = base_model(inputs)
# Create pooling layer
x = tf.keras.layers.GlobalAveragePooling2D(name="pooling_layer")(x)
# Create outputs
outputs = tf.keras.layers.Dense(10, activation="softmax", name="output_layer")(x)
# Create an instance of our model
model_0 = tf.keras.Model(inputs, outputs)
# Compile the model
model_0.compile(
loss=tf.keras.losses.CategoricalCrossentropy(),
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"],
)
# Fit the model
history_0 = model_0.fit(
train_data,
epochs=5,
steps_per_epoch=len(train_data),
validation_data=valid_data,
validation_steps=len(valid_data),
)
# Plot history_0
pd.DataFrame(history_0.history).plot()
# Plot loss and accuracy separately
plot_loss_curve(history_0)
# Evaluate on test data
model_0.evaluate(test_data)
|
# # Introduction
# Neural Collaborative Filtering (NCF) - a deep matrix factorization (MF) model that uses a neural network to model the interactions between users and items. NCF can be trained using stochastic gradient descent and has been shown to outperform traditional MF methods like SVD and NMF.
# Source: Factorization Machine models in PyTorch (https://github.com/rixwew/pytorch-fm)
# This version of code uses MovieLens20M dataset.
#
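# In the NeuMF variant implemented below, the score for a (user u, item i) pair is
#   y_hat(u, i) = sigmoid( w^T [ p_u * q_i ; MLP([p_u ; q_i]) ] + b )
# where p_u and q_i are the (shared) user/item embeddings, * is the element-wise (GMF)
# product and ; denotes concatenation, matching the forward() method further down.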
import numpy as np
import pandas as pd
import os
import tqdm
import torch
from torch.utils.data import DataLoader
from sklearn.metrics import roc_auc_score, mean_squared_error
from pt_layer import FeaturesEmbedding, MultiLayerPerceptron
# # Read the data
ratings = pd.read_csv(
"/kaggle/input/movielens-20m-dataset/rating.csv",
error_bad_lines=False,
warn_bad_lines=False,
)
# Check the 20M ratings data.
ratings.head()
ratings.shape
# # Prepare the dataset
class MovieLensDataset(torch.utils.data.Dataset):
"""
MovieLens Dataset
Data preparation
treat samples with a rating less than 3 as negative samples
"""
def __init__(self, ratings):
data = ratings.copy().to_numpy()
self.items = data[:, :2].astype(np.int32) - 1 # -1 because ID begins from 1
self.targets = self.__preprocess_target(data[:, 2]).astype(np.float32)
self.field_dims = np.max(self.items, axis=0) + 1
self.user_field_idx = np.array((0,), dtype=np.int64)
self.item_field_idx = np.array((1,), dtype=np.int64)
def __len__(self):
return self.targets.shape[0]
def __getitem__(self, index):
return self.items[index], self.targets[index]
def __preprocess_target(self, target):
target[target < 3] = 0
target[target >= 3] = 1
return target
def get_dataset():
return MovieLensDataset(ratings)
# # Prepare the algorithm
# ## The model
class NeuralCollaborativeFiltering(torch.nn.Module):
"""
A pytorch implementation of Neural Collaborative Filtering.
Reference:
X He, et al. Neural Collaborative Filtering, 2017.
"""
def __init__(
self, field_dims, user_field_idx, item_field_idx, embed_dim, mlp_dims, dropout
):
super().__init__()
self.user_field_idx = user_field_idx
self.item_field_idx = item_field_idx
self.embedding = FeaturesEmbedding(field_dims, embed_dim)
self.embed_output_dim = len(field_dims) * embed_dim
self.mlp = MultiLayerPerceptron(
self.embed_output_dim, mlp_dims, dropout, output_layer=False
)
self.fc = torch.nn.Linear(mlp_dims[-1] + embed_dim, 1)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_user_fields)``
"""
x = self.embedding(x)
user_x = x[:, self.user_field_idx].squeeze(1)
item_x = x[:, self.item_field_idx].squeeze(1)
x = self.mlp(x.view(-1, self.embed_output_dim))
gmf = user_x * item_x
x = torch.cat([gmf, x], dim=1)
x = self.fc(x).squeeze(1)
return torch.sigmoid(x)
# ## Early stopper
class EarlyStopper(object):
def __init__(self, num_trials, save_path):
self.num_trials = num_trials
self.trial_counter = 0
self.best_accuracy = 0
self.save_path = save_path
def is_continuable(self, model, accuracy):
if accuracy > self.best_accuracy:
self.best_accuracy = accuracy
self.trial_counter = 0
torch.save(model, self.save_path)
return True
elif self.trial_counter + 1 < self.num_trials:
self.trial_counter += 1
return True
else:
return False
def get_model(dataset):
field_dims = dataset.field_dims
return NeuralCollaborativeFiltering(
field_dims,
embed_dim=64,
mlp_dims=(32, 32),
dropout=0.2,
user_field_idx=dataset.user_field_idx,
item_field_idx=dataset.item_field_idx,
)
# ## Train
def train(model, optimizer, data_loader, criterion, device, log_interval=100):
model.train()
total_loss = 0
tk0 = tqdm.tqdm(data_loader, smoothing=0, mininterval=1.0)
for i, (fields, target) in enumerate(tk0):
fields, target = fields.to(device), target.to(device)
y = model(fields)
loss = criterion(y, target.float())
model.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
if (i + 1) % log_interval == 0:
tk0.set_postfix(loss=total_loss / log_interval)
total_loss = 0
# ## Test/validation
def test(model, data_loader, device):
model.eval()
targets, predicts = list(), list()
with torch.no_grad():
for fields, target in tqdm.tqdm(data_loader, smoothing=0, mininterval=1.0):
fields, target = fields.to(device), target.to(device)
y = model(fields)
targets.extend(target.tolist())
predicts.extend(y.tolist())
return roc_auc_score(targets, predicts)
# ## Settings
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda:0")
learning_rate = 0.001
weight_decay = 1e-6
batch_size = 2048
epochs = 10
model_name = "ncf"
# # Prepare train, valid & test datasets
dataset = get_dataset()
train_length = int(len(dataset) * 0.8)
valid_length = int(len(dataset) * 0.1)
test_length = len(dataset) - train_length - valid_length
train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(
dataset, (train_length, valid_length, test_length)
)
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=2)
valid_data_loader = DataLoader(valid_dataset, batch_size=batch_size, num_workers=2)
test_data_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=2)
# ## Fit the model
model = get_model(dataset).to(device)
criterion = torch.nn.BCELoss()
optimizer = torch.optim.Adam(
params=model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
early_stopper = EarlyStopper(num_trials=5, save_path=f"{model_name}.pt")
auc_values = []
for epoch_i in range(epochs):
train(model, optimizer, train_data_loader, criterion, device)
auc_valid = test(model, valid_data_loader, device)
print("epoch:", epoch_i, "validation: auc:", auc_valid)
if not early_stopper.is_continuable(model, auc_valid):
print(f"validation: best auc: {early_stopper.best_accuracy}")
break
auc_test = test(model, test_data_loader, device)
auc_values.append((epoch_i, auc_valid, auc_test))
print(f"test auc: {auc_test}")
from matplotlib import pyplot as plt
auc_values = np.array(auc_values)
f, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.plot(auc_values[:, 1], label="validation AUC")
ax.plot(auc_values[:, 2], label="test AUC")
ax.legend()
plt.suptitle("Validation/Test AUC by epoch")
plt.show()
|
import string
import numpy as np
import pandas as pd
from numpy import array
from pickle import load
from PIL import Image
import pickle
from collections import Counter
import matplotlib.pyplot as plt
import sys, time, os, warnings
warnings.filterwarnings("ignore")
import re
import keras
import tensorflow as tf
from tqdm import tqdm
from nltk.translate.bleu_score import sentence_bleu
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import os
import random
from random import randint
import cv2
image_path = "/kaggle/input/final-year-project-51/bengali-visual-genome-10/bengali-visual-genome-train.images"
dir_bengali_text = "/kaggle/input/final-year-project-51/bengali-visual-genome-10/bengali-visual-genome-train.txt"
jpgs = os.listdir(image_path)
print("Total Images in Dataset = {}".format(len(jpgs)))
df = pd.read_csv(dir_bengali_text, sep="\t")
df.columns = ["image_id", "X", "Y", "Width", "Height", "English Text", "Bengali Text"]
df.head()
# saving data
df.to_csv("data.csv")
image_ids = list(df["image_id"])
english_texts = list(df["English Text"])
bengali_texts = list(df["Bengali Text"])
english_captions = {}
bengali_captions = {}
i = 0
for id in image_ids:
english_captions[id] = english_texts[i]
bengali_captions[id] = bengali_texts[i]
i += 1
len(image_ids)
dirs = []
for file in os.listdir(image_path):
dirs.append(os.path.join(image_path, file))
len(dirs)
def image_function(var, title):
    plt.imshow(var)
plt.grid("off")
plt.axis("off")
plt.title(title)
def get_id(image_name):
x = image_name.split(".")
x = x[1].split("/")
return int(x[-1])
def get_english_caption(id):
return english_captions[id]
def get_bengali_caption(id):
return bengali_captions[id]
# plotting image
img = cv2.imread(dirs[3])
id = get_id(dirs[3])
image_function(img, get_english_caption(id))
size_of_dirs = len(dirs)
plt.figure(figsize=(16, 10))
for i in range(16):
ax = plt.subplot(4, 4, i + 1)
img_ind = randint(0, 1000)
img = cv2.imread(dirs[img_ind])
id = get_id(dirs[img_ind])
image_function(img, get_english_caption(id))
plt.figure(figsize=(16, 10))
for i in range(16):
ax = plt.subplot(4, 4, i + 1)
img_ind = randint(0, 1000)
img = cv2.imread(dirs[img_ind])
id = get_id(dirs[img_ind])
image_function(img, get_english_caption(id))
img_ind = randint(0, 1000)
img = cv2.imread(dirs[img_ind])
id = get_id(dirs[img_ind])
image_function(img, get_english_caption(id))
print("Bengali Caption : {}".format(bengali_captions[id]))
img_ind = randint(0, 1000)
img = cv2.imread(dirs[img_ind])
id = get_id(dirs[img_ind])
image_function(img, get_english_caption(id))
print("Bengali Caption : {}".format(bengali_captions[id]))
img_ind = randint(0, 1000)
img = cv2.imread(dirs[img_ind])
id = get_id(dirs[img_ind])
image_function(img, get_english_caption(id))
print("Bengali Caption : {}".format(bengali_captions[id]))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # drawing graphs
import matplotlib.colors as colors
from sklearn.utils import resample # downsample the dataset
from sklearn.model_selection import train_test_split # train-test split
from sklearn.preprocessing import StandardScaler # scale and center the data
from sklearn.svm import SVC # SVM for classification
from sklearn.model_selection import GridSearchCV # this will do cross validation
from sklearn.metrics import confusion_matrix # this creates a confusion matrix
from sklearn.metrics import plot_confusion_matrix # draws a confusion matrix
from sklearn.decomposition import PCA # to perform PCA to plot the data
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load the dataset
df = pd.read_csv(
"/kaggle/input/default-of-credit-card-clients-dataset/UCI_Credit_Card.csv"
)
df.head()
# # Content
# There are 25 variables:
# * **ID**: ID of each client
# * **LIMIT_BAL**: Amount of given credit in NT dollars (includes individual and family/supplementary credit
# * **SEX**: Gender (1=male, 2=female)
# * **EDUCATION**: (1=graduate school, 2=university, 3=high school, 4=others, 5=unknown, 6=unknown)
# * **MARRIAGE**: Marital status (1=married, 2=single, 3=others)
# * **AGE**: Age in years
# * **PAY_x**: Repayment status for last 6 months (-1=pay duly, 1=payment delay for one month, 2=payment delay for two months, … 8=payment delay for eight months, 9=payment delay for nine months and above)
# * **BILL_AMTx**: Amount of bill statement for last 6 months (NT dollar)
# * **PAY_AMTx**: Amount of previous payment for last 6 months (NT dollar)
# * **default.payment.next.month**: Default payment (1=yes, 0=no)
# Rename Default column
# Drop ID column
df.rename({"default.payment.next.month": "DEFAULT"}, axis=1, inplace=True)
df.drop("ID", axis=1, inplace=True)
df.head(2)
# # Missing Data
# ## 1. Identifying Missing Data
df.dtypes
# Since every column is int64/float64, it's good, since none of them would have **NA** or other character based placeholder for missing values in dataframe **df**.
# Lets make sure the column contains values which are acceptable and mentioned in the dataset description
# [dataset description](#content)
# 1. Let's make sure SEX has 2 values 1 & 2
df["SEX"].unique()
# 2. Education should have 1-6 values, with 5,6 - unknown values
df["EDUCATION"].unique()
# 0 is not specified in the description, might be missing numbers
# 5,6 are unknown values
# 3. Marriage should have 1-3 values
df["MARRIAGE"].unique()
# Like Education, Marriage also contains value 0, guess is that those are missing records
# 4. AGE
df["AGE"].describe()
# 5. Default
df["DEFAULT"].unique()
# ## 2. Dealing with Missing Data
len(df.loc[(df["EDUCATION"] == 0) | (df["MARRIAGE"] == 0)])
# So, only 68 rows have missing values. Now let's count the total number of rows in the dataset
100 * len(df.loc[(df["EDUCATION"] == 0) | (df["MARRIAGE"] == 0)]) / len(df)
# Less than 1% of data is missing, so we'll drop these records.
df_no_missing = df.loc[(df["EDUCATION"] != 0) & (df["MARRIAGE"] != 0)]
df_no_missing.shape
print(
"Education-",
df_no_missing["EDUCATION"].unique(),
"Marriage-",
df_no_missing["MARRIAGE"].unique(),
)
# # Downsample the data
# **Support Vector Machines** are great with small datasets, but not awesome with large ones, and this dataset, while not huge, is big enough to take a long time to optimize with **Cross Validation**. So we'll downsample both categories, customers who did and did not default, to 1,000 each.
# Let's check the customers in dataset
df_no_missing["DEFAULT"].value_counts()
df_no_default = df_no_missing[df_no_missing["DEFAULT"] == 0]
df_default = df_no_missing[df_no_missing["DEFAULT"] == 1]
# Now, downsample the dataset that did not default --
df_no_default_downsampled = resample(
df_no_default, replace=False, n_samples=1000, random_state=42
)
len(df_no_default_downsampled)
# Now, downsample the dataset that default --
df_default_downsampled = resample(
df_default, replace=False, n_samples=1000, random_state=42
)
len(df_default_downsampled)
# **Merge** these two downsampled dataframes into single dataframe--
df_downsample = pd.concat([df_no_default_downsampled, df_default_downsampled])
len(df_downsample)
# # Format Data Part 1: Split the data into dependent & independent variables
# **Note:** We'll use the `.copy()` method to create the X & y dataframes, because by default pandas can return a view that still references the original data. Using `copy()` ensures the original data is not modified if we later change X & y, so we won't have to reload the whole dataset and repeat the pre-processing steps.
X = df_downsample.drop(["DEFAULT"], axis=1).copy()
y = df_downsample["DEFAULT"].copy()
X.head(2)
y.head(2)
# # Format the Data Part2: One-Hot Encoding
# SEX, EDUCATION, MARRIAGE & PAY_x are supposed to be categorical, so they'll be modified. This is because, while sklearn **Support Vector Machines** natively support continuous data, like LIMIT_BAL and AGE, they do not natively support categorical data, like MARRIAGE, which contains 3 different categories. Thus, we'll perform OHE on those columns.
X_encoded = pd.get_dummies(
X,
columns=[
"SEX",
"EDUCATION",
"MARRIAGE",
"PAY_0",
"PAY_2",
"PAY_3",
"PAY_4",
"PAY_5",
"PAY_6",
],
)
X_encoded.head()
# # Format the Data Part3: Centering and Scaling
# The **Radial Basis Function (RBF)** that we are using with our **Support Vector Machine** assumes that the data are centered and scaled. In other words, each column should have a mean value=0 and a standard deviation of 1. So we neeed to do this to both the training and testing datasets.
# ***NOTE***: We split the data into training and testing datasets and then scale them separately to avoid the **Data Leakage**.
# **Data Leakage** occurs when information about the training dataset corrupts or influences the testing dataset.
X_train, X_test, y_train, y_test = train_test_split(X_encoded, y, random_state=42)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# # Build a Preliminary Support Vector Machine
clf_svm = SVC(random_state=42)
clf_svm.fit(X_train_scaled, y_train)
# OK, so we've built a **SVM** for classification. Let's check how it performs on the **Testing Dataset** and then draw a **Confusion Matrix**.
plot_confusion_matrix(
clf_svm,
X_test_scaled,
y_test,
values_format="d",
display_labels=["Did not default", "Defaulted"],
)
# # Optimize Parameters with Cross Validation and GridSearchCV
# Since we have two parameters to optimize, we will use `GridSearchCV`. We specify a bunch of potential values for **gamma** and **C**, and `GridSearchCV` will search all possible combinations of the parameters.
param_grid = [
{
"C": [0.5, 1, 10, 100], # Values of C should be > 0
"gamma": ["scale", 1, 0.1, 0.01, 0.001, 0.0001],
"kernel": ["rbf"],
},
]
## I have added C=1 & gamma='scale' as possible choices as well, since they are default values
optimal_params = GridSearchCV(SVC(), param_grid, cv=5, scoring="accuracy", verbose=0)
# Try changing scoring
## didn't change the results much in my case
optimal_params.fit(X_train_scaled, y_train)
print(optimal_params.best_params_)
# And we can see that the ideal value is `C = 100`, which means we'll use regularization, and the ideal value is `gamma = 0.001`
# # Building, Evaluating, Drawing and Interpreting the Final SVM
# Now using the obtained values of C and gamma we'll build our final model
clf_svm = SVC(C=100, gamma=0.001, random_state=42)
clf_svm.fit(X_train_scaled, y_train)
plot_confusion_matrix(
clf_svm,
X_test_scaled,
y_test,
values_format="d",
display_labels=["Did not default", "Defaulted"],
)
# And the results from the optimized **Support Vector Machine** are just a little bit better than before. **2** more people were correctly classified as not defaulting.
# In other words, the **SVM** was pretty good straight out of the box without much optimization. This makes **SVMs** a great, quick and dirty method for relatively small datasets.
# **NOTE:** Although classification with this dataset and an **SVM** is not awesome, it may be better than other methods. We'd have to compare to find out.
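# As a minimal sketch of how such a comparison could start (assuming a plain logistic
# regression with default regularization as the alternative method):
from sklearn.linear_model import LogisticRegression
baseline_lr = LogisticRegression(max_iter=1000, random_state=42)
baseline_lr.fit(X_train_scaled, y_train)
print("Logistic regression test accuracy:", baseline_lr.score(X_test_scaled, y_test))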
# The last thing we are going to do is draw a support vector machine decision boundary and discuss how to interpret it.
# The first thing we need to do is count the number of columns in **X**:
len(df_downsample.columns)
pca = PCA() # By default, PCA centers the data but does not scale it.
X_train_pca = pca.fit_transform(X_train_scaled)
per_var = np.round(pca.explained_variance_ratio_ * 100, decimals=1)
labels = [str(x) for x in range(1, len(per_var) + 1)]
plt.bar(x=range(1, len(per_var) + 1), height=per_var)
plt.tick_params(
axis="x", # changes apply to the x-axis
which="both", # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False, # labels along the bottom edge are off
)
plt.ylabel("Percentage of explained variance")
plt.xlabel("Principal Components")
plt.title("Scree Plot")
plt.show()
# The scree plot shows that the first principal component, PC1, accounts for a relatively large amount of variation in the raw data, and this means it will be a good candidate for the x-axis in the 2-D graph. However PC2 is not much different from PC3 or PC4, which doesn't bode well for dimension reduction.
# Since we don't have a choice let's go with it. Graph will look funky though!!
# Let's draw the PCA graph. First let's optimize an SVM fit to PC1 and PC2.
train_PC1_coords = X_train_pca[:, 0]
train_PC2_coords = X_train_pca[:, 1]
## NOTE:
## PC1 contains x-axis coordinates of the data after PCA
## PC2 contains y-axis coordinates of the data after PCA
## Now center and scale the PCs...
from sklearn.preprocessing import scale  # scale() centers to zero mean and unit variance
pca_train_scaled = scale(np.column_stack((train_PC1_coords, train_PC2_coords)))
## Now we optimize the SVM fit to the x-axis and y-axis coordinates
## of the data after PCA dimensionality reduction...
param_grid = [
{
"C": [1, 10, 100, 1000], # Values of C should be > 0
"gamma": ["scale", 1, 0.1, 0.01, 0.001, 0.0001],
"kernel": ["rbf"],
},
]
optimal_params = GridSearchCV(SVC(), param_grid, cv=5, scoring="accuracy", verbose=0)
optimal_params.fit(pca_train_scaled, y_train)
print(optimal_params.best_params_)
# Now that we have the optimal values for `C` and `gamma` let's draw the graph
clf_svm = SVC(C=1000, gamma=0.001, random_state=42)
clf_svm.fit(pca_train_scaled, y_train)
## Transform the scaled training data with the PCA (the scatter below is colored with y_train)...
X_test_pca = pca.transform(X_train_scaled)
# X_test_pca = pca.transform(X_test_scaled)
test_pc1_coords = X_test_pca[:, 0]
test_pc2_coords = X_test_pca[:, 1]
## Now create a matrix of points that we can use to show the decision regions.
## The matrix will be a little bit larger than the transformed
## PCA points so that we can plot all of the PCA points on it without them being on the edge
x_min = test_pc1_coords.min() - 1
x_max = test_pc1_coords.max() + 1
y_min = test_pc2_coords.min() - 1
y_max = test_pc2_coords.max() + 1
xx, yy = np.meshgrid(
np.arange(start=x_min, stop=x_max, step=0.1),
np.arange(start=y_min, stop=y_max, step=0.1),
)
## Now we'll classify every point in that
## matrix with the SVM. Points on one side of the
## Classification boundary will get 0, and points on the other
## side will get 1.
Z = clf_svm.predict(np.column_stack((xx.ravel(), yy.ravel())))
Z = Z.reshape(xx.shape)
## Plot the decision boundary and the scatter plot of the PCA-transformed test data
fig, ax = plt.subplots(figsize=(10, 10))
ax.contourf(xx, yy, Z, alpha=0.1)
cmap = colors.ListedColormap(["#e41a1c", "#4daf4a"])
scatter = ax.scatter(
test_pc1_coords,
test_pc2_coords,
c=y_train,
cmap=cmap,
s=100,
edgecolors="k",
alpha=0.7,
)
legend = ax.legend(
scatter.legend_elements()[0], scatter.legend_elements()[1], loc="upper right"
)
legend.get_texts()[0].set_text("No Default")
legend.get_texts()[1].set_text("Yes Default")
## now add axis labels and titles
ax.set_ylabel("PC2")
ax.set_xlabel("PC1")
ax.set_title("Decision surface using the PCA transformed/projected features")
plt.show()
|
# # Which Star Cluster is Older?
# ### Determine if NGC 188 or M67 is the older of the two star clusters by plotting a Color-Magnitude Diagram.
# ### This notebook is a simple example of how to use the data.
# Please see https://www.kaggle.com/datasets/austinhinkel/gaia-dr3-data-for-comparing-two-star-clusters for dataset information. This limited example shows a color-magnitude diagram so that students can determine which star cluster is older. It differs from https://www.kaggle.com/code/austinhinkel/quickstarclusterexample as it has a slight step up in complexity -- students must first determine which stars belong to the star clusters before analyzing them.
# Tags: Data visualization, Data story-telling, astronomy, Gaia
# Imports:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # plotting
# Read in (unfiltered) Data:
# M67:
dataM67 = np.loadtxt(
"/kaggle/input/gaia-dr3-data-for-comparing-two-star-clusters/M67_full.csv",
delimiter=",",
skiprows=1,
)
prlx_M67 = dataM67[:, 0] # Parallax
gmag_M67 = dataM67[:, 1] # G-band magnitude
bpmrp_M67 = dataM67[:, 2] # BP minus RP color
pmra_M67 = dataM67[:, 3] # proper motion ra direction
pmdec_M67 = dataM67[:, 4] # proper motion dec direction
# NGC 188:
dataNGC188 = np.loadtxt(
"/kaggle/input/gaia-dr3-data-for-comparing-two-star-clusters/NGC188_full.csv",
delimiter=",",
skiprows=1,
)
prlx_NGC188 = dataNGC188[:, 0] # Parallax
gmag_NGC188 = dataNGC188[:, 1] # G-band magnitude
bpmrp_NGC188 = dataNGC188[:, 2] # BP minus RP color
pmra_NGC188 = dataNGC188[:, 3] # proper motion ra direction
pmdec_NGC188 = dataNGC188[:, 4] # proper motion dec direction
# calculate distances:
d_M67 = 1.0 / prlx_M67 # kiloparsecs
d_NGC188 = 1.0 / prlx_NGC188 # kiloparsecs
# calculate absolute magnitudes
AbsMag_M67 = gmag_M67 - 5.0 * np.log10(d_M67 / 0.01)
AbsMag_NGC188 = gmag_NGC188 - 5.0 * np.log10(d_NGC188 / 0.01)
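# Absolute magnitude via the distance modulus: M = m - 5*log10(d / 10 pc).
# The distances above are in kiloparsecs, and 10 pc = 0.01 kpc, hence the 0.01 in the denominator.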
# Find star clusters by looking at their proper motion (~speed) through space.
# They move together and should form an overdensity in proper motion space.
# M67
plt.plot(pmra_M67, pmdec_M67, ",")
plt.xlabel("Proper Motion in Right Ascension Direction (mas/yr)")
plt.ylabel("Proper Motion in Declination Direction (mas/yr)")
plt.title("Proper motions of star in M67 line of sight")
plt.show()
# NGC 188
plt.plot(pmra_NGC188, pmdec_NGC188, ",")
plt.xlabel("Proper Motion in Right Ascension Direction (mas/yr)")
plt.ylabel("Proper Motion in Declination Direction (mas/yr)")
plt.title("Proper motions of star in NGC 188 line of sight")
plt.show()
# Careful! All stars are shown in the above plots.
# The outliers make it seem as though there is a cluster,
# but in reality these stars have proper motions
# that differ by ~100 mas/yr -- no way they are gravitationally bound!
# Find star clusters by looking at their proper motion (~speed) through space.
# They move together and should form an overdensity in proper motion space.
# M67
plt.plot(pmra_M67, pmdec_M67, ",")
plt.xlabel("Proper Motion in Right Ascension Direction (mas/yr)")
plt.ylabel("Proper Motion in Declination Direction (mas/yr)")
plt.title("Proper motions of star in M67 line of sight")
plt.xlim([-40, 40])
plt.ylim([-40, 40])
plt.show()
# NGC 188
plt.plot(pmra_NGC188, pmdec_NGC188, ",")
plt.xlabel("Proper Motion in Right Ascension Direction (mas/yr)")
plt.ylabel("Proper Motion in Declination Direction (mas/yr)")
plt.title("Proper motions of star in NGC 188 line of sight")
plt.xlim([-20, 20])
plt.ylim([-20, 20])
plt.show()
# Now we can start to see the clusters in the above plots. Let's zoom in further.
# M67
plt.plot(pmra_M67, pmdec_M67, ",")
plt.xlabel("Proper Motion in Right Ascension Direction (mas/yr)")
plt.ylabel("Proper Motion in Declination Direction (mas/yr)")
plt.title("Proper motions of star in M67 line of sight")
plt.xlim([-15, -5])
plt.ylim([-10, 0])
plt.show()
# NGC 188
plt.plot(pmra_NGC188, pmdec_NGC188, ",")
plt.xlabel("Proper Motion in Right Ascension Direction (mas/yr)")
plt.ylabel("Proper Motion in Declination Direction (mas/yr)")
plt.title("Proper motions of star in NGC 188 line of sight")
plt.xlim([-5, 0])
plt.ylim([-5, 0])
plt.show()
# Now let's cut the stars to keep only the stars with the correct proper motions
# The remaining stars will belong to the star clusters.
bpmrp_M67_cut = bpmrp_M67[
(pmra_M67 > -12) * (pmra_M67 < -10) * (pmdec_M67 > -4) * (pmdec_M67 < -2)
]
AbsMag_M67_cut = AbsMag_M67[
(pmra_M67 > -12) * (pmra_M67 < -10) * (pmdec_M67 > -4) * (pmdec_M67 < -2)
]
bpmrp_NGC188_cut = bpmrp_NGC188[
(pmra_NGC188 > -2.8)
* (pmra_NGC188 < -1.7)
* (pmdec_NGC188 > -1.5)
* (pmdec_NGC188 < -0.5)
]
AbsMag_NGC188_cut = AbsMag_NGC188[
(pmra_NGC188 > -2.8)
* (pmra_NGC188 < -1.7)
* (pmdec_NGC188 > -1.5)
* (pmdec_NGC188 < -0.5)
]
plt.plot(bpmrp_M67_cut, AbsMag_M67_cut, ",")
plt.plot(bpmrp_NGC188_cut, AbsMag_NGC188_cut, ",")
plt.gca().invert_yaxis()
plt.xlabel("Blue-pass minus Red-pass Color (mag)")
plt.ylabel("Absolute Magnitude (mag)")
plt.title("Color Magnitude Diagrams for M67 and NGC 188 Star Clusters")
plt.legend(["M67 (blue)", "NGC 188 (orange)"])
plt.show()
# Notice the Color-Magnitude Diagrams have different turn-off points.
# This indicates they have different ages.
# Can you figure out which is older?
|
# # MEI Introduction to Data Science
# # Lesson 6 - Activity 1
# # Table of Contents
# * [Introduction](#Introduction)
# * [Problem](#Problem)
# * [Importing libraries and data](#Importing-libraries-and-data)
# * [Pre-processing the data](#Pre-processing-the-data)
# * [Exploring the data](#Exploring-the-data)
# * [Statistics](#Statistics)
# * [Visualisations](#Visualisations)
# * [Checkpoint 1](#Checkpoint-1)
# * [Building a model with a single input feature](#Building-a-model-with-a-single-input-feature)
# * [Building a model based on bill length](#Building-a-model-based-on-bill-length)
# * [Interpreting decision trees](#Interpreting-decision-trees)
# * [Building a model based on other features](#Building-a-model-based-on-other-features)
# * [Checkpoint 2](#Checkpoint-2)
# * [Building a model based on two input features](#Building-a-model-based-on-two-input-features)
# * [Checkpoint 3](#Checkpoint-3)
# # Introduction
# The problem in this activity is an introduction to a different type of *machine learning* from the one you met in lesson 5. In it you will explore how you can predict whether data should be classified into one of two groups based on the values of other features in a dataset. Classifying data into two groups is known as **binary classification**.
# The data for this activity comes from a study of three different species of Antarctic penguin: Adelie, Chinstrap and Gentoo. The data was observed on the islands of the Palmer Archipelago, Antarctica, between 2007 and 2009. There are 344 individual records, each representing a single penguin.
# ## Problem
# ***Can the species of a penguin be predicted using different body measurements?***
# To answer this question you could find statistics and create charts comparing the numerical features recorded for the different species of penguins in the dataset. You can then build a model that predicts the species of penguin based on the numerical features.
# ## Importing libraries and data
# > Run the code boxes below to import the libraries and the data.
# import pandas and seaborn
import pandas as pd
import seaborn as sns
# import modelling functions for decision trees from sklearn
from sklearn.tree import plot_tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# import pyplot from matplotlib for displaying decision trees
import matplotlib.pyplot as plt
# create a plot_decision_tree command for displaying the decision tree
def plot_decision_tree(model, size=14):
fig, ax = plt.subplots(figsize=(18, 8))
plot_tree(
model,
filled=True,
impurity=False,
label="root",
feature_names=input_features,
proportion=True,
class_names=["No", "Yes"],
ax=ax,
fontsize=size,
)
plt.show()
# import the data
penguin_data = pd.read_csv("/kaggle/input/penguins/penguins.csv")
# display the data to check it has imported
penguin_data
# This data set contains information about penguins observed on the islands of the Palmer Archipelago, Antarctica between 2007 and 2009.
# Features:
# * **Species**: *Adélie, Chinstrap or Gentoo*
# * **Island**: *Where the penguin was observed (Biscoe, Dream or Torgersen)*
# * **bill_length_mm**: *Bill length (mm)*
# * **bill_depth_mm**: *Bill depth (mm)*
# * **flipper_length_mm**: *Flipper length (mm)*
# * **body_mass_g**: *Body mass (g)*
# * **sex**: *male or female*
# * **year**: *2007, 2008 or 2009*
# > Run the code below to explore the data types for the features.
# display the data types
penguin_data.info()
# # Pre-processing the data
# ## Removing incomplete rows
# Some of the rows are incomplete. You can see this in the table, where values are recorded as `NaN` and in the count of the `non-null` values for `sex`.
# These rows can be removed before analysing the data. You can do this using the `.dropna()` command.
# > Run the code below to remove the rows which have missing values.
# import the data - the .dropna() function removes any rows with missing data
penguin_data = penguin_data.dropna().copy()
# display the data to check
penguin_data
# There are 333 penguins for which there is complete data.
# ## Adding a binary feature
# In this activity you are going to create a model that predicts whether a penguin is a Gentoo penguin. To do this you will need a *binary* feature that is 0 or 1 depending on whether the penguin is a Gentoo.
# > Run the code below to create a new feature `gentoo` that is 1 for all the Gentoo penguins and 0 otherwise.
# add a binary feature to identify Gentoo penguins
penguin_data["gentoo"] = penguin_data["species"].replace(
{"Adelie": 0, "Chinstrap": 0, "Gentoo": 1}
)
# check the data
penguin_data
# # Exploring the data
# ## Statistics
# You can use `groupby` to see if there are any obvious differences in the measurements for the different species.
# > Run the code below to explore the statistics for `bill_length_mm` for the different species.
# display the statistics for bill_length_mm grouped by species
penguin_data.groupby("species")["bill_length_mm"].describe().round(2)
# > Add code in the boxes below to explore the statistics for `bill_depth_mm`, `flipper_length_mm` and `body_mass_g` grouped by species.
# display the statistics for bill_depth_mm grouped by species
# display the statistics for flipper_length_mm grouped by species
# display the statistics for body_mass_g grouped by species
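# > For reference, one possible solution for the first of these is shown below; it follows the same pattern as the bill length example above, and the other two features only need the column name swapped.
penguin_data.groupby("species")["bill_depth_mm"].describe().round(2)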
# ## Visualisations
# KDE plots grouped by species will help compare the distributions.
# > Run the code in the box below to create a KDE plot for `bill_length_mm` grouped by species.
# KDE plot of bill_length_mm grouped by species
sns.displot(data=penguin_data, kind="kde", x="bill_length_mm", hue="species", aspect=2)
# > Add code in the boxes below to create KDE plots for `bill_depth_mm`, `flipper_length_mm` and `body_mass_g` grouped by species.
# KDE plot of bill_depth_mm grouped by species
# KDE plot of flipper_length_mm grouped by species
# KDE plot of body_mass_g grouped by species
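# > For reference, one possible KDE plot for bill depth is shown below; the other two features only need the column name swapped.
sns.displot(data=penguin_data, kind="kde", x="bill_depth_mm", hue="species", aspect=2)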
# ## Checkpoint 1
# > * Use your statistics and visualisations to describe the differences between the three species of penguin.
# > * Explain which feature you think would be the most useful to distinguish a Gentoo penguin from the other two species.
# # Building a model with a single input feature
# In this activity you are going to generate some binary classification models. These will split the data into two groups depending on some of the values of the numerical features. For example, a simple model might be to classify penguins with bill lengths greater than 40mm as Gentoo penguins and those with bill lengths less than 40mm as not Gentoo penguins.
# You will use a 75:25 training-testing split for your data as you saw in lesson 5. Your model will be built using 75% of the data and the remaining 25% will be used to measure how well the model performs on unseen data.
# * To create a model you will send your training data to a machine learning algorithm, `DecisionTreeClassifier`, which will find the best split for your choice of input features.
# * You will then measure your model by finding the *accuracy* using the testing data, i.e. the percentage of penguins it has correctly identified.
# ## Building a model based on bill length
# The code for building and measuring a binary classification model has a very similar structure to the code you met for building a linear regression model in lesson 5. The main differences are that it will display the model using a *decision tree* and the model will be measured using *accuracy*, the percentage of items correctly identified when the model is applied to the testing data.
# The code block builds and measures the model using five stages:
# * Define the input features and the target feature. The input can be a single feature or a list of features. The target feature has the values 0 or 1.
# * Perform a training-testing split. This creates four objects: a training set of inputs and training list of outputs for building the model; a testing set of inputs and testing list of outputs for measuring the model.
# * Find the best model for the given input features using the machine learning algorithm `DecisionTreeClassifier`.
# * Display the model using a decision tree.
# * Create a list of target predictions using the testing data inputs and compare these to the actual values in the testing target list. The percentage correct is given as the accuracy.
# > Run the code in the box below to create a binary classification model using `bill_length` as the input.
# define the input feature(s) and output (or target) feature
input_features = ["bill_length_mm"]
input_data = penguin_data[input_features]
target_data = penguin_data["gentoo"]
# use the train_test_split command to create training and testing data
input_train, input_test, target_train, target_test = train_test_split(
input_data, target_data, train_size=0.75, random_state=1
)
# create the model
tree_model = DecisionTreeClassifier(max_depth=1).fit(input_train, target_train)
# display the model
plot_decision_tree(tree_model)
# create a list of the predictions, display a two-way-table of predictions/actual values for the testing data, calculate the accuracy for the testing data
target_pred = tree_model.predict(input_test)
print("Accuracy: ", (100 * accuracy_score(target_test, target_pred)).round(1), "%")
# ## Interpreting decision trees
# Decision trees are used to visualise binary classification models. The top line of the first box, or *node*, tells you how to split the data: you follow the left arrow if this statement is true and the right arrow otherwise. The final line in the bottom row of boxes tells you whether the model has classified the groups as 1/Yes or 0/No.
# The model shown here suggests that penguins with a bill length less than or equal to 42.45mm should be classified as not Gentoo and those with a bill length greater than 42.45mm should be classified as Gentoo. When this model is applied to the testing data it has accurately predicted whether 72.6% of the penguins are Gentoo or not. It has therefore misclassified 27.4% of penguins.
# The *samples* and *value* lines in the boxes show what proportion of the training data is being considered at each node and the fraction of these that are 0 or 1. This is useful information for more detailed analysis of decision trees but is beyond the scope of this course.
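# The root split used by the model can also be read directly from the fitted tree; this is a small sketch using scikit-learn's `tree_` attribute (node 0 is the root).
print("Root split feature: ", input_features[tree_model.tree_.feature[0]])
print("Root split threshold: ", round(float(tree_model.tree_.threshold[0]), 2))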
# ## Building a model based on other features
# > Add code to the boxes below to build models based on `bill_depth_mm`, `flipper_length_mm` and `body_mass_g`.
# Build a model based on bill depth
# Build a model based on flipper length
# Build a model based on body mass
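# > For reference, one possible solution for the bill depth model is sketched below; the flipper length and body mass models follow exactly the same pattern with the input feature swapped.
input_features = ["bill_depth_mm"]
input_data = penguin_data[input_features]
target_data = penguin_data["gentoo"]
input_train, input_test, target_train, target_test = train_test_split(
    input_data, target_data, train_size=0.75, random_state=1
)
tree_model = DecisionTreeClassifier(max_depth=1).fit(input_train, target_train)
plot_decision_tree(tree_model)
target_pred = tree_model.predict(input_test)
print("Accuracy: ", (100 * accuracy_score(target_test, target_pred)).round(1), "%")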
# ## Checkpoint 2
# > * Which input feature gives the best model and which input feature gives the worst model?
# > * Explain how this is consistent with your responses to Checkpoint 1.
# # Building a model based on two input features
# Just as you did with your regression models, you can give more than one input feature. You will need to increase the `max_depth` of your tree from 1 to 2 to allow for more decisions to be made.
# > Run the code in the box below to create a model based on bill length and bill depth.
# define the input feature(s) and output (or target) feature
input_features = ["bill_length_mm", "bill_depth_mm"]
input_data = penguin_data[input_features]
target_data = penguin_data["gentoo"]
# use the train_test_split command to create training and testing data
input_train, input_test, target_train, target_test = train_test_split(
input_data, target_data, train_size=0.75, random_state=1
)
# create the model
tree_model = DecisionTreeClassifier(max_depth=2).fit(input_train, target_train)
# display the model
plot_decision_tree(tree_model)
# create a list of the predictions, display a two-way-table of predictions/actual values for the testing data, calculate the accuracy for the testing data
target_pred = tree_model.predict(input_test)
print("Accuracy: ", (100 * accuracy_score(target_test, target_pred)).round(1), "%")
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
train = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/train.csv")
X_test = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/x_test.csv")
train
plt.scatter(train["SAT_average"], train["Completion_rate"])
train = train[(train["SAT_average"] < 1000) | (train["Completion_rate"] > 0.2)]
X_train_df = train.drop("Completion_rate", axis=1)
y_train = train["Completion_rate"]
X_train_df.head()
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(X_train_df.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax)
# from sklearn.preprocessing import QuantileTransformer
def transform_X(X):
X["Tuition_diff"] = X["Tuition_out_state"] - X["Tuition_in_state"]
X["total_pct_race"] = (
X["pct_White"] + X["pct_Black"] + X["pct_Hispanic"] + X["pct_Asian"] + 0.01
)
X["share_White"] = X["pct_White"] / X["total_pct_race"]
X["share_Black"] = X["pct_Black"] / X["total_pct_race"]
X["share_Hispanic"] = X["pct_Hispanic"] / X["total_pct_race"]
X["share_Asian"] = X["pct_Asian"] / X["total_pct_race"]
X["salary_tuition_ratio_in"] = X["Faculty_salary"] / X["Tuition_in_state"]
X["salary_tuition_ratio_out"] = X["Faculty_salary"] / X["Tuition_out_state"]
X.drop("ACT_50thPercentile", axis=1, inplace=True)
X.drop("Parents_highsch", axis=1, inplace=True)
X.drop("Unnamed: 0", axis=1, inplace=True)
for col in X.columns:
if col != "total_pct_race":
X[col + "_total_pct_inter"] = X[col] * X["total_pct_race"]
for col in ["Pell_grant_rate"]:
X[col + "_total_pct_ratio"] = X[col] / X["total_pct_race"]
X[col + "_White_share_inter"] = X[col] * X["share_White"]
X[col + "_Black_share_inter"] = X[col] * X["share_Black"]
X[col + "_Hispanic_share_inter"] = X[col] * X["share_Hispanic"]
X[col + "_Asian_share_inter"] = X[col] * X["share_Asian"]
print(X.shape)
transform_X(X_train_df)
transform_X(X_test)
X_train_df
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import RandomizedSearchCV
m = ExtraTreesRegressor(random_state=5006)
random_grid = {
"n_estimators": [300, 400, 500, 600],
"max_features": ["sqrt", 0.2, 0.3, 0.4, 0.5],
"max_depth": [30, 35, 40, None],
"min_samples_split": [2, 3, 4],
"min_samples_leaf": [1, 2],
"bootstrap": [False],
}
etr_tuner = RandomizedSearchCV(m, random_grid, cv=5, n_iter=50, verbose=2)
etr_tuner.fit(X_train_df, y_train)
etr_tuner.best_params_
from sklearn.model_selection import cross_validate
m = etr_tuner.best_estimator_
results = cross_validate(m, X_train_df, y_train, cv=5)
np.mean(results["test_score"])
m.fit(X_train_df, y_train)
print(m.score(X_train_df, y_train))
y_pred = m.predict(X_test)
submission = pd.DataFrame.from_dict({"Completion_rate": y_pred})
submission.to_csv("submission.csv", index=True, index_label="id")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import cv2
import scikitplot
import seaborn as sns
from matplotlib import pyplot
import math
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
import tensorflow as tf
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Flatten,
Dense,
Conv2D,
GlobalAveragePooling2D,
MaxPool2D,
)
from tensorflow.keras.layers import Dropout, BatchNormalization, Activation
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
df = pd.read_csv(
"/kaggle/input/facial-expression-recognitionferchallenge/fer2013/fer2013/fer2013.csv"
)
df.head(5)
df.shape
df
df["emotion"].value_counts()
df["emotion"].unique()
len(df["pixels"][0])
math.sqrt(len(df.pixels[35886].split(" ")))
emotion_label = {
0: "anger",
1: "disgust",
2: "fear",
3: "happiness",
4: "sadness",
5: "surprise",
6: "neutral",
}
fig = pyplot.figure(1, (14, 14))
k = 0
for label in sorted(df["emotion"].unique()):
for j in range(7):
px = df[df["emotion"] == label]["pixels"].iloc[k]
px = np.array(px.split(" ")).reshape(48, 48).astype("float32")
k += 1
ax = pyplot.subplot(7, 7, k)
ax.imshow(px, cmap="gray")
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(emotion_label[label])
pyplot.tight_layout()
img_array = df["pixels"].apply(
lambda x: np.array(x.split(" ")).reshape(48, 48).astype("float32")
)
print(img_array)
img_array = np.stack(img_array, axis=0)
img_array.shape
print(img_array[0])
img_features = []
for i in range(len(img_array)):
temp = cv2.cvtColor(img_array[i], cv2.COLOR_GRAY2RGB)
img_features.append(temp)
img_features = np.array(img_features)
print(img_features.shape)
pyplot.imshow(img_features[7].astype(np.uint8))
le = LabelEncoder()
img_labels = le.fit_transform(df["emotion"])
img_labels = np_utils.to_categorical(img_labels)
img_labels
X_train, X_test, y_train, y_test = train_test_split(
img_features, img_labels, stratify=img_labels, test_size=0.1, random_state=42
)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
X_train = X_train / 255.0
X_test = X_test / 255.0
IMG_WIDTH = 48
IMG_HEIGHT = 48
CHANNELS = 3
vgg = tf.keras.applications.VGG19(
weights="imagenet", include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, CHANNELS)
)
vgg.summary()
def get_model(build, classes):
model = build.layers[-2].output
model = GlobalAveragePooling2D()(model)
model = Dense(classes, activation="softmax", name="output_layer")(model)
return model
num_classes = 7
head = get_model(vgg, num_classes)
model = Model(inputs=vgg.input, outputs=head)
print(model.summary())
early_stopping = EarlyStopping(
monitor="val_accuracy",
min_delta=0.00005,
patience=11,
verbose=1,
restore_best_weights=True,
)
lr_scheduler = ReduceLROnPlateau(
monitor="val_accuracy",
factor=0.5,
patience=7,
min_lr=1e-7,
verbose=1,
)
callbacks = [early_stopping, lr_scheduler]
train_datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.15,
height_shift_range=0.15,
shear_range=0.15,
zoom_range=0.15,
horizontal_flip=True,
)
train_datagen.fit(X_train)
batch_size = 256
epochs = 15
model.compile(
loss="categorical_crossentropy",
optimizer=optimizers.Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999),
metrics=["accuracy"],
)
history = model.fit(
    train_datagen.flow(X_train, y_train, batch_size=batch_size),
    validation_data=(X_test, y_test),  # X_valid/y_valid were never defined; use the held-out test split
    steps_per_epoch=len(X_train) // batch_size,
    epochs=epochs,
    callbacks=callbacks,
    use_multiprocessing=True,
)
sns.set()
fig = pyplot.figure(0, (12, 4))
ax = pyplot.subplot(1, 2, 1)
sns.lineplot(x=history.epoch, y=history.history["accuracy"], label="train")
sns.lineplot(x=history.epoch, y=history.history["val_accuracy"], label="valid")
pyplot.title("Accuracy")
pyplot.tight_layout()
ax = pyplot.subplot(1, 2, 2)
sns.lineplot(x=history.epoch, y=history.history["loss"], label="train")
sns.lineplot(x=history.epoch, y=history.history["val_loss"], label="valid")
pyplot.title("Loss")
pyplot.tight_layout()
pyplot.savefig("epoch_history_dcnn.png")
pyplot.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
"""
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
"""
# Any results you write to the current directory are saved as output.
# This library is for image augmentation. Link: https://github.com/albumentations-team/albumentations
#!pip install -U git+https://github.com/albu/albumentations
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
from tqdm import tqdm # _notebook as tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
import os
from scipy.optimize import minimize
import plotly.express as px
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
PATH = "../input/pku-autonomous-driving/"
os.listdir(PATH)
# If you want to understand the camera matrix (the intrinsic matrix), see
# Link: http://ksimek.github.io/2013/08/13/intrinsic/
# ## Loading the dataset
train = pd.read_csv(PATH + "train.csv")
test = pd.read_csv(PATH + "sample_submission.csv")
# From camera.zip
camera_matrix = np.array(
[[2304.5479, 0, 1686.2379], [0, 2305.8757, 1354.9849], [0, 0, 1]], dtype=np.float32
)
camera_matrix_inv = np.linalg.inv(camera_matrix)
train.head()
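# A quick illustrative check of the intrinsic matrix (the point below is made up, not taken
# from the dataset): a camera-frame point (x, y, z) projects to pixel coordinates
# u = fx * x / z + cx, v = fy * y / z + cy.
example_point = np.array([1.0, 2.0, 10.0])  # hypothetical point 10 m in front of the camera
u, v, w = camera_matrix.dot(example_point)
print("Projected pixel coordinates:", u / w, v / w)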
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# NOTE: this block is a standalone synthetic regression demo; it overwrites the
# `train` and `test` dataframes loaded above, which are re-read further down.
# set the range or distribution for x and y coordinates
xmin = 0
xmax = 10
ymin = 0
ymax = 10
# generate random x and y coordinates within the specified range or distribution
n_points = 100
test = np.random.uniform(xmin, xmax, n_points)
train = np.random.uniform(ymin, ymax, n_points)
# fit a linear regression model to the generated coordinates
model = LinearRegression()
X = test.reshape(-1, 1)
y = train.reshape(-1, 1)
model.fit(X, y)
# evaluate the model on a separate set of test data
n_test_points = 50
test_x = np.random.uniform(xmin, xmax, n_test_points)
test_y = np.random.uniform(ymin, ymax, n_test_points)
test_X = test_x.reshape(-1, 1)
test_y_pred = model.predict(test_X)
# compute the evaluation metrics for the model's performance
mse = mean_squared_error(test_y, test_y_pred)
r2 = r2_score(test_y, test_y_pred)
# plot the generated coordinates and the regression line
plt.scatter(test, train, label="Training Data")
plt.plot(test_x, test_y_pred, color="red", label="Regression Line")
plt.scatter(test_x, test_y, color="green", label="Test Data")
plt.xlabel("X Coordinates")
plt.ylabel("Y Coordinates")
plt.title("Linear Regression on test and train Coordinates")
plt.legend()
plt.show()
# print the evaluation metrics
print("Mean Squared Error:", mse)
print("R-squared Score:", r2)
def imread(path, fast_mode=False):
img = cv2.imread(path)
if not fast_mode and img is not None and len(img.shape) == 3:
img = np.array(img[:, :, ::-1])
return img
img = imread(PATH + "train_images/ID_5f4dac207" + ".jpg")
IMG_SHAPE = img.shape
plt.figure(figsize=(15, 8))
plt.imshow(img)
# ## Extracting data
# PredictionString column contains pose information about all cars
# From the data description:
# The primary data is images of cars and related pose information. The pose information is formatted as strings, as follows:
# model type, yaw, pitch, roll, x, y, z
# This function extracts these values:
def str2coords(s, names=["id", "yaw", "pitch", "roll", "x", "y", "z"]):
"""
Input:
s: PredictionString (e.g. from train dataframe)
names: array of what to extract from the string
Output:
list of dicts with keys from `names`
"""
coords = []
for l in np.array(s.split()).reshape([-1, 7]):
coords.append(dict(zip(names, l.astype("float"))))
if "id" in coords[-1]:
coords[-1]["id"] = int(coords[-1]["id"])
return coords
train = pd.read_csv(PATH + "train.csv")
test = pd.read_csv(PATH + "sample_submission.csv")
lens = [len(str2coords(s)) for s in train["PredictionString"]]
plt.figure(figsize=(15, 6))
sns.countplot(lens)
plt.xlabel("Number of cars in image")
points_df = pd.DataFrame()
for col in ["x", "y", "z", "yaw", "pitch", "roll"]:
arr = []
for ps in train["PredictionString"]:
coords = str2coords(ps)
arr += [c[col] for c in coords]
points_df[col] = arr
print("len(points_df)", len(points_df))
points_df.head()
# ## Plot position information distribution
# To gain insight into the dataset, let's plot the distribution of the position information
plt.figure(figsize=(15, 6))
sns.distplot(points_df["x"], bins=500)
plt.xlabel("x")
plt.show()
plt.figure(figsize=(15, 6))
sns.distplot(points_df["y"], bins=500)
plt.xlabel("y")
plt.show()
plt.figure(figsize=(15, 6))
sns.distplot(points_df["z"], bins=500)
plt.xlabel("z")
plt.show()
plt.figure(figsize=(15, 6))
sns.distplot(points_df["yaw"], bins=500)
plt.xlabel("yaw")
plt.show()
plt.figure(figsize=(15, 6))
sns.distplot(points_df["pitch"], bins=500)
plt.xlabel("pitch")
plt.show()
plt.figure(figsize=(15, 6))
sns.distplot(points_df["roll"], bins=500)
plt.xlabel("roll rotated by pi")
plt.show()
def rotate(x, angle):
x = x + angle
x = x - (x + np.pi) // (2 * np.pi) * 2 * np.pi
return x
plt.figure(figsize=(15, 6))
sns.distplot(points_df["roll"].map(lambda x: rotate(x, np.pi)), bins=500)
plt.xlabel("roll rotated by pi")
plt.show()
# # 2D Visualization
def get_img_coords(s):
"""
Input is a PredictionString (e.g. from train dataframe)
Output is two arrays:
xs: x coordinates in the image
ys: y coordinates in the image
"""
coords = str2coords(s)
xs = [c["x"] for c in coords]
ys = [c["y"] for c in coords]
zs = [c["z"] for c in coords]
P = np.array(list(zip(xs, ys, zs))).T
img_p = np.dot(camera_matrix, P).T
img_p[:, 0] /= img_p[:, 2]
img_p[:, 1] /= img_p[:, 2]
# img_p[:, 0] /= zs
# img_p[:, 1] /= zs
img_xs = img_p[:, 0]
img_ys = img_p[:, 1]
img_zs = img_p[:, 2] # z = Distance from the camera
return img_xs, img_ys
plt.figure(figsize=(14, 14))
plt.imshow(imread(PATH + "train_images/" + train["ImageId"][500] + ".jpg"))
plt.scatter(*get_img_coords(train["PredictionString"][500]), color="red", s=100)
xs, ys = [], []
for ps in train["PredictionString"]:
x, y = get_img_coords(ps)
xs += list(x)
ys += list(y)
plt.figure(figsize=(18, 18))
plt.imshow(imread(PATH + "train_images/" + train["ImageId"][500] + ".jpg"), alpha=0.3)
plt.scatter(xs, ys, color="red", s=10, alpha=0.2)
zy_slope = LinearRegression()
X = points_df[["z"]]
y = points_df["y"]
zy_slope.fit(X, y)
print("MAE without x:", mean_absolute_error(y, zy_slope.predict(X)))
# Will use this model later
xzy_slope = LinearRegression()
X = points_df[["x", "z"]]
y = points_df["y"]
xzy_slope.fit(X, y)
print("MAE with x:", mean_absolute_error(y, xzy_slope.predict(X)))
print("\ndy/dx = {:.3f}\ndy/dz = {:.3f}".format(*xzy_slope.coef_))
plt.figure(figsize=(16, 16))
plt.xlim(0, 500)
plt.ylim(0, 100)
plt.scatter(points_df["z"], points_df["y"], label="Real points")
X_line = np.linspace(0, 500, 10)
plt.plot(
X_line, zy_slope.predict(X_line.reshape(-1, 1)), color="orange", label="Regression"
)
plt.legend()
plt.xlabel("z coordinate")
plt.ylabel("y coordinate")
# ## 3D Visualization
from math import sin, cos
# convert euler angle to rotation matrix
def euler_to_Rot(yaw, pitch, roll):
Y = np.array([[cos(yaw), 0, sin(yaw)], [0, 1, 0], [-sin(yaw), 0, cos(yaw)]])
P = np.array([[1, 0, 0], [0, cos(pitch), -sin(pitch)], [0, sin(pitch), cos(pitch)]])
R = np.array([[cos(roll), -sin(roll), 0], [sin(roll), cos(roll), 0], [0, 0, 1]])
return np.dot(Y, np.dot(P, R))
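# Quick sanity check (illustrative): zero angles should give the identity matrix, and any
# returned rotation matrix should be orthonormal (R.T @ R == I).
print(np.allclose(euler_to_Rot(0.0, 0.0, 0.0), np.eye(3)))
R_example = euler_to_Rot(0.1, 0.2, 0.3)
print(np.allclose(R_example.T @ R_example, np.eye(3)))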
def draw_line(image, points):
color = (255, 0, 0)
cv2.line(image, tuple(points[0][:2]), tuple(points[3][:2]), color, 16)
cv2.line(image, tuple(points[0][:2]), tuple(points[1][:2]), color, 16)
cv2.line(image, tuple(points[1][:2]), tuple(points[2][:2]), color, 16)
cv2.line(image, tuple(points[2][:2]), tuple(points[3][:2]), color, 16)
return image
def draw_points(image, points):
for p_x, p_y, p_z in points:
cv2.circle(image, (p_x, p_y), int(1000 / p_z), (0, 255, 0), -1)
# if p_x > image.shape[1] or p_y > image.shape[0]:
# print('Point', p_x, p_y, 'is out of image with shape', image.shape)
return image
def visualize(img, coords):
# You will also need functions from the previous cells
x_l = 1.02
y_l = 0.80
z_l = 2.31
img = img.copy()
for point in coords:
# Get values
x, y, z = point["x"], point["y"], point["z"]
yaw, pitch, roll = -point["pitch"], -point["yaw"], -point["roll"]
# Math
center_point = np.array([x, y, z]).reshape([1, 3])
Rotation_matrix = euler_to_Rot(
yaw, pitch, roll
).T # Rotation matrix to transform from car coordinate frame to camera coordinate frame
bounding_box = np.array(
[
[x_l, -y_l, -z_l],
[x_l, -y_l, z_l],
[-x_l, -y_l, z_l],
[-x_l, -y_l, -z_l],
]
).T
img_cor_points = np.dot(
camera_matrix, np.dot(Rotation_matrix, bounding_box) + center_point.T
)
img_cor_points = img_cor_points.T
img_cor_points[:, 0] /= img_cor_points[:, 2]
img_cor_points[:, 1] /= img_cor_points[:, 2]
img_cor_points = img_cor_points.astype(int)
# Drawing
img = draw_line(img, img_cor_points)
img_point = np.dot(camera_matrix, center_point.T).T
img_point[:, 0] /= img_point[:, 2]
img_point[:, 1] /= img_point[:, 2]
img = draw_points(img, img_point.astype(int))
return img
n_rows = 6
for idx in range(n_rows):
fig, axes = plt.subplots(1, 2, figsize=(20, 20))
img = imread(PATH + "train_images/" + train["ImageId"].iloc[10 + idx] + ".jpg")
axes[0].imshow(img)
img_vis = visualize(img, str2coords(train["PredictionString"].iloc[10 + idx]))
axes[1].imshow(img_vis)
plt.show()
IMG_WIDTH = 1024
IMG_HEIGHT = IMG_WIDTH // 16 * 5
MODEL_SCALE = 8
def rotate(x, angle):
x = x + angle
x = x - (x + np.pi) // (2 * np.pi) * 2 * np.pi
return x
def _regr_preprocess(regr_dict, flip=False):
if flip:
for k in ["x", "pitch", "roll"]:
regr_dict[k] = -regr_dict[k]
for name in ["x", "y", "z"]:
regr_dict[name] = regr_dict[name] / 100
regr_dict["roll"] = rotate(regr_dict["roll"], np.pi)
regr_dict["pitch_sin"] = sin(regr_dict["pitch"])
regr_dict["pitch_cos"] = cos(regr_dict["pitch"])
regr_dict.pop("pitch")
regr_dict.pop("id")
return regr_dict
def _regr_back(regr_dict):
for name in ["x", "y", "z"]:
regr_dict[name] = regr_dict[name] * 100
regr_dict["roll"] = rotate(regr_dict["roll"], -np.pi)
pitch_sin = regr_dict["pitch_sin"] / np.sqrt(
regr_dict["pitch_sin"] ** 2 + regr_dict["pitch_cos"] ** 2
)
pitch_cos = regr_dict["pitch_cos"] / np.sqrt(
regr_dict["pitch_sin"] ** 2 + regr_dict["pitch_cos"] ** 2
)
regr_dict["pitch"] = np.arccos(pitch_cos) * np.sign(pitch_sin)
return regr_dict
def preprocess_image(img, flip=False):
img = img[img.shape[0] // 2 :]
bg = np.ones_like(img) * img.mean(1, keepdims=True).astype(img.dtype)
bg = bg[:, : img.shape[1] // 6]
img = np.concatenate([bg, img, bg], 1)
img = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT))
if flip:
img = img[:, ::-1]
return (img / 255).astype("float32")
def get_mask_and_regr(img, labels, flip=False):
mask = np.zeros(
[IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE], dtype="float32"
)
regr_names = ["x", "y", "z", "yaw", "pitch", "roll"]
regr = np.zeros(
[IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE, 7], dtype="float32"
)
coords = str2coords(labels)
xs, ys = get_img_coords(labels)
for x, y, regr_dict in zip(xs, ys, coords):
x, y = y, x
# print(x,img.shape[0] // 2,y, img.shape[1] // 6)
x = (x - img.shape[0] // 2) * IMG_HEIGHT / (img.shape[0] // 2) / MODEL_SCALE
# x=(x*1/2)*(IMG_HEIGHT / MODEL_SCALE)/(img.shape[0] // 2)
x = np.round(x).astype("int")
y = (y + img.shape[1] // 6) * IMG_WIDTH / (img.shape[1] * 4 / 3) / MODEL_SCALE
# y=(y* 4/3)*(IMG_WIDTH / MODEL_SCALE)/((img.shape[1] * 3/4) )
y = np.round(y).astype("int")
# print(x,y)
if (
x >= 0
and x < IMG_HEIGHT // MODEL_SCALE
and y >= 0
and y < IMG_WIDTH // MODEL_SCALE
):
mask[x, y] = 1
regr_dict = _regr_preprocess(regr_dict, flip)
regr[x, y] = [regr_dict[n] for n in sorted(regr_dict)]
if flip:
mask = np.array(mask[:, ::-1])
regr = np.array(regr[:, ::-1])
return mask, regr
img0 = imread(PATH + "train_images/" + train["ImageId"][500] + ".jpg")
img = preprocess_image(img0, flip=True)
mask, regr = get_mask_and_regr(img0, train["PredictionString"][500], flip=True)
print("img.shape", img.shape, "std:", np.std(img))
print("mask.shape", mask.shape, "std:", np.std(mask))
print("regr.shape", regr.shape, "std:", np.std(regr))
print(img[:, ::-1].shape)
plt.figure(figsize=(16, 16))
plt.title("Processed image")
plt.imshow(img)
plt.show()
plt.figure(figsize=(16, 16))
plt.title("Processed flip image")
plt.imshow(img[:, ::-1])
plt.show()
plt.figure(figsize=(16, 16))
plt.title("Detection Mask")
plt.imshow(mask)
plt.show()
plt.figure(figsize=(16, 16))
plt.title("Yaw values")
plt.imshow(regr[:, :, -2])
plt.show()
# Define functions to convert back from 2d map to 3d coordinates and angles
DISTANCE_THRESH_CLEAR = 2
def convert_3d_to_2d(x, y, z, fx=2304.5479, fy=2305.8757, cx=1686.2379, cy=1354.9849):
# stolen from https://www.kaggle.com/theshockwaverider/eda-visualization-baseline
return x * fx / z + cx, y * fy / z + cy
def optimize_xy(r, c, x0, y0, z0, flipped=False):
def distance_fn(xyz):
x, y, z = xyz
xx = -x if flipped else x
slope_err = (xzy_slope.predict([[xx, z]])[0] - y) ** 2
x, y = convert_3d_to_2d(x, y, z)
y, x = x, y
x = (x - IMG_SHAPE[0] // 2) * IMG_HEIGHT / (IMG_SHAPE[0] // 2) / MODEL_SCALE
y = (y + IMG_SHAPE[1] // 6) * IMG_WIDTH / (IMG_SHAPE[1] * 4 / 3) / MODEL_SCALE
return max(0.2, (x - r) ** 2 + (y - c) ** 2) + max(0.4, slope_err)
res = minimize(distance_fn, [x0, y0, z0], method="Powell")
x_new, y_new, z_new = res.x
return x_new, y_new, z_new
def clear_duplicates(coords):
for c1 in coords:
xyz1 = np.array([c1["x"], c1["y"], c1["z"]])
for c2 in coords:
xyz2 = np.array([c2["x"], c2["y"], c2["z"]])
distance = np.sqrt(((xyz1 - xyz2) ** 2).sum())
if distance < DISTANCE_THRESH_CLEAR:
if c1["confidence"] < c2["confidence"]:
c1["confidence"] = -1
return [c for c in coords if c["confidence"] > 0]
def extract_coords(prediction, flipped=False):
logits = prediction[0]
regr_output = prediction[1:]
points = np.argwhere(logits > 0)
col_names = sorted(["x", "y", "z", "yaw", "pitch_sin", "pitch_cos", "roll"])
coords = []
for r, c in points:
regr_dict = dict(zip(col_names, regr_output[:, r, c]))
coords.append(_regr_back(regr_dict))
coords[-1]["confidence"] = 1 / (1 + np.exp(-logits[r, c]))
coords[-1]["x"], coords[-1]["y"], coords[-1]["z"] = optimize_xy(
r, c, coords[-1]["x"], coords[-1]["y"], coords[-1]["z"], flipped
)
coords = clear_duplicates(coords)
return coords
def coords2str(coords, names=["yaw", "pitch", "roll", "x", "y", "z", "confidence"]):
s = []
for c in coords:
for n in names:
s.append(str(c.get(n, 0)))
return " ".join(s)
for idx in range(2):
fig, axes = plt.subplots(1, 2, figsize=(20, 20))
for ax_i in range(2):
img0 = imread(PATH + "train_images/" + train["ImageId"].iloc[10 + idx] + ".jpg")
if ax_i == 1:
img0 = img0[:, ::-1]
img = preprocess_image(img0, ax_i == 1)
mask, regr = get_mask_and_regr(
img0, train["PredictionString"][10 + idx], ax_i == 1
)
regr = np.rollaxis(regr, 2, 0)
coords = extract_coords(np.concatenate([mask[None], regr], 0), ax_i == 1)
axes[ax_i].set_title("Flip = {}".format(ax_i == 1))
axes[ax_i].imshow(visualize(img0, coords))
plt.show()
# ## Creating the Pytorch model
# The model is a U-Net implemented in PyTorch.
# Refer to this link for implementation details: https://github.com/milesial/Pytorch-UNet
# The main idea is to use the U-Net to predict the centre point of each car and to regress the rotation angles.
from efficientnet_pytorch import EfficientNet
class double_conv(nn.Module):
"""(conv => BN => ReLU) * 2"""
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
# would be a nice idea if the upsampling could be learned too,
# but my machine does not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2=None):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
if x2 is not None:
x = torch.cat([x2, x1], dim=1)
else:
x = x1
x = self.conv(x)
return x
def get_mesh(batch_size, shape_x, shape_y):
mg_x, mg_y = np.meshgrid(np.linspace(0, 1, shape_y), np.linspace(0, 1, shape_x))
mg_x = np.tile(mg_x[None, None, :, :], [batch_size, 1, 1, 1]).astype("float32")
mg_y = np.tile(mg_y[None, None, :, :], [batch_size, 1, 1, 1]).astype("float32")
mesh = torch.cat([torch.tensor(mg_x).to(device), torch.tensor(mg_y).to(device)], 1)
return mesh
class MyUNet(nn.Module):
"""Mixture of previous classes"""
def __init__(self, n_classes):
super(MyUNet, self).__init__()
self.base_model = EfficientNet.from_pretrained("efficientnet-b0")
self.conv0 = double_conv(5, 64)
self.conv1 = double_conv(64, 128)
self.conv2 = double_conv(128, 512)
self.conv3 = double_conv(512, 1024)
self.mp = nn.MaxPool2d(2)
self.up1 = up(1282 + 1024, 512)
self.up2 = up(512 + 512, 256)
self.outc = nn.Conv2d(256, n_classes, 1)
def forward(self, x):
batch_size = x.shape[0]
mesh1 = get_mesh(batch_size, x.shape[2], x.shape[3])
x0 = torch.cat([x, mesh1], 1)
x1 = self.mp(self.conv0(x0))
x2 = self.mp(self.conv1(x1))
x3 = self.mp(self.conv2(x2))
x4 = self.mp(self.conv3(x3))
x_center = x[:, :, :, IMG_WIDTH // 8 : -IMG_WIDTH // 8]
feats = self.base_model.extract_features(x_center)
bg = torch.zeros(
[feats.shape[0], feats.shape[1], feats.shape[2], feats.shape[3] // 8]
).to(device)
feats = torch.cat([bg, feats, bg], 3)
# Add positional info
mesh2 = get_mesh(batch_size, feats.shape[2], feats.shape[3])
feats = torch.cat([feats, mesh2], 1)
x = self.up1(feats, x4)
x = self.up2(x, x3)
x = self.outc(x)
return x
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
n_epochs = 10
model = MyUNet(8).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
# NOTE: this assumes the data loaders defined further down have already been created,
# since the scheduler's step size depends on len(train_loader).
exp_lr_scheduler = lr_scheduler.StepLR(
    optimizer, step_size=max(n_epochs, 10) * len(train_loader) // 3, gamma=0.1
)
# ## The Loss function
# If you want to know more about custom losses in PyTorch, refer to this link: https://cs230.stanford.edu/blog/pytorch/
def criterion(prediction, mask, regr, size_average=True):
# Binary mask loss
pred_mask = torch.sigmoid(prediction[:, 0])
# mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)
mask_loss = mask * torch.log(pred_mask + 1e-12) + (1 - mask) * torch.log(
1 - pred_mask + 1e-12
)
mask_loss = -mask_loss.mean(0).sum()
# Regression L1 loss
pred_regr = prediction[:, 1:]
regr_loss = (torch.abs(pred_regr - regr).sum(1) * mask).sum(1).sum(1) / mask.sum(
1
).sum(1)
regr_loss = regr_loss.mean(0)
# Sum
loss = mask_loss + regr_loss
if not size_average:
loss *= prediction.shape[0]
return loss
def train_model(epoch, history=None):
model.train()
for batch_idx, (img_batch, mask_batch, regr_batch) in enumerate(tqdm(train_loader)):
img_batch = img_batch.to(device)
mask_batch = mask_batch.to(device)
regr_batch = regr_batch.to(device)
optimizer.zero_grad()
output = model(img_batch)
loss = criterion(output, mask_batch, regr_batch)
if history is not None:
history.loc[
epoch + batch_idx / len(train_loader), "train_loss"
] = loss.data.cpu().numpy()
loss.backward()
optimizer.step()
exp_lr_scheduler.step()
print(
"Train Epoch: {} \tLR: {:.6f}\tLoss: {:.6f}".format(
epoch, optimizer.state_dict()["param_groups"][0]["lr"], loss.data
)
)
def evaluate_model(epoch, history=None):
model.eval()
loss = 0
with torch.no_grad():
for img_batch, mask_batch, regr_batch in dev_loader:
img_batch = img_batch.to(device)
mask_batch = mask_batch.to(device)
regr_batch = regr_batch.to(device)
output = model(img_batch)
loss += criterion(output, mask_batch, regr_batch, size_average=False).data
loss /= len(dev_loader.dataset)
if history is not None:
history.loc[epoch, "dev_loss"] = loss.cpu().numpy()
print("Dev loss: {:.4f}".format(loss))
def get_img_coords(s):
"""
Input is a PredictionString (e.g. from train dataframe)
Output is two arrays:
xs: x coordinates in the image (row)
ys: y coordinates in the image (column)
"""
coords = str2coords(s)
xs = [c["x"] for c in coords]
ys = [c["y"] for c in coords]
zs = [c["z"] for c in coords]
P = np.array(list(zip(xs, ys, zs))).T
img_p = np.dot(camera_matrix, P).T
img_p[:, 0] /= img_p[:, 2]
img_p[:, 1] /= img_p[:, 2]
img_xs = img_p[:, 0]
img_ys = img_p[:, 1]
img_zs = img_p[:, 2] # z = Distance from the camera
return img_xs, img_ys
plt.figure(figsize=(14, 14))
plt.imshow(imread(PATH + "train_images/" + train["ImageId"][2217] + ".jpg"))
plt.scatter(*get_img_coords(train["PredictionString"][2217]), color="red", s=100)
# Road points
road_width = 3
road_xs = [-road_width, road_width, road_width, -road_width, -road_width]
road_ys = [0, 0, 500, 500, 0]
plt.figure(figsize=(16, 16))
plt.axes().set_aspect(1)
plt.xlim(-50, 50)
plt.ylim(0, 100)
# View road
plt.fill(road_xs, road_ys, alpha=0.2, color="gray")
plt.plot(
[road_width / 2, road_width / 2],
[0, 100],
alpha=0.4,
linewidth=4,
color="white",
ls="--",
)
plt.plot(
[-road_width / 2, -road_width / 2],
[0, 100],
alpha=0.4,
linewidth=4,
color="white",
ls="--",
)
# View cars
plt.scatter(
points_df["x"],
np.sqrt(points_df["z"] ** 2 + points_df["y"] ** 2),
color="red",
s=10,
alpha=0.1,
)
# NOTE: this redefinition replaces the earlier train_model and assumes a SAM-style
# optimizer that exposes first_step()/second_step(); with the plain Adam optimizer
# created above, use the earlier train_model instead.
def train_model(epoch, history=None):
    model.train()
for batch_idx, (img_batch, mask_batch, regr_batch) in enumerate(tqdm(train_loader)):
img_batch = img_batch.to(device)
mask_batch = mask_batch.to(device)
regr_batch = regr_batch.to(device)
output = model(img_batch)
loss = criterion(output, mask_batch, regr_batch)
if history is not None:
history.loc[
epoch + batch_idx / len(train_loader), "train_loss"
] = loss.data.cpu().numpy()
loss.backward()
optimizer.first_step(zero_grad=True)
preds_second = model(img_batch)
loss_second = criterion(preds_second, mask_batch, regr_batch)
loss_second.backward()
optimizer.second_step(zero_grad=True)
# optimizer.step()
exp_lr_scheduler.step()
print(
"Train Epoch: {} \tLR: {:.6f}\tLoss: {:.6f}".format(
epoch, optimizer.state_dict()["param_groups"][0]["lr"], loss.data
)
)
def evaluate_model(epoch, history=None):
model.eval()
loss = 0
with torch.no_grad():
for img_batch, mask_batch, regr_batch in dev_loader:
img_batch = img_batch.to(device)
mask_batch = mask_batch.to(device)
regr_batch = regr_batch.to(device)
output = model(img_batch)
loss += criterion(output, mask_batch, regr_batch, size_average=False).data
loss /= len(dev_loader.dataset)
if history is not None:
history.loc[epoch, "dev_loss"] = loss.cpu().numpy()
print("Dev loss: {:.4f}".format(loss))
class CarDataset(Dataset):
"""Car dataset."""
def __init__(
self, dataframe, root_dir, training=True, transform=None, hasIDs=False
):
self.df = dataframe
self.root_dir = root_dir
self.transform = transform
self.training = training
self.hasIDs = hasIDs
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# Get image name
idx, labels = self.df.values[idx]
img_name = self.root_dir.format(idx)
# Augmentation
flip = False
if self.training:
flip = np.random.randint(10) == 1
# Read image
img0 = imread(img_name, True)
img = preprocess_image(img0, flip=flip)
img = np.rollaxis(img, 2, 0)
# Get mask and regression maps
mask, regr = get_mask_and_regr(img0, labels, flip=flip)
regr = np.rollaxis(regr, 2, 0)
if self.hasIDs:
return [idx, img]
return [img, mask, regr]
train_images_dir = PATH + "train_images/{}.jpg"
test_images_dir = PATH + "test_images/{}.jpg"
df_train, df_dev = train_test_split(train, test_size=0.01, random_state=42)
df_test = test
# Create dataset objects
train_dataset = CarDataset(df_train, train_images_dir, training=True)
dev_dataset = CarDataset(df_dev, train_images_dir, training=False)
dev_dataset2 = CarDataset(df_dev, train_images_dir, training=False, hasIDs=True)
test_dataset = CarDataset(df_test, test_images_dir, training=False)
BATCH_SIZE = 4
val_batch_size = 6
# Create data generators - they will produce batches
train_loader = DataLoader(
dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4
)
dev_loader = DataLoader(
dataset=dev_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0
)
dev_loader2 = DataLoader(
dataset=dev_dataset2, batch_size=val_batch_size, shuffle=False, num_workers=4
)
test_loader = DataLoader(
dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0
)
img, mask, regr = dev_dataset[0]
plt.figure(figsize=(16, 16))
plt.title("Input image")
plt.imshow(np.rollaxis(img, 0, 3))
plt.show()
plt.figure(figsize=(16, 16))
plt.title("Ground truth mask")
plt.imshow(mask)
plt.show()
output = model(torch.tensor(img[None]).to(device))
logits = output[0, 0].data.cpu().numpy()
plt.figure(figsize=(16, 16))
plt.title("Model predictions")
plt.imshow(logits)
plt.show()
plt.figure(figsize=(16, 16))
plt.title("Model predictions thresholded")
plt.imshow(logits > 0)
plt.show()
# ## Training the Model
# Training the model took more than 15 hours (not continuously): checkpoints were saved and training was resumed on Colab for more than 30 epochs, with the augmentation ratio increased at every retraining of the model.
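# A minimal checkpointing sketch (the file name is illustrative, not the one used in the
# original Colab runs): save the model and optimizer state, then restore them to resume training.
torch.save(
    {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint.pth"
)
checkpoint = torch.load("checkpoint.pth", map_location=device)
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])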
# ## Visualize some predictions of the model
predictions = []
test_loader = DataLoader(
dataset=test_dataset, batch_size=1, shuffle=False, num_workers=4
)
model.eval()
for img, _, _ in tqdm(test_loader):
with torch.no_grad():
output = model(img.to(device))
output = output.data.cpu().numpy()
for out in output:
coords = extract_coords(out)
s = coords2str(coords)
predictions.append(s)
test = pd.read_csv(PATH + "sample_submission.csv")
test["PredictionString"] = predictions
test.to_csv("predictions.csv", index=False)
test.head()
# **Federated Learning**
import torch
import torch.nn as nn
# Define the neural network model
class AutonomousDrivingModel(nn.Module):
def __init__(self):
super(AutonomousDrivingModel, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
self.fc1 = nn.Linear(32 * 8 * 8, 64)
self.fc2 = nn.Linear(64, 10)
self.fc3 = nn.Linear(10, 1)
def forward(self, x):
x = self.pool(torch.relu(self.conv1(x)))
x = self.pool(torch.relu(self.conv2(x)))
x = x.view(-1, 32 * 8 * 8)
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
x = self.fc3(x)
return x
# Instantiate the model
model = AutonomousDrivingModel()
# Define the loss function and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
def train_model_federated(train_sets, model, criterion, optimizer, num_epochs):
for epoch in range(num_epochs):
        # Initialize an accumulator with zero tensors so the average below is taken
        # only over the per-vehicle weights (state_dict() returns references to the
        # live parameters, so accumulating into it directly would be incorrect)
        model_weights = {
            key: torch.zeros_like(value) for key, value in model.state_dict().items()
        }
# Average the model weights across all vehicles
for train_set in train_sets:
# Create a data loader for the current vehicle's training set
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
# Train the model on the current vehicle's training set
for inputs, labels in train_loader:
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Update the model weights for the current vehicle
vehicle_weights = model.state_dict()
for key in model_weights.keys():
model_weights[key] += vehicle_weights[key]
for key in model_weights.keys():
model_weights[key] /= len(train_sets)
model.load_state_dict(model_weights)
return model
# Train the model using federated learning
num_epochs = 10
model
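# A minimal usage sketch (assumption): the per-vehicle training sets below are random
# stand-ins built with TensorDataset (32x32 RGB inputs, scalar targets), not real driving data.
from torch.utils.data import TensorDataset

train_sets = [
    TensorDataset(torch.randn(32, 3, 32, 32), torch.randn(32, 1)) for _ in range(3)
]
model = train_model_federated(train_sets, model, criterion, optimizer, num_epochs)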
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
# Generate random dataset
num_images = 1000
image_size = 64
X = np.zeros((num_images, 3, image_size, image_size))
y = np.zeros(num_images)
for i in range(num_images):
# Generate random image
image = np.random.rand(3, image_size, image_size)
X[i] = image
# Assign random label (0 or 1)
label = random.randint(0, 1)
y[i] = label
# Define model architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.fc1 = nn.Linear(64 * (image_size // 4) * (image_size // 4), 128)
self.fc2 = nn.Linear(128, 2)
def forward(self, x):
x = self.pool(torch.relu(self.conv1(x)))
x = self.pool(torch.relu(self.conv2(x)))
x = x.view(-1, 64 * (image_size // 4) * (image_size // 4))
x = torch.relu(self.fc1(x))
x = self.fc2(x)
return x
# Initialize model, loss function, and optimizer
model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Split dataset into training and testing sets
split_ratio = 0.8
split_index = int(num_images * split_ratio)
X_train, y_train = X[:split_index], y[:split_index]
X_test, y_test = X[split_index:], y[split_index:]
# Train model
num_epochs = 10
for epoch in range(num_epochs):
running_loss = 0.0
for i in range(split_index):
# Get inputs and labels
inputs = torch.FloatTensor(X_train[i])
label = torch.LongTensor(np.array([y_train[i]]))
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = model(inputs.unsqueeze(0))
loss = criterion(outputs, label)
loss.backward()
optimizer.step()
# Print statistics
running_loss += loss.item()
        if i % 100 == 99:  # Print every 100 samples (one image is processed per step)
print(
"[Epoch %d, Batch %5d] loss: %.3f"
% (epoch + 1, i + 1, running_loss / 100)
)
running_loss = 0.0
print("Finished Training")
# Evaluate model on testing set
correct = 0
total = 0
with torch.no_grad():
for i in range(num_images - split_index):
# Get inputs and labels
inputs = torch.FloatTensor(X_test[i])
label = torch.LongTensor(np.array([y_test[i]]))
# Predict label
outputs = model(inputs.unsqueeze(0))
_, predicted = torch.max(outputs.data, 1)
# Update accuracy
total += label.size(0)
correct += (predicted == label).sum().item()
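# Report the accuracy on the held-out random test set
print("Test accuracy: %.2f%%" % (100 * correct / total))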
import numpy as np
import matplotlib.pyplot as plt
# Define the number of epochs, number of clients, and number of rounds
num_epochs = 10
num_clients = 5
num_rounds = 10
# Generate some random data for training
train_data = np.random.rand(100, 2)
train_labels = np.random.randint(0, 2, size=100)
# Initialize the model weights randomly
weights = np.random.rand(2)
# Define the learning rate and the fraction of clients to be selected per round
lr = 0.1
frac_clients = 0.5
# Define a function for training the model on a single client's data
def train_on_client(client_data, client_labels, weights):
for epoch in range(num_epochs):
for i in range(len(client_data)):
prediction = np.dot(client_data[i], weights)
error = client_labels[i] - prediction
weights += lr * error * client_data[i]
return weights
# Define a function for selecting a fraction of clients randomly for each round
def select_clients(num_clients, frac_clients):
num_selected = int(num_clients * frac_clients)
selected_clients = np.random.choice(num_clients, size=num_selected, replace=False)
return selected_clients
# Initialize the list to store the accuracies for each round
accuracies = []
# Run the Federated Learning process for the specified number of rounds
for round in range(num_rounds):
# Select a random fraction of clients for this round
selected_clients = select_clients(num_clients, frac_clients)
# Train the model on the selected clients' data
for client in selected_clients:
weights = train_on_client(
train_data[client::num_clients], train_labels[client::num_clients], weights
)
# Evaluate the model on the test data
test_data = np.random.rand(100, 2)
test_labels = np.random.randint(0, 2, size=100)
predictions = np.dot(test_data, weights)
predicted_labels = np.round(predictions)
accuracy = np.sum(predicted_labels == test_labels) / len(test_labels)
accuracies.append(accuracy)
# Plot the accuracies as a line graph
plt.plot(range(num_rounds), accuracies)
plt.xlabel("Round")
plt.ylabel("Accuracy")
plt.title("Federated Learning Accuracy")
plt.show()
import matplotlib.pyplot as plt
import numpy as np
# Initialize the lists to store the accuracies for each model
lr_accuracies = []
fl_accuracies = []
# The "vehicle" helpers and the linear-regression trainer are not defined in this
# notebook; the aliases below (an assumption) reuse the client helpers from the
# previous cell so that this comparison block runs end to end.
num_vehicles, frac_vehicles = num_clients, frac_clients
select_vehicles, train_on_vehicle = select_clients, train_on_client
train_linear_regression = train_on_client
# Train the Linear Regression model on the training data and evaluate on the test data
lr_weights = train_linear_regression(train_data, train_labels, np.random.rand(2))
predictions = np.dot(test_data, lr_weights)
predicted_labels = np.round(predictions)
accuracy = np.sum(predicted_labels == test_labels) / len(test_labels)
lr_accuracies.append((0, accuracy))
# Run the Federated Learning process for the specified number of rounds and evaluate on the test data
for round in range(num_rounds):
# Select a random fraction of vehicles for this round
selected_vehicles = select_vehicles(num_vehicles, frac_vehicles)
# Train the model on the selected vehicles' data
for vehicle in selected_vehicles:
fl_weights = train_on_vehicle(
train_data[vehicle::num_vehicles],
train_labels[vehicle::num_vehicles],
fl_weights,
)
# Evaluate the model on the test data
predictions = np.dot(test_data, fl_weights)
predicted_labels = np.round(predictions)
accuracy = np.sum(predicted_labels == test_labels) / len(test_labels)
fl_accuracies.append((round + 1, accuracy))
# Plot the accuracies as separate graphs for comparison
lr_x, lr_y = zip(*lr_accuracies)
fl_x, fl_y = zip(*fl_accuracies)
# Plot the accuracies for each model
lr_x, lr_y = zip(*lr_accuracies)
fl_x, fl_y = zip(*fl_accuracies)
plt.plot([1, 4], [0, 2])
plt.plot(lr_x, lr_y, "ro-", label="")
plt.legend(loc="lower right")
plt.xlabel("Rounds")
plt.ylabel("Accuracy")
plt.title("Linear Regression Model Accuracy")
plt.show()
# Plot the accuracies for each model
lr_x, lr_y = zip(*lr_accuracies)
fl_x, fl_y = zip(*fl_accuracies)
plt.plot([0, 8], [0, 4], "b-", label="Federated Learning")
plt.legend(loc="lower right")
plt.xlabel("Rounds")
plt.ylabel("Accuracy")
plt.title("Federated Learning Model Accuracy")
plt.show()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from rich import print as _pprint
import dabl
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)
def cprint(string):
_pprint(f"[black]{string}[/black]")
def stats(scol, col):
cprint(f"[red] Average Value in the Column: {scol} is: {np.mean(col):.4f} [/red]")
cprint(
f"[yellow] Median Value in the Column: {scol} is: {np.median(col):.4f} [/yellow]"
)
cprint(f"[blue] Maxmimum Value in the Column: {scol} is: {np.max(col):.4f} [/blue]")
cprint(
f"[green] Minimum Value in the Column: {scol} is: {np.min(col):.4f} [/green]"
)
cprint(
f"[cyan] 50th Quantile of the Column: {scol} is: {np.quantile(col, 0.5):.4f} [/cyan]"
)
cprint(f"75th Quantile of the Column: {scol} is: {np.quantile(col, 0.75):.4f}")
train_file = pd.read_csv("../input/tabular-playground-series-mar-2021/train.csv")
test_file = pd.read_csv("../input/tabular-playground-series-mar-2021/test.csv")
train_file.head()
train_file.info()
train_file.describe()
names = train_file["target"].value_counts().index.tolist()
values = train_file["target"].value_counts().tolist()
plt.style.use("fivethirtyeight")
plt.pie(x=values, labels=names, autopct="%1.2f%%")
plt.title("Target Value Pie-Chart")
plt.show()
# Get a list of Categorical as well as continuous column names
catCols = [f"cat{i}" for i in range(0, 10)]
conCols = [f"cont{i}" for i in range(0, 11)]
# Show Unique categories in feature columns
cprint("[bold magenta] Categorical Features and their Value Counts [/bold magenta]")
for col in catCols:
cprint(f"{'-'*20} Column: {col} {'-'*20}")
cprint(
f"Number of unique Categories in [red]{col}[/red]: [blue]{train_file[col].nunique()}[/blue]"
)
cprint(f"Value Counts for [red]{col}[/red]: \n{train_file[col].value_counts()}")
# Show some quick stats of Continuous Features
cprint("[bold green] Continuous Features and their Basic Statistics [/bold green]")
for col in conCols:
cprint(f"[bold]{'-'*20} Column: {col} {'-'*20}[/bold]")
stats(col, train_file[col])
|
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_absolute_error, mean_squared_error, make_scorer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
test = pd.read_csv("/kaggle/input/home-data-for-ml-course/test.csv")
train = pd.read_csv("/kaggle/input/home-data-for-ml-course/train.csv")
train.columns
train.head()
null_counts = train.isnull().sum()
null_counts = null_counts[null_counts > 0]
print(null_counts)
train["Alley"] = train["Alley"].fillna(0)
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="most_frequent")
train[
[
"MasVnrType",
"MasVnrArea",
]
] = imputer.fit_transform(train[["MasVnrType", "MasVnrArea"]])
train = pd.get_dummies(
train, columns=["SaleCondition", "SaleType", "MasVnrType"], drop_first=True
)
test["Alley"] = test["Alley"].fillna(0)
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="most_frequent")
test[
[
"MasVnrType",
"MasVnrArea",
]
] = imputer.fit_transform(test[["MasVnrType", "MasVnrArea"]])
test = pd.get_dummies(
test, columns=["SaleCondition", "SaleType", "MasVnrType"], drop_first=True
)
train.info()
train.columns
y = train.SalePrice
# Create X (After completing the exercise, you can return to modify this line!)
features = [
"MSSubClass",
"LotArea",
"OverallQual",
"OverallCond",
"YearBuilt",
"YearRemodAdd",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"FullBath",
"HalfBath",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
"PoolArea",
"MiscVal",
"MoSold",
"YrSold",
"MasVnrType_BrkFace",
"MasVnrType_None",
"MasVnrType_Stone",
"MasVnrArea",
"SaleCondition_AdjLand",
"SaleCondition_Alloca",
"SaleCondition_Family",
"SaleCondition_Normal",
"SaleCondition_Partial",
"SaleType_CWD",
"SaleType_Con",
"SaleType_ConLD",
"SaleType_ConLI",
"SaleType_ConLw",
"SaleType_New",
"SaleType_Oth",
"SaleType_WD",
]
# Select columns corresponding to features, and preview the data
X = train[features]
X.head()
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
from sklearn.preprocessing import StandardScaler
# Note: the scaled values below are not used by the tree-based models that follow,
# since random forests and boosted trees do not require feature scaling.
scaler = StandardScaler()
train[features] = scaler.fit_transform(train[features])
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(rf_val_predictions, val_y)
print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_val_mae))
# Define the parameter grid to search over
param_grid = {
"n_estimators": [55, 56, 57],
"max_depth": [14, 15, 20],
"max_leaf_nodes": [None, 10, 20],
"min_weight_fraction_leaf": [0, 0.1, 0.2],
"max_features": ["sqrt", "log2", 0.5],
"bootstrap": [True, False],
}
# Create the random forest regressor object
rf_model_1 = RandomForestRegressor(random_state=1)
# Define the scoring metric (mean squared error)
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
# Create the GridSearchCV object
rf_model_1_grid = GridSearchCV(
estimator=rf_model_1, param_grid=param_grid, scoring=scorer, cv=5, n_jobs=-1
)
# Fit the GridSearchCV object to the data
rf_model_1_grid.fit(train_X, train_y)
rf_model_1_pred = rf_model_1_grid.predict(val_X)
rf_model_1_mae = mean_absolute_error(rf_model_1_pred, val_y)
# Print the best hyperparameters and best score
print("Best Hyperparameters: ", rf_model_1_grid.best_params_)
print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_model_1_mae))
print("Best Score: ", -rf_model_1_grid.best_score_)
from catboost import CatBoostRegressor
# Define the parameter grid to search over
param_grid = {
"iterations": [2000, 3000],
"learning_rate": [0.01, 0.001],
#'depth': [4, 8],
"l2_leaf_reg": [1, 2],
"border_count": [256, 512],
#'bagging_temperature': [0.5, 1, 1.5],
}
# Create the CatBoost regressor object
cat_model_2 = CatBoostRegressor(random_seed=1, silent=True)
# Define the scoring metric (mean absolute error)
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
# Create the GridSearchCV object
cat_model_2_grid = GridSearchCV(
estimator=cat_model_2, param_grid=param_grid, scoring=scorer, cv=5, n_jobs=-1
)
# Fit the GridSearchCV object to the data
cat_model_2_grid.fit(train_X, train_y)
cat_model_2_pred = cat_model_2_grid.predict(val_X)
cat_model_2_mae = mean_absolute_error(cat_model_2_pred, val_y)
# Print the best hyperparameters and best score
print("Best Hyperparameters: ", cat_model_2_grid.best_params_)
print("Validation MAE for CatBoost Model: {:,.0f}".format(cat_model_2_mae))
print("Best Score: ", -cat_model_2_grid.best_score_)
import xgboost as xgb
from sklearn.metrics import make_scorer, mean_absolute_error
from sklearn.model_selection import GridSearchCV
param_grid = {
"n_estimators": [60, 65],
"max_depth": [20, 25],
"subsample": [0.7, 0.8, 1.0],
"colsample_bytree": [0.6, 0.8],
"gamma": [0, 0.2],
"learning_rate": [0.07, 0.1, 1],
}
xgb_model = xgb.XGBRegressor(random_state=1)
scorer = make_scorer(mean_absolute_error, greater_is_better=False)
xgb_model_grid = GridSearchCV(
estimator=xgb_model, param_grid=param_grid, scoring=scorer, cv=5, n_jobs=-1
)
xgb_model_grid.fit(train_X, train_y)
xgb_model_pred = xgb_model_grid.predict(val_X)
xgb_model_mae = mean_absolute_error(xgb_model_pred, val_y)
print("Best Hyperparameters: ", xgb_model_grid.best_params_)
print("Validation MAE for XGBoost Model: {:,.0f}".format(xgb_model_mae))
print("Best Score: ", -xgb_model_grid.best_score_)
rf_model_on_full_data = RandomForestRegressor(random_state=1)
test_X = test[features]
rf_model_on_full_data.fit(X, y)
rf_model_on_full_data_val_predictions = rf_model_on_full_data.predict(test_X)
# rf_model_on_full_data_val_mae = mean_absolute_error(rf_model_on_full_data_val_predictions,y)
# print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_model_on_full_data_val_mae))
cat_model_on_full_data = CatBoostRegressor(
random_seed=1,
silent=True,
border_count=512,
iterations=2000,
l2_leaf_reg=2,
learning_rate=0.01,
)
test_X = test[features]
cat_model_on_full_data.fit(X, y)
cat_model_on_full_data_val_pred = cat_model_on_full_data.predict(test_X)
output = pd.DataFrame({"Id": test.Id, "SalePrice": cat_model_on_full_data_val_pred})
output.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# In this notebook, we analyzed data on universities in Pakistan using Python and data analysis libraries. We cleaned the data, created various visualizations, and performed data analysis tasks such as university type and number of campuses. We drew conclusions and provided a comprehensive overview of the higher education sector in Pakistan.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the data from CSV file
df = pd.read_csv(
"/kaggle/input/all-of-the-universities-in-pakistan/All the Universities of Pakistan.csv"
)
df.head()
df.tail()
df.columns
df.info()
df.describe()
# Select the required columns
df = df[
[
"University",
"Location",
"Province",
"Established",
"Campuses",
"Specialization",
"Type",
]
]
# Rename columns
df = df.rename(
columns={
"University": "university_name",
"Location": "city",
"Province": "province",
"Established": "established_year",
"Campuses": "num_campuses",
"Specialization": "specialization",
"Type": "university_type",
}
)
# Handle missing values
df["num_campuses"] = df["num_campuses"].fillna(1)
df["established_year"] = df["established_year"].fillna(0).astype(int)
# Remove duplicate rows
df = df.drop_duplicates()
# Reset index
df = df.reset_index(drop=True)
# Print the cleaned data
print(df.head())
# Create bar chart of universities by province
province_counts = df["province"].value_counts()
plt.bar(province_counts.index, province_counts.values)
plt.title("Number of Universities by Province")
plt.xlabel("Province")
plt.ylabel("Number of Universities")
plt.xticks(rotation=90, fontsize=10)
plt.show()
# Create pie chart of university types
type_counts = df["university_type"].value_counts()
plt.pie(type_counts.values, labels=type_counts.index, autopct="%1.1f%%")
plt.title("University Types")
plt.show()
# Bar chart of the number of universities by city:
plt.figure(figsize=(10, 6)) # Increase the figure size
city_counts = df["city"].value_counts()
plt.bar(city_counts.index, city_counts.values)
plt.title("Number of Universities by City")
plt.xlabel("City")
plt.ylabel("Number of Universities")
plt.xticks(
rotation=90, fontsize=10
) # Rotate the x-tick labels by 90 degrees and increase the font size
plt.yticks(fontsize=10) # Increase the font size of y-tick labels
plt.show()
# Stacked bar chart of the number of universities by province and university type:
province_type_counts = df.groupby(["province", "university_type"]).size().unstack()
province_type_counts.plot(
kind="bar", stacked=True, figsize=(10, 6)
) # Increase the figure size
plt.title("Number of Universities by Province and Type")
plt.xlabel("Province")
plt.ylabel("Number of Universities")
plt.xticks(rotation=90) # Rotate the x-tick labels by 90 degrees
plt.show()
# Calculate the number of universities in each province:
province_counts = df["province"].value_counts()
print(province_counts)
# Calculate the number of universities by university type
type_counts = df["university_type"].value_counts()
print(type_counts)
# CONCLUSION
# Based on the analysis and visualization of the data, we can draw the following conclusions:
# The majority of universities in Pakistan are located in Punjab province, followed by Sindh and Khyber Pakhtunkhwa.
# The most common types of universities in Pakistan are public universities, followed by private universities and institutes.
# Islamabad has the highest number of specialized universities, followed by Lahore and Karachi.
# The mean number of campuses per university is highest in Islamabad, followed by Punjab and Sindh provinces.
# The most common academic program offered by universities in Pakistan is business administration, followed by computer science and engineering.
##Private universities generally have higher fees than public universities.
# Overall, the data suggests that Pakistan has a diverse and growing higher education sector, with a range of public and private universities offering a variety of academic programs. There are also significant regional differences in the distribution of universities and their specialization, with Islamabad emerging as a hub for specialized universities and Punjab having the highest number of campuses per university.
|
# # Importing Libraries 📥
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.max_rows", 10)
sns.set() # Setting seaborn as default style
# # Sneak Peek 🔎
df = pd.read_csv(
"../input/bachelor-degree-majors-by-age-sex-and-state/Bachelor_Degree_Majors.csv"
)
df
df.head()
df.tail()
df.describe()
df.info()
# Checking for duplicates
df.duplicated().sum()
# **Our Dataframe doesn't have any null or missing values 🎉 🎉 🎉**
# # Cleaning Dataset 🧹
# **Issues to fix within the data:**
# **1. Unnecessary data:**
# - The data value of ' 25 and older' in the 'Age Group' isn't needed.
# - The 'Total' data value in the 'Sex' Column is non essential
# **2. Fixing numerical Dtype and formatting:**
# - The dtypes of the numerical values need to be changed from (Dtype: object) to (Dtype: int).
# - Removing the ',' within all quantitative values in the dataset. (249,148 --> 249148).
# **3. Formatting:**
# - Creating a new column called 'STEM' by merging 'Science and Engineering' and 'Science and Engineering Related Fields' together.
# - Some minor adjustments.
# ***
# Removing rows that contains '25 and older' as a value in 'Age Group'
df = df[df["Age Group"] != "25 and older"]
# Removing the 'Total' data value in the 'Sex' Column
df = df[df["Sex"] != "Total"]
# Converting data type from (Dtype: object) to (Dtype: int)
def convert(string):
return int(string.replace(",", ""))
for col in df.iloc[:, 3:]:
df[col] = df[col].apply(convert)
# Merging 'Science and Engineering' & 'Science and Engineering Related Fields' columns together into a new column called 'STEM'
df["STEM"] = (
df["Science and Engineering"] + df["Science and Engineering Related Fields"]
)
df = df.drop(
["Science and Engineering", "Science and Engineering Related Fields"], axis=1
)
# Some minor adjustments to satisfy the OCD 👽
# Reset the index to start from zero
df.reset_index(drop=True, inplace=True)
# Rearrange the columns
df = df[
[
"State",
"Sex",
"Age Group",
"Bachelor's Degree Holders",
"STEM",
"Business",
"Education",
"Arts, Humanities and Others",
]
]
# Renaming the column to remove the single quotes
df.rename(
columns={"Bachelor's Degree Holders": "Bachelors Degree Holders"}, inplace=True
)
# Final form
df
# # Analyzing Dataset 📊
# ## 1. Analyzing based on states and sex (regardless of age)
# #### Q1: Number of bachelor's degree holders by states
# Group the dataframe by states
d1 = df.groupby(["State"]).sum().reset_index()
# Sorting the number of bachelor's degree holders in descending order
d1.sort_values(by="Bachelors Degree Holders", ascending=False, inplace=True)
plt.figure(figsize=(18, 14))
sns.barplot(x="Bachelors Degree Holders", y="State", data=d1)
# Another way to the see the result
d1.style.background_gradient(cmap="Blues")
# Insights ✅
# - Highest number of bachelor's degree holders are in: California, Texas, New York and Florida.
# - Lowest number of bachelor's degree holders are in: Vermont, North Dakota, Alaska and Wyoming.
# #### Q2: Number of bachelor's degree holders by states in each major
d2 = df.groupby(["State"]).sum().reset_index()
columns = d2.columns[2:]
i = 1
plt.figure(figsize=(15, 22))
for col in columns:
plt.subplot(2, 2, i)
sns.barplot(x=col, y="State", data=d2.sort_values(by=col, ascending=False))
i += 1
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.6, hspace=0.2)
# Insights ✅
# - California has the highest number of bachelor's degree holders across all majors except in education, where Texas takes the lead.
# - Wyoming has the lowest number of bachelor's degree holders across all majors except in education, where District of Columbia is the lowest.
# #### Q3: Number of bachelor's degree holders by states based on sex
d3 = df.groupby(["State", "Sex"]).sum().reset_index()
plt.figure(figsize=(18, 14))
sns.barplot(x="Bachelors Degree Holders", y="State", hue="Sex", data=d3, palette="cool")
# Insights ✅
# - The state of 'Utah' is the only state where the number of male bachelor's degree holders is larger than that of females.
# #### Q4: Number of bachelor's degree holders by states based on sex in each major
d4 = df.groupby(["State", "Sex"]).sum().reset_index()
columns = d4.columns[3:]
i = 1
plt.figure(figsize=(15, 22))
for col in columns:
plt.subplot(2, 2, i)
sns.barplot(x=col, y="State", hue="Sex", data=d4, palette="cool")
i += 1
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.6, hspace=0.2)
# Insights ✅
# - In 'STEM', number of males is higher than females in all states, except:
# - Alaska, Delaware, Hawaii, Kentucky, Louisiana, Maine, Mississippi, Nebraska, New York, North Dakota, Rhode Island, Tennessee, West Virginia.
# - In 'Business', number of males is higher than females in all states, except:
#   - District of Columbia. (We will see later in the EDA that District of Columbia has the highest percentage of BS holders per population.)
# - In 'Education', the number of females is higher than that of males in all states.
# - In 'Arts, Humanities and Others', the number of females is higher than that of males in all states.
# ## 2. Analyzing based on sex and age (regardless of states)
# #### Q5: Percentage of males and females in each major
d5 = df.groupby(["Sex"]).sum().reset_index()
d5
columns = d5.columns[1:]
i = 1
plt.figure(figsize=(20, 10))
for col in columns:
plt.subplot(1, 5, i)
plt.title(col)
plt.pie(
d5[col],
labels=d5["Sex"],
autopct="%.1f%%",
colors=["#AA6AEA", "#6AAAEA"],
pctdistance=0.5,
labeldistance=1.1,
)
# add a circle at the center to transform it in a donut chart
my_circle = plt.Circle((0, 0), 0.7, color="white")
p = plt.gcf()
p.gca().add_artist(my_circle)
i += 1
# Insights ✅
# - In general, the number of females is bigger than that of males.
# - Number of males is bigger in STEM and Business.
# - Number of females is much higher in Education and Arts, Humanities & Others.
# - Biggest difference in numbers can be found in Education.
# #### Q6: Number of males and females based on age group
d6 = df.groupby(["Sex", "Age Group"]).sum().reset_index()
d6
plt.figure(figsize=(7, 7))
sns.barplot(
x="Age Group", y="Bachelors Degree Holders", hue="Sex", data=d6, palette="cool"
)
# Insights ✅
# - Number of males is higher than females only in the '65 and older' age group.
# #### Q7: Number of males and females based on age group in each major
d7 = df.groupby(["Sex", "Age Group"]).sum().reset_index()
d7
columns = d7.columns[3:]
i = 1
plt.figure(figsize=(10, 10))
for col in columns:
plt.subplot(2, 2, i)
sns.barplot(x="Age Group", y=col, hue="Sex", data=d7, palette="cool")
i += 1
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.4, hspace=0.4)
# Insights ✅
# - In 'STEM', the number of females is higher than that of males in the '25 to 39' age group.
# - In 'Business', the number of males is higher than that of females in each and every age group.
# - In 'Education' and 'Arts, Humanities and Others', the number of females is higher than that of males in all age groups.
# - The 40-64 age group has the highest number of bachelor's degree holders.
# ## 3. Analyzing based on populations
# #### Importing and cleaning the dataset
# Source: U.S. Census Bureau, Population Division
# https://www.census.gov/newsroom/press-kits/2019/national-state-estimates.html
population = pd.read_csv(
"../input/homeless-in-america-version-2-20102019/nst-est2019-alldata.csv"
)
population
# Extracting only "POPESTIMATE2019" resident total population estimate for 2019
# Excluding Puerto Rico as well
population = population.loc[5:55, ["NAME", "POPESTIMATE2019"]]
population.reset_index(drop=True, inplace=True)
population
# #### Creating new DataFrame (bspop_df) by merging the population and Bachelor's Degree Dataframes together
bspop_df = df.groupby(["State"]).sum().reset_index()
bspop_df
# Before merging, let’s check if we have the same 'states' values in both DataFrames
all(bspop_df["State"] == population["NAME"])
# Adding the "Population" column to the bspop_df DataFrame
bspop_df.insert(1, "Population", population["POPESTIMATE2019"])
bspop_df
# Calculating the percentage of Bachelor holder’s per population
bspop_df.insert(
3, "BS%", (bspop_df["Bachelors Degree Holders"] / bspop_df["Population"]) * 100
)
bspop_df
# Calculating the percentage of Bachelor holder’s in each major
bspop_df.insert(
5, "STEM%", (bspop_df["STEM"] / bspop_df["Bachelors Degree Holders"]) * 100
)
bspop_df.insert(
7, "Business%", (bspop_df["Business"] / bspop_df["Bachelors Degree Holders"]) * 100
)
bspop_df.insert(
9,
"Education%",
(bspop_df["Education"] / bspop_df["Bachelors Degree Holders"]) * 100,
)
bspop_df.insert(
11,
"Arts, Humanities and Others%",
(bspop_df["Arts, Humanities and Others"] / bspop_df["Bachelors Degree Holders"])
* 100,
)
# Final form
bspop_df
# #### Q8: Comparing the percentage of bachelor's degree holders per population by states
d8 = bspop_df
d8.sort_values(by="BS%", ascending=False, inplace=True)
d8
plt.figure(figsize=(18, 14))
sns.barplot(x="BS%", y="State", data=d8)
# Insights ✅
# - Highest percentage of bachelor's degree holders per population are in: District of Columbia, Massachusetts and Colorado.
# - Lowest percentage of bachelor's degree holders per population are in: Arkansas, West Virginia and Mississippi.
# - This is the beautiful thing about looking at data from a different perspective. When we compared based on the number of bachelor's degree holders, California was in first place, but now we find it in the 17th position.
# #### Q9: Comparing the percentage of bachelor's degree holders per population by states in each major
d9 = bspop_df
d9
columns = d9.columns[5::2]
i = 1
plt.figure(figsize=(15, 22))
for col in columns:
plt.subplot(2, 2, i)
sns.barplot(x=col, y="State", data=d9.sort_values(by=col, ascending=False))
i += 1
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.6, hspace=0.2)
# Another way to the see the result
d9.sort_values(by="Population", ascending=False, inplace=True)
d9.style.background_gradient(cmap="Blues")
# Insights ✅
# - Very interesting insights about “District of Columbia”:
# - As we saw before, it has the highest percentage of bachelor's degree holders per population (Q8).
# - Now we can find it with the highest percentage of bachelor's degree holders in STEM and Arts, and the lowest in Education and Business (2nd lowest).
# - These guys are LIT 🔥, taking literally the famous quote “There is no such thing as second place. Either you're first or you're last" to another level.
# #### Q10: Distribution of bachelor's degree holders across majors (Heatmap)
d10 = bspop_df[
["State", "BS%", "STEM%", "Business%", "Education%", "Arts, Humanities and Others%"]
]
d10
plt.figure(figsize=(15, 15))
d10 = d10.groupby(["State"]).sum()
sns.heatmap(d10, annot=True, fmt="f", cmap="Reds")
# Insights ✅
# Based on the heatmap, we can see that STEM is crushing it in every single state.
# - Lowest percentage in Nebraska (38.5%) and the highest in District of Columbia (53.6%).
# - The distribution of bachelor's degree holders across majors, in descending order, is: STEM, Arts and Humanities, Business, and Education.
# #### Q11: Relation between population and percentage of bachelor's degree holders across majors
d11 = bspop_df
d11
sns.set_theme(style="white")
columns = d11.columns[3::2]
i = 1
plt.figure(figsize=(20, 20))
for col in columns:
plt.subplot(5, 1, i)
sns.lineplot(x="Population", y=col, data=d11)
i += 1
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.2, hspace=0.6)
|
# # Importing Libraries
# https://medium.com/@Skpd/pix2pix-gan-for-generating-map-given-satellite-images-using-pytorch-6e50c318673a
# https://github.com/shashi7679/pix2pix-GANs/blob/master/dataset.py
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset
import albumentations as A
device = "cuda" if torch.cuda.is_available() else "cpu"
lr = 2e-4
beta1 = 0.4
BatchSize = 16
ImgSize = 256
l1Lambda = 100
lambdaGP = 10
epochs = 100
class Satellite2Map_Data(Dataset):
def __init__(self, root, transform=None):
self.root = root
list_files = os.listdir(self.root)
self.n_samples = list_files
self.transform = transform
def __len__(self):
return len(self.n_samples)
def __getitem__(self, idx):
image_name = self.n_samples[idx]
# print(self.n_samples)
image_path = os.path.join(self.root, image_name)
        # Read the paired image (satellite | map) and convert it from BGR to RGB
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, _ = image.shape
width_cutoff = width // 2
satellite_image = image[:, :width_cutoff, :]
map_image = image[:, width_cutoff:, :]
        if self.transform:
            transformed = self.transform(image=satellite_image, mask=map_image)
            satellite_image = transformed["image"]
            map_image = transformed["mask"]
        return satellite_image, map_image
transforms = A.Compose(
[
A.Resize(ImgSize, ImgSize),
A.RandomRotate90(p=0.4),
A.Transpose(p=0.5), # Change x-axis to y and Y-axis to X.
A.AdvancedBlur(blur_limit=(3, 7), p=0.4),
]
)
dataset = Satellite2Map_Data("/kaggle/input/pix2pix-maps/train", transform=transforms)
loader = DataLoader(dataset, batch_size=2, shuffle=True)
for indx, (x, y) in enumerate(loader):
print(x.shape, y.shape, x.dtype, y.dtype, type(x))
c = x[0]
d = y[0]
break
plt.subplot(1, 2, 1)
plt.imshow(c)
plt.subplot(1, 2, 2)
plt.imshow(d)
plt.show()
# # Class Generator
class ConvBlock(nn.Module):
def __init__(self, inputFeatures, outFeatures, kernelSize, stride, padding):
super(ConvBlock, self).__init__()
self.inputFeatures = inputFeatures
self.outFeatures = outFeatures
self.kernelSize = kernelSize
self.stride = stride
self.padding = padding
self.convBlock = nn.Sequential(
nn.Conv2d(
self.inputFeatures,
self.outFeatures,
self.kernelSize,
self.stride,
self.padding,
),
nn.BatchNorm2d(self.outFeatures),
nn.LeakyReLU(0.2),
)
def forward(self, x):
out = self.convBlock(x)
return out, out
class ConvDownBlock(nn.Module):
def __init__(self, inputFeatures, outFeatures, kernelSize, stride, padding):
super(ConvDownBlock, self).__init__()
self.inputFeatures = inputFeatures
self.outFeatures = outFeatures
self.kernelSize = kernelSize
self.stride = stride
self.padding = padding
self.convUpBlock = nn.Sequential(
nn.ConvTranspose2d(
self.inputFeatures,
self.outFeatures,
self.kernelSize,
self.stride,
self.padding,
),
nn.BatchNorm2d(self.outFeatures),
nn.LeakyReLU(0.2),
)
def forward(self, x):
out = self.convUpBlock(x)
return out
class Generator(nn.Module):
def __init__(self, inputShape, hiddenFeatures=32, heads=2, encoderNumber=2):
super(Generator, self).__init__()
self.channel, self.height, self.width = inputShape
self.hiddenFeatures = hiddenFeatures
assert self.channel == 3, f"Channels should be 3 not {self.channel}"
assert (
self.height == self.width
), f"Height and Width must be same; Given- {self.height} and {self.width}"
# Encoder
self.d1 = ConvBlock(self.channel, self.hiddenFeatures, 4, 2, 1)
self.d2 = ConvBlock(self.hiddenFeatures, self.hiddenFeatures * 2, 4, 2, 1)
self.d3 = ConvBlock(self.hiddenFeatures * 2, self.hiddenFeatures * 4, 4, 2, 1)
self.d4 = ConvBlock(self.hiddenFeatures * 4, self.hiddenFeatures * 8, 4, 2, 1)
self.d5 = ConvBlock(self.hiddenFeatures * 8, self.hiddenFeatures * 16, 4, 2, 1)
self.d6 = ConvBlock(self.hiddenFeatures * 16, self.hiddenFeatures * 32, 4, 2, 1)
# Bottleneck
self.bottleNeck = nn.TransformerEncoderLayer(
d_model=self.hiddenFeatures * 32, nhead=heads
)
self.transformerEncoder = nn.TransformerEncoder(
self.bottleNeck, num_layers=encoderNumber
)
# Decoder
self.up6 = ConvDownBlock(
self.hiddenFeatures * 32, self.hiddenFeatures * 16, 4, 2, 1
) # 1024 -> 512
self.up5 = ConvDownBlock(
self.hiddenFeatures * 16, self.hiddenFeatures * 8, 4, 2, 1
) # 512 -> 256
self.up4 = ConvDownBlock(
self.hiddenFeatures * 8, self.hiddenFeatures * 4, 4, 2, 1
) # 256 -> 128
self.up3 = ConvDownBlock(
self.hiddenFeatures * 4, self.hiddenFeatures * 2, 4, 2, 1
) # 128 -> 64
self.up2 = ConvDownBlock(
self.hiddenFeatures * 2, self.hiddenFeatures, 4, 2, 1
) # 64 -> 32
        # The last ConvTranspose block must not have batch normalisation, for better training stability
self.up1 = nn.Sequential(
nn.ConvTranspose2d(self.hiddenFeatures, self.channel, 4, 2, 1), nn.Tanh()
) # out -> 32, 3
def forward(self, Satimage):
# input -> (B, 3, 256, 256)
same1, pass1 = self.d1(Satimage)
same2, pass2 = self.d2(pass1)
same3, pass3 = self.d3(pass2)
same4, pass4 = self.d4(pass3)
same5, pass5 = self.d5(pass4)
same6, pass6 = self.d6(pass5) # out -> (B,1024, 4, 4)
# Reshaping for bottleNeck Transformer Encoder
pass6 = torch.reshape(pass6, (same6.shape[0], same6.shape[1], -1))
pass6 = torch.permute(pass6, (0, 2, 1))
bottleNeckLayer = self.transformerEncoder(pass6)
# Reshaping to 4d for ConvTranspose operation
bottleNeckLayer = torch.permute(bottleNeckLayer, (0, 2, 1))
bottleNeckLayer = torch.reshape(
bottleNeckLayer,
(
bottleNeckLayer.shape[0],
bottleNeckLayer.shape[1],
same6.shape[2],
same6.shape[3],
),
)
# out->torch.Size([2, 1024, 4, 4])
# Decoder Block/ Upsampling with Adding Residual connections
upSample6 = self.up6(torch.add(bottleNeckLayer, same6))
upSample5 = self.up5(torch.add(upSample6, same5))
upSample4 = self.up4(torch.add(upSample5, same4))
upSample3 = self.up3(torch.add(upSample4, same3))
upSample2 = self.up2(torch.add(upSample3, same2))
genOutput = self.up1(torch.add(upSample2, same1))
return genOutput
gen = Generator((3, 256, 256)).to(device)
gen(torch.randn(2, 3, 256, 256, device=device)).shape # torch.Size([2, 3, 256, 256])
# cc = nn.Conv2d(3, 16, 4, 2,1)
# dd = cc(torch.rand(4,3,64,64))
# dd.shape # torch.Size([4, 16, 32, 32])
# m = nn.ConvTranspose2d(1024, 712, 4, stride=2, padding=1)
# c1 = m(torch.randn(4,1024,32,32))
# c1.shape #torch.Size([4, 16, 32, 32])
# encoder_layer = nn.TransformerEncoderLayer(d_model=1024, nhead=8)
# transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=2)
# src = torch.rand(4,4, 1024)
# out = transformer_encoder(src)
# out.shape #torch.Size([4, 4, 1024])
# # Class Discriminator
class Discriminator(nn.Module):
def __init__(self, inputShape, hiddenDims=16):
super(Discriminator, self).__init__()
self.channel, self.height, self.width = inputShape
self.intialconv = nn.Sequential(
nn.Conv2d(self.channel, hiddenDims, 4, 2, 1), nn.ReLU()
)
self.intermidate = nn.Sequential(
self._intermidateBlock(hiddenDims, hiddenDims * 2),
self._intermidateBlock(hiddenDims * 2, hiddenDims * 4),
self._intermidateBlock(hiddenDims * 4, hiddenDims * 8),
)
self.lastlayer = nn.Sequential(
nn.Conv2d(hiddenDims * 8, 1, 4, 2, 1),
)
def _intermidateBlock(self, inFeatures, outFeatures):
return nn.Sequential(
nn.Conv2d(inFeatures, outFeatures, 4, 2, 1),
nn.BatchNorm2d(outFeatures),
nn.LeakyReLU(0.2),
)
def forward(self, x, y):
res = torch.cat([x, y], dim=1)
# return self.intialconv(res)
return self.lastlayer(self.intermidate(self.intialconv(res)))
disc = Discriminator((6, ImgSize, ImgSize), 16).to(device)
# disc(torch.rand(2,3,256,256,device=device), torch.rand(2,3,256,256,device=device)).shape #torch.Size([2, 1, 8, 8])
# # Loss Function and Optimisers
BCELoss = nn.BCEWithLogitsLoss()
L1Loss = nn.L1Loss()
optimizerD = torch.optim.Adam(disc.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = torch.optim.Adam(gen.parameters(), lr=lr, betas=(beta1, 0.999))
# # training Loop
for i in range(epochs):
for idx, (x, y) in enumerate(loader):
        # Albumentations returns HWC uint8 arrays; convert them to NCHW float tensors in
        # [-1, 1] so they match the Conv2d input layout and the generator's Tanh output
        x = (x.permute(0, 3, 1, 2).float() / 127.5 - 1.0).to(device)
        y = (y.permute(0, 3, 1, 2).float() / 127.5 - 1.0).to(device)
# Train Discriminator
# 1) E[log D(x,y)]
optimizerD.zero_grad()
realDiscOut = disc(x, y)
realDiscLoss = BCELoss(realDiscOut, torch.ones_like(realDiscOut))
# 2) E[log(1 - D(x,G(x)))]
        fakeGenOut = gen(x)
        # Detach so the discriminator update does not backpropagate into the generator graph
        fakediscOut = disc(x, fakeGenOut.detach())
        fakeDiscLoss = BCELoss(fakediscOut, torch.zeros_like(fakediscOut))
DiscLoss = (fakeDiscLoss + realDiscLoss) / 2
DiscLoss.backward()
optimizerD.step()
        # Train Generator: L_L1(G) = E_{x,y,z}[ ||y - G(x, z)||_1 ]
optimizerG.zero_grad()
fakeDisc = disc(x, fakeGenOut)
genFakeLoss = BCELoss(fakeDisc, torch.ones_like(fakeDisc))
l1 = L1Loss(fakeGenOut, y) * l1Lambda
GenLoss = genFakeLoss + l1
GenLoss.backward()
optimizerG.step()
print(
f"============================================== EPOCH: {i} Completed============================================================"
)
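# A quick qualitative check (illustrative sketch): show one satellite image next to its
# ground-truth map and the generator's output. It assumes the same HWC-uint8 to [-1, 1]
# conversion used in the training loop above.
gen.eval()
with torch.no_grad():
    x, y = next(iter(loader))
    x = (x.permute(0, 3, 1, 2).float() / 127.5 - 1.0).to(device)
    y = (y.permute(0, 3, 1, 2).float() / 127.5 - 1.0).to(device)
    fake = gen(x)
plt.figure(figsize=(12, 4))
for j, (img, title) in enumerate(
    [(x[0], "Satellite"), (y[0], "Ground-truth map"), (fake[0], "Generated map")]
):
    plt.subplot(1, 3, j + 1)
    # Map the [-1, 1] tensor back to [0, 1] and to HWC for display
    plt.imshow(((img + 1.0) / 2.0).clamp(0, 1).permute(1, 2, 0).cpu().numpy())
    plt.title(title)
    plt.axis("off")
plt.show()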
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
from sklearn.datasets import fetch_openml
mnist = fetch_openml("mnist_784", version=1, as_frame=False)
mnist.keys()
x, y = mnist["data"], mnist["target"]
y.shape
x[0].shape
plt.imshow(x[0].reshape(28, 28))
x_train, x_test, y_train, y_test = x[:60000], x[60000:], y[:60000], y[60000:]
y_test
# Binary target for a "5 vs. not-5" detector
y_train_5 = [label == "5" for label in y_train]
y_test_5 = [label == "5" for label in y_test]
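# As a possible next step, a linear classifier from sklearn.linear_model (imported above)
# can be fitted on the binary "is it a 5?" task. The SGDClassifier settings here are
# illustrative, not tuned.
sgd_clf = sklearn.linear_model.SGDClassifier(random_state=42)
sgd_clf.fit(x_train, y_train_5)
# Sanity check on the very first image (its true label is y[0])
print(sgd_clf.predict([x[0]]), "true label:", y[0])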
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.metrics import mean_squared_error
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
db = pd.read_csv("/kaggle/input/transformator-health-index/DatasetA.csv")
db["Water"] = db["Water"].astype("float64")
db["IFT"] = db["IFT"].astype("float64")
# Splitting train and test
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(db, test_size=0.25, random_state=11)
# Setting the labels
y_train = train_set["HI"]
y_test = test_set["HI"]
# Dropping the labels
train_set = train_set.drop("HI", axis=1)
test_set = test_set.drop("HI", axis=1)
"""
# Scaling the data. The output is a numpy array
from sklearn.preprocessing import StandardScaler
std_scaler = StandardScaler()
train_set_scaled = std_scaler.fit_transform(train_set)
test_set_scaled = std_scaler.fit_transform(test_set)
# This was a numpy array, setting the data to pandas format:
X_train = pd.DataFrame(train_set, columns = train_set.columns, index = train_set.index)
X_test = pd.DataFrame(test_set, columns = test_set.columns, index = test_set.index)
"""
X_train = train_set.copy()
X_test = test_set.copy()
# # Using XGBOOST
"""
import optuna
# Define the objective function for Optuna
def objective(trial):
# Set the hyperparameters to be tuned by Optuna
params = {
'n_estimators': trial.suggest_int('n_estimators', 100, 1000),
'max_depth': trial.suggest_int('max_depth', 3, 10),
'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.5),
'subsample': trial.suggest_float('subsample', 0.5, 1),
'colsample_bytree': trial.suggest_float('colsample_bytree', 0.5, 1),
'reg_alpha': trial.suggest_float('reg_alpha', 1e-5, 10),
'reg_lambda': trial.suggest_float('reg_lambda', 1e-5, 10),
'random_state': 42,
'tree_method': trial.suggest_categorical('tree_method', ['gpu_hist'])
}
# Initialize the XGBoost Regressor model with the suggested hyperparameters
xgb_model = xgb.XGBRegressor(**params)
# Fit the model on the training data
xgb_model.fit(X_train_new, y_expensive)
# Predict the target values for the test data
y_expensive_pred = xgb_model.predict(X_test_new)
# Compute the mean squared error
mse = mean_squared_error(y_expensive_test, y_expensive_pred)
return mse
# Create an Optuna study
study = optuna.create_study(direction='minimize')
# Optimize the hyperparameters
study.optimize(objective, n_trials=100)
# Print the best hyperparameters
print("Best hyperparameters: ", study.best_params)
"""
Best_hyperparameters = {
"n_estimators": 721,
"max_depth": 10,
"learning_rate": 0.10826726920251205,
"subsample": 0.9466559602430304,
"colsample_bytree": 0.5007784856806652,
"reg_alpha": 0.0028607606172757665,
"reg_lambda": 5.377369500486225e-05,
}
# # Predicting Furan using xgboost
db = pd.read_csv("/kaggle/input/transformator-health-index/DatasetA.csv")
db["Water"] = db["Water"].astype("float64")
db["IFT"] = db["IFT"].astype("float64")
# Splitting train and test
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(db, test_size=0.2, random_state=11)
# Setting the labels
y_train = train_set["HI"]
y_test = test_set["HI"]
# Dropping the labels
train_set = train_set.drop("HI", axis=1)
test_set = test_set.drop("HI", axis=1)
# Scaling the data. The output is a numpy array
# from sklearn.preprocessing import StandardScaler
# std_scaler = StandardScaler()
# train_set_scaled = std_scaler.fit_transform(train_set)
# test_set_scaled = std_scaler.fit_transform(test_set)
# This was a numpy array, setting the data to pandas format:
X_train = pd.DataFrame(train_set, columns=train_set.columns, index=train_set.index)
X_test = pd.DataFrame(test_set, columns=test_set.columns, index=test_set.index)
y_Furan = X_train[["Furan"]]
X_train_1 = X_train.drop(["Furan", "IFT"], axis=1)
y_Furan_test = X_test[["Furan"]]
X_test_1 = X_test.drop(["IFT", "Furan"], axis=1)
model_Furan = xgb.XGBRegressor(**Best_hyperparameters)
# Fit the model on the training data
model_Furan.fit(X_train_1, y_Furan)
# Predict the target values for the test data
y_Furan_pred = model_Furan.predict(X_test_1)
# Evaluate the model performance
mse_Furan = mean_squared_error(y_Furan_test, y_Furan_pred)
# r2 = r2_score(y_expensive_test, y_expensive_pred)
print("MSE: ", mse_Furan)
pd.DataFrame([np.ravel(y_Furan_test), y_Furan_pred])
# # Predicting features separately using Neural Network
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
# Define the neural network model
model = keras.Sequential(
[
layers.Dense(64, input_shape=[13]),
layers.BatchNormalization(),
layers.Activation("relu"),
layers.Dropout(0.1),
layers.Dense(32),
layers.BatchNormalization(),
layers.Activation("relu"),
layers.Dropout(0.1),
layers.Dense(1),
]
)
# Compile the model
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=["mean_absolute_error", "mean_squared_error"],
)
# Define the callbacks
checkpoint = ModelCheckpoint("best_model.h5", save_best_only=True)
early_stop = EarlyStopping(patience=25, restore_best_weights=True)
# Train the model
history = model.fit(
X_train_1,
y_Furan,
validation_data=(X_test_1, y_Furan_test),
batch_size=32,
epochs=300,
callbacks=[checkpoint, early_stop],
)
# Evaluate the model on the test data
model.evaluate(X_test_1, y_Furan_test)
# # Predicting IFT Using XGBOOST
y_IFT = X_train[["IFT"]]
X_train_2 = X_train.drop(["Furan", "IFT"], axis=1)
y_IFT_test = X_test[["IFT"]]
X_test_2 = X_test.drop(["IFT", "Furan"], axis=1)
model_IFT = xgb.XGBRegressor(**Best_hyperparameters)
# Fit the model on the training data
model_IFT.fit(X_train_2, y_IFT)
# Predict the target values for the test data
y_IFT_pred = model_IFT.predict(X_test_2)
# Evaluate the model performance
mse_IFT = mean_squared_error(y_IFT_test, y_IFT_pred)
# r2 = r2_score(y_expensive_test, y_expensive_pred)
print("MSE: ", mse_IFT)
pd.DataFrame([np.ravel(y_IFT_test), y_IFT_pred])
|
## Logging into wandb
import wandb
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
secret_value_0 = user_secrets.get_secret("wandb_api")
wandb.login(key=secret_value_0)
import sys
sys.path.append("./TiLT-Implementation/src/")
import os
from transformers import AutoTokenizer, AutoConfig
from datasets import load_dataset
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from pytorch_lightning.loggers import CSVLogger, WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
from dataset import ExtFUNSDDs
from torchvision import transforms
from tqdm.auto import tqdm
## Custom imports
from visual_backbone import Unet_encoder, RoIPool
from t5 import T5ForConditionalGenerationAbstractive, T5Stack
from transformers import AutoModel
# ## 1.1. Preparing the dataset
device = "cuda" if torch.cuda.is_available() else "cpu"
hf_ds = load_dataset("nielsr/funsd-layoutlmv3")
model_name = "t5-base"
## Visual Embedding extractor's parameters
in_channels = 3
num_pool_layers = 3
channels = 16
sampling_ratio = 2
spatial_scale = 48 / 384
output_size = (3, 3)
load_weights = True
max_epochs = 50
## FUNSD Dataset specific
num_classes = 7
## Tokenizer's parameter
model_max_length = 512
t5_config = AutoConfig.from_pretrained(model_name)
## Adding new parameters
t5_config.update(
dict(
in_channels=in_channels,
num_pool_layers=num_pool_layers,
channels=channels,
model_max_length=model_max_length,
output_size=output_size,
spatial_scale=spatial_scale,
sampling_ratio=sampling_ratio,
use_cache=False,
load_weights=load_weights,
lr=2e-4,
num_classes=num_classes,
max_epochs=max_epochs,
)
)
def get_id2label_and_label2id():
label2id = {
"O": 0,
"B-HEADER": 1,
"I-HEADER": 2,
"B-QUESTION": 3,
"I-QUESTION": 4,
"B-ANSWER": 5,
"I-ANSWER": 6,
}
id2label = {
0: "O",
1: "B-HEADER",
2: "I-HEADER",
3: "B-QUESTION",
4: "I-QUESTION",
5: "B-ANSWER",
6: "I-ANSWER",
}
return id2label, label2id
def convert_id_to_label(list_of_label):
return [id2label[x] for x in list_of_label]
# train_new_tags = list(map(lambda x : convert_id_to_label(x), hf_ds['train']['ner_tags']))
# test_new_tags = list(map(lambda x : convert_id_to_label(x), hf_ds['test']['ner_tags']))
# hf_ds['train'] = hf_ds['train'].remove_columns("ner_tags").add_column("ner_tags", train_new_tags)
# hf_ds['test'] = hf_ds['test'].remove_columns("ner_tags").add_column("ner_tags", test_new_tags)
# ### 1.2 Writing the `collate_fn` for custom handling of the dataloader
class CollateFn(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, list_of_ds):
simple_keys = [
"input_ids",
"attention_mask",
"bboxes",
"pixel_values",
"labels",
]
actual_batch = {}
for key in simple_keys:
actual_batch[key] = torch.stack([x[key] for x in list_of_ds])
# actual_batch['labels'] = self.tokenizer.batch_encode_plus([x['labels'] for x in list_of_ds], return_tensors = 'pt', is_split_into_words = True,
# padding='max_length', truncation = True)['input_ids']
return actual_batch
# sample_batch_encoding = collate_fn([train_ds[0], train_ds[1]])
# for key in sample_batch_encoding:
# sample_batch_encoding[key] = sample_batch_encoding[key].to(device)
# ## 2.1 Preparing the visual model
class VisualEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.unet_encoder = Unet_encoder(
in_channels=config.in_channels,
channels=config.channels,
num_pool_layers=config.num_pool_layers,
)
self.roi_pool = RoIPool(
output_size=config.output_size, spatial_scale=config.spatial_scale
)
self.proj = nn.Linear(in_features=128 * 3 * 3, out_features=config.d_model)
self.config = config
def forward(self, pixel_values, bboxes):
image_embedding = self.unet_encoder(pixel_values)
feature_maps_bboxes = self.roi_pool(image_embedding, bboxes).flatten(2)
projection = self.proj(feature_maps_bboxes)
return projection
# ## 2.2 Preparing the semantic model
class TiLTTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.visual_embedding_extractor = VisualEmbedding(config)
self.t5_model = T5ForConditionalGenerationAbstractive(config)
def generate(self, batch):
total_embedding = self.common_step(batch)
return self.t5_model.generate(input_embeds=total_embedding)
def common_step(self, batch):
## Visual embedding
visual_embedding = self.visual_embedding_extractor(
pixel_values=batch["pixel_values"], bboxes=batch["bboxes"]
)
## Semantic embedding from t5_model's embedding layer
semantic_embedding = self.t5_model.shared(batch["input_ids"])
## Net embedding is addition of both the embeddings
total_embedding = visual_embedding + semantic_embedding
return total_embedding
def forward(self, batch):
total_embedding = self.common_step(batch)
## This is then fed to t5_model
final_output = self.t5_model(
attention_mask=batch["attention_mask"],
inputs_embeds=total_embedding,
labels=batch["labels"],
)
return final_output
# tilt_model = TiLTTransformer(t5_config).to(device)
# output = tilt_model(sample_batch_encoding)
# ## 3.1 Preparing the metrics to evaluate the predictions
import evaluate
def get_labels(predictions, references):
# Transform predictions and references tensors to numpy arrays
if predictions.device.type == "cpu":
y_pred = predictions.detach().clone().numpy()
y_true = references.detach().clone().numpy()
else:
y_pred = predictions.detach().cpu().clone().numpy()
y_true = references.detach().cpu().clone().numpy()
# Remove ignored index (special tokens)
true_predictions = [
[id2label[p] for (p, l) in zip(pred, gold_label) if l != -100]
for pred, gold_label in zip(y_pred, y_true)
]
true_labels = [
[id2label[l] for (p, l) in zip(pred, gold_label) if l != -100]
for pred, gold_label in zip(y_pred, y_true)
]
return true_predictions, true_labels
# labels = sample_batch_encoding['labels']
# true_predictions, true_labels = get_labels(predictions = output.logits.argmax(axis = -1), references = labels)
# eval_metric = evaluate.load("seqeval")
# metric = eval_metric.compute(predictions = true_predictions, references = true_labels)
# ## Part: 4 Writing the `pytorch_lightning` code for training on FUNSD
id2label, label2id = get_id2label_and_label2id()
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Lambda(lambda x: 2 * x - 1)]
)
## Tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_name, use_fast=True, model_max_length=model_max_length
)
train_ds = ExtFUNSDDs(hf_ds["train"], tokenizer=tokenizer, transform=transform)
val_ds = ExtFUNSDDs(hf_ds["test"], tokenizer=tokenizer, transform=transform)
collate_fn = CollateFn(tokenizer)
class DataModule(pl.LightningDataModule):
def __init__(self, train_dataset, eval_dataset, batch_size: int = 2):
super(DataModule, self).__init__()
self.batch_size = batch_size
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
collate_fn=collate_fn,
)
def val_dataloader(self):
return DataLoader(
self.eval_dataset,
batch_size=self.batch_size,
shuffle=False,
collate_fn=collate_fn,
)
class TiltModel(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.config = config
self.tilt_model = TiLTTransformer(config)
self.eval_metric = evaluate.load("seqeval")
def forward(self, batch):
return self.tilt_model(batch)
def training_step(self, batch, batch_idx):
output = self(batch)
loss = output.loss
predictions = output.logits.argmax(dim=-1)
true_predictions, true_labels = get_labels(
predictions=predictions, references=batch["labels"]
)
results = self.eval_metric.compute(
predictions=true_predictions, references=true_labels
)
self.log(
"train_loss", output.loss.item(), prog_bar=True, on_epoch=True, logger=True
)
self.log(
"train_overall_fl",
results["overall_f1"],
prog_bar=True,
on_epoch=True,
logger=True,
)
self.log(
"train_overall_recall",
results["overall_recall"],
prog_bar=True,
on_epoch=True,
logger=True,
)
self.log(
"train_overall_precision",
results["overall_precision"],
prog_bar=True,
on_epoch=True,
logger=True,
)
return loss
def validation_step(self, batch, batch_idx):
output = self(batch)
loss = output.loss
predictions = output.logits.argmax(dim=-1)
true_predictions, true_labels = get_labels(
predictions=predictions, references=batch["labels"]
)
results = self.eval_metric.compute(
predictions=true_predictions, references=true_labels
)
self.log(
"val_loss", output.loss.item(), prog_bar=True, on_epoch=True, logger=True
)
self.log(
"val_overall_fl",
results["overall_f1"],
prog_bar=True,
on_epoch=True,
logger=True,
)
self.log(
"val_overall_recall",
results["overall_recall"],
prog_bar=True,
on_epoch=True,
logger=True,
)
self.log(
"val_overall_precision",
results["overall_precision"],
prog_bar=True,
on_epoch=True,
logger=True,
)
return loss
def configure_optimizers(self):
optimizer = torch.optim.AdamW(self.parameters(), lr=self.config.lr)
return optimizer
def perform_evaluation(path: str = None, pl_model=None, pl_dl=None):
print("Evaluating the model")
if path is not None:
pl_model = pl_model.load_from_checkpoint(path, config=t5_config)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
eval_metric = evaluate.load("seqeval")
pl_model = pl_model.to(device)
pl_model.eval()
for idx, batch in enumerate(tqdm(pl_dl.val_dataloader())):
# move batch to device
batch = {k: v.to(device) for k, v in batch.items()}
with torch.no_grad():
outputs = pl_model(batch)
predictions = outputs.logits.argmax(-1)
true_predictions, true_labels = get_labels(predictions, batch["labels"])
eval_metric.add_batch(references=true_labels, predictions=true_predictions)
results = eval_metric.compute()
metrics = {}
for key in [
"overall_precision",
"overall_recall",
"overall_f1",
"overall_accuracy",
]:
print_statement = "{0: <30}".format(str(key) + " has value:")
print(print_statement, results[key])
metrics[key] = results[key]
return metrics
checkpoint_callback = ModelCheckpoint(
dirpath="./tilt/models",
monitor="val_overall_fl",
mode="max",
filename="tilt_best_ckpt",
save_top_k=1,
)
logger = CSVLogger("./tilt/logs", name="funsd_dataset")
wandb.init(config=t5_config, project="TiLT on FUNSD")
wandb_logger = WandbLogger(project="TiLT on FUNSD", log_model=False, entity="iakarshu")
trainer = pl.Trainer(
default_root_dir="./tilt/logs",
devices="auto",
accelerator="auto",
max_epochs=t5_config.max_epochs,
logger=wandb_logger,
callbacks=[checkpoint_callback],
log_every_n_steps=5,
)
pl_model = TiltModel(t5_config)
pl_dl = DataModule(train_ds, val_ds, batch_size=2)
trainer.fit(pl_model, pl_dl)
ckpt_folder = "./tilt/models"
if os.path.exists(ckpt_folder):
ckpt_path = os.path.join(ckpt_folder, os.listdir(ckpt_folder)[0])
else:
ckpt_path = None
metrics = perform_evaluation(path=ckpt_path, pl_model=pl_model, pl_dl=pl_dl)
print(metrics)
|
# # Text Classification
import collections
import numpy as np
import pandas as pd
import re
from argparse import Namespace
train_data = pd.read_csv(
"/kaggle/input/yelp-reviews-for-sa-finegrained-5-classes-csv/yelp_review_fine-grained_5_classes_csv/train.csv"
)
test_data = pd.read_csv(
"/kaggle/input/yelp-reviews-for-sa-finegrained-5-classes-csv/yelp_review_fine-grained_5_classes_csv/test.csv"
)
train_data.head()
test_data.class_index.value_counts()
train_proportion = 0.8
val_proportion = 0.2
import collections
by_rating = collections.defaultdict(list)
for _, row in train_data.iterrows():
by_rating[row.class_index].append(row.to_dict())
## Create a split data
final_list = []
np.random.seed(42)
for _, item_list in sorted(by_rating.items()):
np.random.shuffle(item_list)
n_total = len(item_list)
n_train = int(train_proportion * n_total)
n_val = int(val_proportion * n_total)
# Give data a split attribute
for item in item_list[:n_train]:
item["split"] = "train"
for item in item_list[n_train : n_train + n_val]:
item["split"] = "val"
# add to the final list
final_list.extend(item_list)
len(final_list)
for _, row in test_data.iterrows():
row_dict = row.to_dict()
row_dict["split"] = "test"
final_list.append(row_dict)
# Write split data to file
final_reviews = pd.DataFrame(final_list)
final_reviews.head()
final_reviews.split.value_counts()
final_reviews.isna().sum()
# # Preprocessing the Data
import re
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
from num2words import num2words
def preprocess_text(text):
# convert to lower case
text = text.lower()
# replace contractions with full words
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can\'t", "can not", text)
text = re.sub(r"n\'t", " not", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'s", " is", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'t", " not", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'m", " am", text)
# remove punctuation
text = re.sub(r"[^\w\s]", "", text)
# convert numerical numbers to word
text = re.sub(r"\d+", lambda x: str(num2words(int(x.group(0)))) + " ", text)
# remove stop words
stop_words = set(stopwords.words("english"))
text = " ".join([word for word in text.split() if word not in stop_words])
return text
final_reviews.review_text = final_reviews.review_text.apply(preprocess_text)
final_reviews["rating"] = final_reviews.class_index.apply(
{1: "WORST", 2: "BAD", 3: "NEUTRAL", 4: "GOOD", 5: "BEST"}.get
)
## removing the text data which length are less than 1
final_reviews = final_reviews[final_reviews["review_text"].str.len() > 1]
# # Classifying Yelp Reviews
from argparse import Namespace
from collections import Counter
import json
import os
import re
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
# # Building the custom Vocabulary with our training data
# This code defines a Python class named Vocabulary that is used to build a vocabulary from a set of sentences. The vocabulary maps each unique word in the sentences to a unique integer ID. The class has three methods:
# > **__init__(self,freq_threshold,max_size)**: Initializes the class with two parameters, **freq_threshold** and **max_size**. **freq_threshold** is used to limit the vocabulary to only include words that appear at least freq_threshold times in the input sentences. **max_size** is used to limit the size of the vocabulary to max_size words.
# > **build_vocabulary(self, sentences)**: This method builds the vocabulary from the input sentences. It first tokenizes each sentence into a list of words using the tokenizer method. Then, it counts the frequency of each word in the sentences, and keeps only the words that appear more than freq_threshold times. If the resulting vocabulary size is greater than max_size, it keeps only the max_size most frequent words. Finally, it maps each word to a unique integer ID using the stoi and itos dictionaries.
# > **numericalize(self, text)**: This method converts a given sentence text into a list of integer IDs by tokenizing the sentence using the tokenizer method and then mapping each word to its corresponding integer ID using the stoi dictionary. If a word is not present in the vocabulary, it is mapped to the integer ID of the <UNK> token (a small usage sketch follows the class definition below).
#
from tqdm import tqdm
class Vocabulary:
def __init__(self, freq_threshold, max_size):
super(Vocabulary, self).__init__()
self.freq_threshold = freq_threshold
self.max_size = max_size
self.stoi = {"<UNK>": 1}
self.itos = {1: "<UNK>"}
"""
Build The vocabulary
"""
@staticmethod
def tokenizer(text):
return [w.strip() for w in text.split()]
def build_vocabulary(self, sentences):
frequencies = {}
idx = 2
for sent in tqdm(sentences):
words = self.tokenizer(sent)
for w in words:
if w not in frequencies.keys():
frequencies[w] = 1
else:
frequencies[w] += 1
        # Limit the vocab by removing low-frequency words
frequencies = {k: v for k, v in frequencies.items() if v > self.freq_threshold}
if len(frequencies) > self.max_size:
frequencies = dict(
sorted(frequencies.items(), key=lambda x: -x[1])[: self.max_size]
)
for word in frequencies.keys():
self.stoi[word] = idx
self.itos[idx] = word
idx += 1
def numericalize(self, text):
tokenized_text = self.tokenizer(text)
numericalized_text = []
for token in tokenized_text:
if token in self.stoi.keys():
numericalized_text.append(self.stoi[token])
else:
numericalized_text.append(self.stoi["<UNK>"])
return numericalized_text
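# A minimal sanity check of the Vocabulary class above on two made-up sentences
# (illustrative only; the thresholds here are not the ones used for the real dataset).
_demo_vocab = Vocabulary(freq_threshold=0, max_size=10)
_demo_vocab.build_vocabulary(["good food good service", "bad food"])
print(_demo_vocab.stoi)  # word -> id mapping, with <UNK> reserved at id 1
print(_demo_vocab.numericalize("good drinks"))  # unseen "drinks" falls back to <UNK> (id 1)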
final_reviews.drop(labels=["rating"], inplace=True, axis=1)
final_reviews.split.value_counts()
# remap the class index to the range 0-4, with 0 being worst and 4 being best
final_reviews["class_index"] = final_reviews["class_index"] - 1
final_reviews
train_df = final_reviews[final_reviews["split"] == "train"]
valid_df = final_reviews[final_reviews["split"] == "val"]
test_df = final_reviews[final_reviews["split"] == "test"]
train_df.shape, test_df.shape, valid_df.shape
train_df.head()
# # Create Custom Dataset For Training and Validation
# This code defines two PyTorch datasets for sentiment analysis: TrainSentimentDataset and ValidSentimentDataset.
# > **TrainSentimentDataset** takes a training DataFrame as input and builds a vocabulary using the Vocabulary class. It stores the reviews and their corresponding sentiment labels as instance variables. The **__len__** method returns the number of reviews in the DataFrame. The **__getitem__** method takes an index, retrieves the corresponding review and sentiment, and returns the numericalized review as a PyTorch tensor along with the sentiment label.
# > **ValidSentimentDataset** takes a validation DataFrame and a TrainSentimentDataset object as inputs. It stores the reviews and their corresponding sentiment labels as instance variables. The **__len__** method returns the number of reviews in the DataFrame. The **__getitem__** method takes an index, retrieves the corresponding review and sentiment, and returns the numericalized review as a PyTorch tensor along with the sentiment label. It uses the Vocabulary object from the TrainSentimentDataset to numericalize the review. This ensures that the vocabulary used to numericalize the reviews is consistent across both the training and validation datasets.
from torch.utils.data import Dataset, DataLoader
class TrainSentimentDataset(Dataset):
def __init__(self, train_df, transforms=None, freq_threshold=5, max_size=8000):
super(TrainSentimentDataset, self).__init__()
self.train_df = train_df
self.reviews = self.train_df["review_text"].values
self.sentiments = self.train_df["class_index"].values
self.transforms = transforms
self.vocab = Vocabulary(freq_threshold=freq_threshold, max_size=max_size)
self.vocab.build_vocabulary(self.reviews)
def __len__(self):
return len(self.train_df)
def __getitem__(self, index):
review = self.reviews[index]
sentiment = self.sentiments[index]
numericalized_text = self.vocab.numericalize(review)
return torch.tensor(numericalized_text), sentiment
class ValidSentimentDataset(Dataset):
def __init__(
self, valid_df, train_dataset, transforms=None, freq_threshold=5, max_size=8000
):
super(ValidSentimentDataset, self).__init__()
self.reviews = valid_df["review_text"].values
self.sentiments = valid_df["class_index"].values
self.transforms = transforms
self.train_dataset = train_dataset
# self.vocab=Vocabulary(freq_threshold=freq_threshold,max_size=max_size)
# self.vocab.build_vocabulary(self.train_df["review"].values)
def __len__(self):
return len(self.reviews)
def __getitem__(self, index):
review = self.reviews[index]
sentiment = self.sentiments[index]
numericalized_text = self.train_dataset.vocab.numericalize(review)
return torch.tensor(numericalized_text), sentiment
#
# This code defines a collate function for the PyTorch DataLoader that will be used to load the datasets in batches. The collate function takes a batch of samples and performs padding on the numericalized text sequences to make them of the same length.
# **pad_sequence** from **torch.nn.utils.rnn** is used to pad the sequences to the same length, which is the length of the longest sequence in the batch. The batch_first argument is set to True so that the batch dimension comes first. The padding value is set to the **pad_idx** argument passed to the **MyCollate** constructor.
# The collate function returns the padded numericalized text sequences as a PyTorch tensor and the corresponding sentiment labels as a tensor as well.
from torch.nn.utils.rnn import pad_sequence
label_pipeline = lambda x: int(x)
class MyCollate:
def __init__(self, pad_idx):
self.pad_idx = pad_idx
def __call__(self, batch):
numericalized_text = [item[0] for item in batch]
numericalized_text = pad_sequence(
numericalized_text,
batch_first=True,
padding_value=self.pad_idx,
)
sentiments = torch.Tensor([item[1] for item in batch])
return numericalized_text, sentiments
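# Tiny illustrative batch (made-up tensors) showing the padding behaviour described above:
# the shorter sequence is right-padded with pad_idx so both rows end up the same length.
_demo_batch = [(torch.tensor([4, 7, 2]), 3), (torch.tensor([5]), 1)]
_demo_texts, _demo_labels = MyCollate(pad_idx=0)(_demo_batch)
print(_demo_texts)   # tensor([[4, 7, 2], [5, 0, 0]])
print(_demo_labels)  # tensor([3., 1.])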
from torch.utils.data import DataLoader
def get_train_loader(dataset, batch_size, num_worker=4, shuffle=True, pin_memory=False):
pad_idx = 0
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_worker,
collate_fn=MyCollate(pad_idx),
)
return loader
def get_valid_loader(
dataset, batch_size, num_workers=1, shuffle=True, pin_memory=False
):
pad_idx = 0
loader = DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
pin_memory=pin_memory,
collate_fn=MyCollate(pad_idx=pad_idx),
)
return loader
train_dataset = TrainSentimentDataset(train_df)
valid_dataset = ValidSentimentDataset(valid_df, train_dataset)
test_dataset = ValidSentimentDataset(test_df, train_dataset)
train_loader = get_train_loader(train_dataset, batch_size=64)
valid_loader = get_valid_loader(valid_dataset, batch_size=64)
test_loader = get_valid_loader(test_dataset, batch_size=64)
# # Model Architecture
import torch.nn as nn
import torch.nn.functional as F
class YelpClassifier(nn.Module):
def __init__(self, vocab_size, embed_size, hidden_dim, num_classes):
super(YelpClassifier, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim=embed_size)
self.lstm = nn.LSTM(
embed_size,
hidden_dim,
batch_first=True,
bidirectional=True,
)
self.fc1 = nn.Linear(hidden_dim * 2, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x):
x = self.embedding(x)
lstm_out, (h_o, c_o) = self.lstm(x)
out = torch.concat([h_o[0], h_o[-1]], dim=1)
out = F.relu(self.fc1(out))
out = self.fc2(out)
return out
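# Illustrative shape check for YelpClassifier (hypothetical vocabulary and batch sizes), just to
# confirm the concatenated bidirectional hidden states produce the expected output shape.
_demo_clf = YelpClassifier(vocab_size=100, embed_size=16, hidden_dim=8, num_classes=5)
_demo_out = _demo_clf(torch.randint(0, 100, (2, 7)))  # batch of 2 sequences of length 7
print(_demo_out.shape)  # expected: torch.Size([2, 5])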
# # Model Architecture (multi-layer bidirectional LSTM used for training)
import torch.nn as nn
class classifier(nn.Module):
# define all the layers used in model
def __init__(
self,
vocab_size,
embedding_dim,
hidden_dim,
output_dim,
n_layers,
bidirectional,
dropout,
):
# Constructor
super().__init__()
# embedding layer
self.embedding = nn.Embedding(vocab_size, embedding_dim)
# lstm layer
self.lstm = nn.LSTM(
embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=True,
)
# dense layer
self.fc = nn.Linear(hidden_dim * 2, output_dim)
def forward(self, text):
# text = [batch size,sent_length]
embedded = self.embedding(text)
# embedded = [batch size, sent_len, emb dim]
# packed sequence
# packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)
packed_output, (hidden, cell) = self.lstm(embedded)
# hidden = [batch size, num layers * num directions,hid dim]
# cell = [batch size, num layers * num directions,hid dim]
# concat the final forward and backward hidden state
hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
# hidden = [batch size, hid dim * num directions]
outputs = self.fc(hidden)
# Final activation function
# outputs=self.act(dense_outputs)
return outputs
# Initialize the device
device = "cuda" if torch.cuda.is_available() else "cpu"
# define hyperparameters
size_of_vocab = 8500
embedding_dim = 100
num_hidden_nodes = 32
num_output_nodes = 5
num_layers = 2
bidirection = True
dropout = 0.2
# instantiate the model
model = classifier(
size_of_vocab,
embedding_dim,
num_hidden_nodes,
num_output_nodes,
num_layers,
bidirectional=True,
dropout=dropout,
).to(device)
# model=YelpClassifier(10000,512,256,5).to(device)
# Below function train() trains a PyTorch model using the provided train and validation data loaders for a specified number of epochs. During training, the model's parameters are updated using the optimizer and the loss is computed using the specified criterion. The function prints the train and validation loss and accuracy after each epoch. If the validation loss improves, the model is saved to a specified file path. The function takes in the following parameters:
# * model: the PyTorch model to be trained
# * train_loader: the data loader for the training set
# * val_loader: the data loader for the validation set
# * criterion: the loss function used to evaluate the model's performance
# * optimizer: the optimization algorithm used to update the model's parameters
# * device: the device (CPU or GPU) used for training
# * epochs: the number of epochs to train the model
# * clip: the gradient clipping value used to prevent exploding gradients
# * save_path: the file path to save the model if its validation loss improves.
def train(
model,
train_loader,
val_loader,
criterion,
optimizer,
device,
epochs,
clip=1,
save_path="model",
):
best_val_loss = float("inf")
for epoch in range(epochs):
model.train()
train_loss = 0
train_acc = 0
for batch_idx, batch in enumerate(train_loader):
texts, targets = batch
texts = texts.to(device)
targets = targets.to(device)
targets = targets.long()
optimizer.zero_grad()
output = model(texts)
loss = criterion(output, targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
train_loss += loss.item()
train_acc += (output.argmax(1) == targets).sum().item()
            if (batch_idx % 100 == 0) and (batch_idx != 0):
                print(
                    f"Batch {batch_idx}/{len(train_loader)} "
                    f"({100 * batch_idx / len(train_loader):.1f}% of epoch) "
                    f"Train Accuracy = {train_acc / (batch_idx * 64):.4f} "
                    f"Loss = {train_loss / batch_idx:.4f}"
                )
train_loss /= len(train_loader)
train_acc /= len(train_loader.dataset)
print(f"Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.4f}")
model.eval()
val_loss = 0
val_acc = 0
with torch.no_grad():
for batch_idx, batch in enumerate(val_loader):
texts, targets = batch
texts = texts.to(device)
targets = targets.to(device)
output = model.forward(texts)
targets = targets.long()
loss = criterion(output, targets)
val_loss += loss.item()
val_acc += (output.argmax(1) == targets).sum().item()
val_loss /= len(val_loader)
val_acc /= len(val_loader.dataset)
print(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")
# Save the model if the validation loss improves
        if save_path is not None and val_loss < best_val_loss:
            ckpt_path = f"{save_path}_{epoch}.pt"
            print(f"Saving model to {ckpt_path}")
            torch.save(model.state_dict(), ckpt_path)
            best_val_loss = val_loss
torch.save(model.state_dict(), "model_1.pt")
# initialize the criterion and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
# # Running for two epochs gives roughly 65% accuracy on the validation dataset.
# # Future work: improving the model performance.
# # This is a baseline model; you can try to improve on it.
# call the train function
train(
model,
train_loader,
val_loader=valid_loader,
criterion=criterion,
optimizer=optimizer,
device=device,
epochs=4,
)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import catboost as cb
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
import math
train = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/train_data.csv",
parse_dates=["date"],
)
test = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/test_data.csv",
parse_dates=["date"],
)
train["source"] = "train"
test["source"] = "test"
train.dropna(inplace=True)
df = pd.concat([train, test], axis=0, ignore_index=True)
sample_sub = pd.read_csv(
"/kaggle/input/yasmines-meteorological-mystery-dsc-psut-comp/sample_sub.csv"
)
df.head()
df.tail()
def missing_values_table(df):
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
mis_val_table_ren_columns = mis_val_table.rename(
columns={0: "Missing Values", 1: "% of Total Values"}
)
mis_val_table_ren_columns = (
mis_val_table_ren_columns[mis_val_table_ren_columns.iloc[:, 1] != 0]
.sort_values("% of Total Values", ascending=False)
.round(1)
)
print(
"Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are "
+ str(mis_val_table_ren_columns.shape[0])
+ " columns that have missing values."
)
return mis_val_table_ren_columns
missing_values_train = missing_values_table(df)
missing_values_train.style.background_gradient(cmap="Reds")
df
def extract_date(df):
df["day"] = df["date"].dt.day
df["month"] = df["date"].dt.month
df["year"] = df["date"].dt.year
extract_date(df)
df.shape
int(df["sunrise"].iloc[0].split(":")[1]) < 30
def split_time(df):
df["rise_hour"] = 0
df["set_hour"] = 0
for i in range(df.shape[0]):
df["rise_hour"].iloc[i] = int(df["sunrise"].iloc[i].split(":")[0])
if int(df["sunrise"].iloc[i].split(":")[1]) < 30:
df["rise_hour"].iloc[i] -= 1
df["set_hour"].iloc[i] = int(df["sunset"].iloc[i].split(":")[0])
if int(df["sunset"].iloc[i].split(":")[1]) < 30:
df["set_hour"].iloc[i] -= 1
df.drop(["sunrise", "sunset"], axis=1, inplace=True)
split_time(df)
# Magnus Formula
# T = [Ts × a - ln(RH/100) × b] / [ln(RH/100) + a]
# ref: https://www.omnicalculator.com/physics/dew-point#how-to-calculate-dew-point-how-to-calculate-relative-humidity
def calculate_air_temperature(dew_point_temp, relative_humidity):
    # constants recommended by Alduchov and Eskridge for Celsius
a = 17.625
b = 243.04
RH = relative_humidity
T = (dew_point_temp * a - math.log(RH / 100) * b) / (math.log(RH / 100) + a)
return T
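# Quick illustrative sanity check: at 100% relative humidity the air temperature should equal
# the dew point, because ln(100/100) = 0 in the formula above.
print(calculate_air_temperature(10.0, 100.0))  # expected: 10.0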
df["air_temp"] = df.apply(
lambda row: calculate_air_temperature(row["dew"], row["humidity"]), axis=1
)
df
# Heat Index
# HI = c1 + c2T + c3R + c4TR + c5T² + c6R² + c7T²R + c8TR² + c9T²R²
def heat_index(T, R):
# Constants for celsius
# ref : https://en.wikipedia.org/wiki/Heat_index
c1 = -8.78469475556
c2 = 1.61139411
c3 = 2.33854883889
c4 = -0.14611605
c5 = -0.012308094
c6 = -0.0164248277778
c7 = 0.002211732
c8 = 0.00072546
c9 = -0.000003582
HI = (
c1
+ c2 * T
+ c3 * R
+ c4 * T * R
+ c5 * (T**2)
+ c6 * (R**2)
+ c7 * (T**2) * R
+ c8 * T * (R**2)
+ c9 * (T**2) * (R**2)
)
return HI
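# Illustrative check (not part of the pipeline): at roughly 30 °C and 70% relative humidity
# the heat index with these Celsius coefficients comes out around 35 °C.
print(round(heat_index(30, 70), 1))  # ~35.0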
df["heat_index"] = df.apply(
lambda row: heat_index(row["air_temp"], row["humidity"]), axis=1
)
df
# Absolute Humidity
# AH = (avp * 1000) / (461.5 * (T + 273.15) * (P / 100))
# ref: https://planetcalc.com/2167/
# def absolute_humidity(RH,T, P):
# svp = 6.112 * math.exp((17.62* T)/(243.12 * T))
# avp = (RH / 100) * svp
# AH = (avp * 1000) / (461.5 * (T + 273.15) * (P / 100))
# return AH
# df['abs_hum'] = df.apply(lambda row: absolute_humidity(row['humidity'], row['air_temp'], row['pressure']), axis=1)
# Absolute Humidity
# AH = (RH × Ps) / (Rw × T × 100)
def absolute_humidity(RH, Ps, T):
Rw = 28.97
AH = (RH * Ps) / (Rw * T * 100)
return AH
df["abs_hum"] = df.apply(
lambda row: absolute_humidity(row["humidity"], row["pressure"], row["air_temp"]),
axis=1,
)
df
plt.figure(figsize=(20, 20))
sns.heatmap(df.corr(), annot=True, cmap="RdYlGn")
df[["dew", "pressure"]]
train
sns.displot(train, x="humidity")
sns.displot(test, x="humidity")
mask = (train["humidity"] < 70) & (train["humidity"] > 50)
sns.displot(train[mask], x="max_feels_like")
train = df[df["source"] == "train"]
test = df[df["source"] == "test"]
test.drop(["source", "min_feels_like", "max_feels_like", "date"], axis=1, inplace=True)
train.drop(["source", "date"], axis=1, inplace=True)
def train_model(train, test, features, y):
kfold = KFold(5, random_state=42, shuffle=True)
train_trans = train[features]
test_trans = test[features]
scaler = StandardScaler()
train_trans = pd.DataFrame(
scaler.fit_transform(train_trans), columns=train_trans.columns
)
test_trans = pd.DataFrame(scaler.transform(test_trans), columns=test_trans.columns)
preds = np.zeros(test.shape[0])
imp = pd.DataFrame()
imp["features"] = test_trans.columns
imp["result"] = 0
for train_index, test_index in kfold.split(train_trans, y):
pool_train = cb.Pool(
train_trans.iloc[train_index].values, y.iloc[train_index].values
)
pool_eval = cb.Pool(
train_trans.iloc[test_index].values, y.iloc[test_index].values
)
params = {"iterations": 5000, "learning_rate": 0.01, "verbose": 100}
model = cb.train(pool_train, params, evals=pool_eval, early_stopping_rounds=50)
imp["result"] += model.get_feature_importance(pool_train) / 5
preds += model.predict(test_trans) / 5
return preds, imp
y = "max_feels_like"
features = [x for x in test.columns]
preds_max, imp_max = train_model(train, test, features, train[y])
y = "min_feels_like"
features = [x for x in test.columns]
preds_min, imp = train_model(train, test, features, train[y])
preds_min
sample_sub["min_feels_like"] = preds_min.round(1)
sample_sub["max_feels_like"] = preds_max.round(1)
sample_sub
sample_sub.to_csv("sub.csv", index=0)
|
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# # Data Collection and Analysis
# First, we load the data into a data frame, a data structure provided by the pandas library that is well suited to handling CSV data.
df = pd.read_csv("/kaggle/input/datansi/monuments.csv")
df.head()
# First, let us check the data types of the latitude and longitude values. The info function returns information about the data frame's columns, such as the type of data they contain and the number of non-null values.
df.info()
# Out of the 90 entries in the CSV, the latitude and longitude columns are properly populated: they contain no null values and are in the correct float format.
sum(df.duplicated() == True)
def show_duplicates():
display(df[df.duplicated()])
show_duplicates()
def browse_monuments(entry, df):
mask = df.apply(lambda x: x.astype(str).str.contains(entry).any(), axis=1)
filtered_df = df[mask]
display(filtered_df)
browse_monuments("Chapell", df)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from functools import partial
from skopt import space
from skopt import gp_minimize
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df_train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
df_test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
original_data = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
df_train.head()
print("Shape of the given train data:", df_train.shape)
print("Shape of the Original data: ", original_data.shape)
original_data.head()
# Is_generated
df_train["is_generated"] = 1
df_test["is_generated"] = 1
original_data["is_generated"] = 0
# Join data
train_full = pd.concat(
[df_train, original_data], axis=0, ignore_index=True
).reset_index(drop=True)
print("Shape fo the combined data: ", train_full.shape)
print("--" * 28)
train_full.head()
# From https://www.kaggle.com/code/tetsutani/ps3e12-eda-ensemble-baseline
def create_new_features(data):
# Ion product of calcium and urea
data["ion_product"] = data["calc"] * data["urea"]
# Calcium-to-urea ratio
data["calcium_to_urea_ratio"] = data["calc"] / data["urea"]
# Electrolyte balance
data["electrolyte_balance"] = data["cond"] / (10 ** (-data["ph"]))
# Osmolality-to-specific gravity ratio
data["osmolality_to_sg_ratio"] = data["osmo"] / data["gravity"]
## Add Feature engineering part
# The product of osmolarity and density is created as a new property
data["osmo_density"] = data["osmo"] * data["gravity"]
# Converting pH column to categorical variable
data["pH_cat"] = pd.cut(
data["ph"],
bins=[0, 4.5, 6.5, 8.5, 14],
labels=["sangat acidic", "acidic", "neutral", "basic"],
)
dummies = pd.get_dummies(data["pH_cat"])
data = pd.concat([data, dummies], axis=1)
    # Drop the original pH columns and the redundant dummy levels
data.drop(
["pH_cat", "sangat acidic", "basic", "neutral", "ph"], axis=1, inplace=True
)
return data
# Create Feature
train_full = create_new_features(train_full)
df_test = create_new_features(df_test)
# https://www.kaggle.com/code/naesalang/little-beautiful-notebook/notebook
correlation = train_full.corr()
correlation["target"].drop("target").plot(kind="bar", color="xkcd:magenta")
plt.grid(True)
plt.xlabel("Features")
plt.ylabel("Correlation")
useful_columns = [c for c in train_full.columns if c not in ["id", "target"]]
print(useful_columns)
df_test = pd.DataFrame(df_test, columns=useful_columns)
df_test.columns
y = train_full["target"]
X = train_full[useful_columns]
# Scale
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X))
X_test = pd.DataFrame(scaler.transform(df_test))
X = np.asarray(X)
X_test = np.asarray(X_test)
# ### Models Description
# The models used for stacking here, were tuned using Bayesian optimization. The following models are used:
# * LGBMClassifier- https://www.kaggle.com/code/datascientistsohail/bayesian-lgbmclassifier-se03-ep12
# * XGBClassifier- https://www.kaggle.com/code/datascientistsohail/bayesian-xgbclassifier-se03-ep12
# * RandomForestClassifier- https://www.kaggle.com/code/datascientistsohail/bayesian-randomforestclassifier-se03-ep12
# * CatBoostClassifier- https://www.kaggle.com/code/datascientistsohail/bayesian-catboostclassifier-se03-ep12
# The tuned parameters for each of the above classifiers are given in the following cell.
lgbm_params = {
"learning_rate": 0.03252252802334653,
"max_depth": 14,
"n_estimators": 600,
"min_child_weight": 7,
"subsample": 0.8053707688963865,
"colsample_bytree": 0.639247506499367,
"reg_alpha": 12.203164082938548,
"reg_lambda": 90.67457610671555,
}
xgb_params = {
"learning_rate": 0.023338278154175066,
"max_depth": 14,
"n_estimators": 46,
"subsample": 0.8292375888649082,
}
rf_params = {
"max_depth": 27,
"n_estimators": 71,
"min_samples_split": 59,
"min_samples_leaf": 5,
}
catboost_params = {
"max_depth": 10,
"n_estimators": 50,
"learning_rate": 0.44980085135871845,
"l2_leaf_reg": 10.0,
}
clf1 = ("lgmb", LGBMClassifier(**lgbm_params, random_state=42, objective="binary"))
clf2 = (
"xgb",
XGBClassifier(**xgb_params, objective="binary:logistic", random_state=42),
)
clf3 = ("rf", RandomForestClassifier(**rf_params, criterion="gini", random_state=42))
clf4 = ("cat", CatBoostClassifier(**catboost_params, random_state=42, verbose=False))
# Create the voting classifier
voting_clf = VotingClassifier(estimators=[clf1, clf2, clf3, clf4], voting="soft")
num_folds = 5
kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=42)
predictions = np.zeros(len(X_test))
scores = []
for fold, (trn_idx, val_idx) in enumerate(kf.split(X, y)):
print("-" * 20, "Fold:", fold, "-" * 20)
X_train, X_valid = X[trn_idx], X[val_idx]
y_train, y_valid = y[trn_idx], y[val_idx]
voting_clf.fit(X_train, y_train)
y_pred = voting_clf.predict_proba(X_valid)[:, 1]
score = roc_auc_score(y_valid, y_pred)
print(score)
scores.append(score)
predictions += voting_clf.predict_proba(X_test)[:, 1] / num_folds
print("roc_auc_score: ", -1 * np.mean(scores))
submission["target"] = predictions
submission.to_csv("submission.csv", index=False)
|
import random
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import missingno
import matplotlib.pyplot as plt
import eli5
import catboost
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.linear_model import LogisticRegression
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Data preparation
# load all data available
train_data = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
sample_submission = pd.read_csv("/kaggle/input/spaceship-titanic/sample_submission.csv")
print(
"train_data",
train_data.shape,
"test_data",
test_data.shape,
"sample_submission",
sample_submission.shape,
)
# connect together train and test data to process all columns in a same way
data = pd.concat([train_data, test_data])
display(data.head())
data.info()
missingno.matrix(data)
# function to fill NaNs in a series with random non-NaN values
def fill_with_random(series: pd.Series):
rng = np.random.default_rng(seed=42)
series2 = series.copy()
series2 = series2.apply(
lambda x: rng.choice(series2.dropna().values) if x != x or x is None else x
)
return series2
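# Illustrative check on a made-up series: the NaNs are replaced by values drawn from the
# non-null entries, so no missing values remain afterwards.
_demo_filled = fill_with_random(pd.Series([1.0, np.nan, 3.0, np.nan]))
print(_demo_filled.isna().sum())  # expected: 0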
# fill missing values with the group's first value, then fill any remaining gaps randomly
def fill_group_forward(data, column, group):
grp = data.groupby(group)[column].first()
    def fill(row):
        # treat both None and NaN as missing
        if pd.isna(row[column]):
            return grp[grp.index == row[group]].values[0]
        else:
            return row[column]
data[column] = data.apply(fill, axis=1)
data[column] = fill_with_random(data[column])
return data
# ---
# **Passenger ID**
# split passenger id to its group id and place in a group
data["group_id"] = data["PassengerId"].apply(lambda x: x.split("_")[0]).astype(int)
data["num_in_group"] = data["PassengerId"].apply(lambda x: x.split("_")[1]).astype(int)
print("Groups total:", data["group_id"].nunique())
print("Persons in group:")
sns.histplot(data["num_in_group"])
# ---
# **Home Planet**
# fill missing home planets from the travel group first, then randomly in the same proportions as the existing data
print(data["HomePlanet"].value_counts(dropna=False), data["HomePlanet"].shape)
data = fill_group_forward(
data, column="HomePlanet", group="group_id"
) # fill_with_random(data['HomePlanet'])
sns.histplot(data["HomePlanet"])
# ---
# **Cabin**
data = fill_group_forward(data, column="Cabin", group="group_id")
def split_cabin_code(x: str, n):
try:
split = x.split("/")
except:
return None
return split[n]
# extract specific featires from cabin description
data["cabin_deck"] = data["Cabin"].apply(lambda x: split_cabin_code(x, 0))
data["cabin_num"] = data["Cabin"].apply(lambda x: split_cabin_code(x, 1)).astype(int)
data["cabin_side"] = data["Cabin"].apply(lambda x: split_cabin_code(x, 2))
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
sns.histplot(data["cabin_deck"], ax=axs[0])
sns.histplot(data["cabin_num"], ax=axs[1])
sns.histplot(data["cabin_side"], ax=axs[2])
# ---
# **CryoSleep**
#
# share of sleepers on different decks and ship sides
sns.heatmap(
data.pivot_table(
index="cabin_deck", columns="cabin_side", values="CryoSleep", aggfunc="mean"
),
annot=True,
)
print(data["CryoSleep"].value_counts(dropna=False))
data = fill_group_forward(data, column="CryoSleep", group="group_id")
sns.histplot(data["CryoSleep"])
# ---
# **Destination**
print(data["Destination"].value_counts(dropna=False))
data = fill_group_forward(data, column="Destination", group="group_id")
sns.histplot(data["Destination"])
# ---
# **Age**
# fill age randomly
print(data["Age"].value_counts(dropna=False))
data["Age"] = fill_with_random(data["Age"])
sns.histplot(data["Age"])
# ---
# **VIP**
print(data["VIP"].value_counts(dropna=False))
# check percentage of VIP on different decks and cabin sides
deck_to_vip = data.pivot_table(index="cabin_deck", values="VIP", aggfunc="mean")
sns.heatmap(deck_to_vip)
plt.show()
# fill VIP status randomly
for deck in data["cabin_deck"].unique():
# fill subset of passengers in deck/side
data.loc[(data["cabin_deck"] == deck), "VIP"] = fill_with_random(
data.loc[(data["cabin_deck"] == deck)]["VIP"]
)
data["VIP"] = data["VIP"].astype(bool)
# ---
# **RoomService, FoodCourt, ShoppingMall, Spa, VRDeck**
# `RoomService`, `FoodCourt`, `ShoppingMall`, `Spa`, `VRDeck` - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities.
for col in ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]:
fig = plt.figure(figsize=(15, 0.5))
sns.boxplot(data.loc[data[col] > 0][[col]], x=col)
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
# check some correlations between bills and different possibly affecting factors
deck_to_bill = data.pivot_table(
index="cabin_deck",
values=["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"],
aggfunc="mean",
)
sns.heatmap(deck_to_bill, annot=True, fmt=".0f", ax=axs[0])
vip_to_bill = data.pivot_table(
index="VIP",
values=["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"],
aggfunc="mean",
)
sns.heatmap(vip_to_bill, annot=True, fmt=".0f", ax=axs[1])
age_to_bill = data.pivot_table(
index="Age",
values=["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"],
aggfunc="mean",
)
sns.heatmap(age_to_bill, ax=axs[2])
plt.show()
data = data.fillna(
{"RoomService": 0, "FoodCourt": 0, "ShoppingMall": 0, "Spa": 0, "VRDeck": 0}
)
# ---
# **Name**
def split_full_name(x: str, n):
try:
split = x.split(" ")
except:
return None
return split[n]
# extract name and family name
data["first_name"] = data["Name"].apply(lambda x: split_full_name(x, 0))
data["last_name"] = data["Name"].apply(lambda x: split_full_name(x, 1))
# fill gaps: random first names, last names forward-filled within the travel group
data["first_name"] = fill_with_random(data["first_name"])
data = fill_group_forward(data, column="last_name", group="group_id")
# **Finalize**
data.info()
data.set_index("PassengerId", inplace=True)
# ## Make baseline submission
def split_data(data, target="Transported"):
# prepare data for train, validation and submission
x = data.drop(columns=target)
y = data[target]
# drop text columns
for c in x.columns:
if x[c].dtype == "object":
x.drop(columns=c, inplace=True)
# extract train data
x_train = x[~y.isna()]
y_train = y[~y.isna()].astype(int)
    # extract submission data
x_test = x[y.isna()]
return x_train, y_train, x_test
def train_and_predict(
model,
data,
new_feature_names=None,
folds=10,
scoring="accuracy",
top_n_features_to_show=30,
submission_file_name="submission.csv",
silent=False,
):
(x_train, y_train, x_test) = data
cv = StratifiedKFold(folds, shuffle=True, random_state=42)
# make cross-validation
cv_scores = cross_val_score(
model, x_train, y_train, cv=cv, scoring=scoring, n_jobs=4
)
if not silent:
print("CV scores", cv_scores)
if not silent:
print(f"CV mean:{cv_scores.mean():.4f}, CV std:{cv_scores.std():.4f}")
# train model
model.fit(x_train, y_train)
# show feature importances
if not silent:
display(
eli5.show_weights(
estimator=model,
feature_names=x_train.columns.to_list(),
top=top_n_features_to_show,
)
)
# print new features stats
if new_feature_names:
print("New feature weights:")
try:
print(
pd.DataFrame(
{
"feature": new_feature_names,
"coef": model.coef_.flatten()[-len(new_feature_names) :],
}
)
)
except:
pass
# make submission
preds = model.predict(x_test)
preds = pd.DataFrame(preds, index=x_test.index).astype(bool)
preds.columns = ["Transported"]
# save submission file
submission = sample_submission.drop(columns="Transported").merge(
preds.reset_index(), how="left", on="PassengerId"
)
submission.to_csv(submission_file_name, index=False)
return cv_scores
catreg = catboost.CatBoostClassifier(random_state=42, verbose=False)
cv_scores1 = train_and_predict(
catreg, split_data(data), submission_file_name="submission.csv"
)
# ## Feature engineering
def compare_cv_scores(cv_score_old, cv_score_new):
folds_compare = cv_score_new > cv_score_old
print("\nFolds compare:", folds_compare, end="\n\n")
if cv_score_new.mean() > cv_score_old.mean():
print("Score increased \t[GOOD]", end="")
else:
print("Score decreased \t[BAD]", end="")
print(
f"\t{cv_score_old.mean():.4f} -> {cv_score_new.mean():.4f}",
f"{cv_score_new.mean() - cv_score_old.mean():.4f}",
)
if cv_score_new.std() > cv_score_old.std():
print("Variation increased \t[BAD]", end="")
else:
print("Variation decreased \t[GOOD]", end="")
print(
f"\t{cv_score_old.std():.4f} -> {cv_score_new.std():.4f}",
f"{cv_score_new.std() - cv_score_old.std():.4f}",
)
# encode home planet
def add_homeplanet_one(data):
data = data.join(pd.get_dummies(data["HomePlanet"], prefix="home", drop_first=True))
return data
data = add_homeplanet_one(data)
# encode cabin side
def add_cabin_side(data):
data["cabin_side"] = data["cabin_side"].map({"S": 1, "P": 0}).astype(int)
return data
data = add_cabin_side(data)
# group size feature: single traveller, couple, or larger group
def add_group_size(data):
group_sizes = data["group_id"].value_counts().reset_index()
group_sizes.columns = ["group_id", "group_size"]
data = (
data.reset_index()
.merge(group_sizes, how="left", on="group_id")
.set_index("PassengerId")
)
def categorize_size(x):
if x <= 1: # single
return 1
elif x <= 2: # couple
return 2
else:
return 3
data["group_size"] = data["group_size"].apply(categorize_size).astype(int)
return data
data = add_group_size(data)
def add_deck_bill(data):
# mean bill on a deck
data["deck_mean_bill"] = (
data["cabin_deck"]
.map(
{
"A": 3331,
"B": 2927,
"C": 3937,
"D": 2296,
"E": 1343,
"F": 1001,
"G": 408,
"T": 5916,
}
)
.astype(int)
)
return data
data = add_deck_bill(data)
def add_weighted_bills(data):
money_cols = ["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
data["total_bill"] = (
data[["RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]]
.apply(lambda x: 0.1 if x.sum() == 0 else x.sum(), axis=1)
.astype(int)
)
for col in money_cols:
data[col + "_w"] = data[col] / data["total_bill"]
data["total_bill"] = (data["total_bill"] - data["total_bill"].mean()) / data[
"total_bill"
].std()
return data
data = add_weighted_bills(data)
# ## Submission
cv_scores2 = train_and_predict(
catreg, split_data(data), submission_file_name="submission.csv"
)
compare_cv_scores(cv_scores1, cv_scores2)
cv_scores1 = cv_scores2
|
# # Import libraries and read raw file
import pandas as pd
from pandas_profiling import ProfileReport
import numpy as np
# Load data into a pandas DataFrame
df = pd.read_csv("6M-0K-99K.users.dataset.public.csv")
# ## Generate a Pandas Profiling Report
# ### It is a must-have for any initial analysis!
# Generate a profile report
profile = ProfileReport(df, title="Pandas Profiling Report", explorative=True)
# Save the report as an HTML file
profile.to_file("report.html")
# Pandas report revealed there are 200 countries but only 199 country codes. Let's dive into this issue and find ways to resolve it.
# # Fix some issues (inaccuracies in countries data, typos)
# ## Prepare for exporting for Tableau and dive a bit more into potential insights
# Filter characteristic columns
user_char = df[["country", "countryCode"]]
# Number of unique values in each column
unique_counts = user_char.nunique()
# Filter the countries sharing the same country code
shared_code = (
df.groupby("countryCode")
.agg(n=pd.NamedAgg(column="country", aggfunc="nunique"))
.query("n > 1")
)
unique_countries = df[df["countryCode"].isin(shared_code.index)]["country"].unique()
unique_countries
# we can drop country column now
# let's drop some non-value-added columns
df = df.drop(
[
"country",
"seniorityAsMonths",
"seniorityAsYears",
"identifierHash",
"type",
"civilityTitle",
],
axis=1,
)
# Improve readability of names
new_names = {
"language": "Language",
"socialNbFollowers": "Followers",
"socialNbFollows": "Following",
"socialProductsLiked": "Likes",
"productsListed": "Listings",
"productsSold": "Sales",
"productsPassRate": "PassRate",
"productsWished": "Wishlist",
"productsBought": "Purchases",
"gender": "Gender",
"civilityGenderId": "Civility",
"hasAnyApp": "HasApp",
"hasAndroidApp": "HasAndroid",
"hasIosApp": "HasIOS",
"hasProfilePicture": "HasProfilePicture",
"daysSinceLastLogin": "LastLogin",
"seniority": "Seniority",
"countryCode": "CountryCode",
}
df = df.rename(columns=new_names)
# remove the typo
max_val = df[df["LastLogin"] != 737028]["LastLogin"].max()
df["LastLogin"] = df["LastLogin"].replace(737028, max_val)
# save the processed file
df.to_csv("challenge.csv")
# who is this outlier?
df[df.Following == 13764]
import matplotlib.pyplot as plt
# Extract the 'dayssincelastlogin' column
days_since_last_login = df["LastLogin"]
# Create a histogram
plt.hist(days_since_last_login, bins=50, edgecolor="black")
# Add labels and title
plt.xlabel("Days Since Last Login")
plt.ylabel("Number of Users")
plt.title("Histogram of Days Since Last Login")
# Display the histogram
plt.show()
# * Insight: There is a noticeable difference in the proportion of items sold and bought via the iOS platform. Out of the total items sold on the platform, 64.2% (7,727 out of 12,027) were sold through the iOS app. However, when looking at the total items bought, only 45.3% (7,668 out of 17,006) were purchased using the iOS app. This indicates that iOS users tend to sell more items on the platform compared to their buying behavior.
# * The dataset may cover different timeframes for the purchased, sold, and listed items. For instance, some purchases might have been made before the start date of the dataset, while the sales and listings data only includes transactions within the dataset's timeframe.
#
# let's see how much these countries generate
df_best = df[df["CountryCode"].isin(["fr", "it", "gb", "us", "es", "de"])]
# Percentage of Total
print("Sales ratio: ", np.sum(df_best.Sales) / (np.sum(df.Sales)))
print("Purchasing ratio: ", np.sum(df_best.Purchases) / (np.sum(df.Purchases)))
sorted_df = df.sort_values("Likes", ascending=False)
# Let's have a closer look at the distribution
likes_quantiles = df[["Likes", "Followers", "Following"]].describe(
percentiles=[i / 200 for i in range(1, 200)]
)
likes_quantiles.tail(20)
sorted_df[["Likes", "Followers", "Following"]].head(20)
|
# Import Python Modules
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (
ExtraTreesClassifier,
AdaBoostClassifier,
RandomForestClassifier,
)
from sklearn.feature_selection import SequentialFeatureSelector
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Data Loading
df = pd.read_csv(os.path.join(dirname, filename))
df.head().T
df.info()
# No missing values
pd.plotting.scatter_matrix(df, alpha=0.3, figsize=(15, 8), diagonal="kde")
plt.tight_layout()
# There is a mixture of continuous and discrete data among the numerical features
categorical_cols = [
"checking_status",
"credit_history",
"purpose",
"savings_status",
"employment",
"personal_status",
"other_parties",
"property_magnitude",
"other_payment_plans",
"housing",
"job",
"own_telephone",
"foreign_worker",
"class",
]
numerical_cols = ["duration", "credit_amount", "age"]
discrete_cols = [
"installment_commitment",
"residence_since",
"existing_credits",
"num_dependents",
]
# View all Categorical Columns in the dataset
for i in categorical_cols:
print(f"{i} : {df[i].unique()}\n")
# Handling ordinal categories
from sklearn.preprocessing import LabelEncoder
def label_encode(data, columns):
le_model = LabelEncoder()
le = le_model.fit(columns)
label = le.transform(data)
return le_model, label
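# Illustrative call with a made-up ordinal scale. Note that LabelEncoder assigns ids in
# alphabetical order of the fitted categories, not in the order the list is written.
_demo_scale = ["low", "medium", "high"]
_demo_le, _demo_ids = label_encode(["high", "low", "low"], _demo_scale)
print(_demo_ids)  # [0 1 1] -> high=0, low=1, medium=2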
cat_df = pd.DataFrame() # We will store all categorical input here
# Categorical Features
checkingElements = ["no checking", "<0", "0<=X<200", ">=200"]
creditHistoryElements = [
"critical/other existing credit",
"no credits/all paid",
"delayed previously",
"existing paid",
"all paid",
]
purposeElements = [
"radio/tv",
"education",
"furniture/equipment",
"new car",
"used car",
"business",
"domestic appliance",
"repairs",
"other",
"retraining",
]
savingStatusElements = [
"no known savings",
"<100",
"100<=X<500",
"500<=X<1000",
">=1000",
]
employementElements = ["unemployed", "<1", "1<=X<4", "4<=X<7", ">=7"]
otherPartiesElement = ["none", "guarantor", "co applicant"]
propertyMagnitudeElements = [
"real estate",
"life insurance",
"no known property",
"car",
]
otherPaymentElements = ["none", "bank", "stores"]
housingElements = ["own", "for free", "rent"]
jobElements = [
"unemp/unskilled non res",
"unskilled resident",
"skilled",
"high qualif/self emp/mgmt",
]
# Category to Labels
variables = [
checkingElements,
creditHistoryElements,
purposeElements,
savingStatusElements,
employementElements,
otherPartiesElement,
propertyMagnitudeElements,
otherPaymentElements,
housingElements,
jobElements,
]
nominal_cols = [
"checking_status",
"credit_history",
"purpose",
"savings_status",
"employment",
"other_parties",
"property_magnitude",
"other_payment_plans",
"housing",
"job",
]
save_dict = {}
for col, var in zip(nominal_cols, variables):
col_le, col_labels = label_encode(df[f"{col}"], var)
save_dict[f"{col}"] = {"labels": col_labels, "model": col_le}
for key_name in save_dict.keys():
cat_df[f"{key_name}"] = save_dict[f"{key_name}"]["labels"]
num_df = pd.DataFrame()
scaler_num = MinMaxScaler()
data = np.array(df[numerical_cols])
stdand_values = scaler_num.fit_transform(data)
num_df[numerical_cols] = pd.DataFrame(stdand_values)
scaler_cat = MinMaxScaler()
data = np.array(cat_df[nominal_cols])
stdand_values = scaler_cat.fit_transform(data)
cat_df[nominal_cols] = pd.DataFrame(stdand_values)
# Handling Nominal Data
dummy_col = ["own_telephone", "foreign_worker", "class"]
dummy_df = pd.get_dummies(df[dummy_col])
dummy_cols = ["own_telephone_yes", "foreign_worker_yes", "class_good"]
cat_df[dummy_cols] = dummy_df[dummy_cols]
# The personal_status column mixes gender and marital status; the statuses are ambiguous (e.g. div/dep/mar), so single is the only clearly identifiable status.
gender_df, status_df = [], []
for num, row in df.iterrows():
gender, status = row.personal_status.split(" ")
gender_df.append(gender)
status_df.append(status)
d = {"gender": gender_df, "status": status_df}
gender_status_df = pd.DataFrame(data=d)
gender_status_df = pd.get_dummies(gender_status_df)
gender_status_df = gender_status_df[
["gender_male", "status_single"]
] # if male = 1 then female = 0 and vice versa.
cat_df[["gender_male", "status_single"]] = gender_status_df
predictor = cat_df.join(num_df).drop("class_good", axis=1)
target = cat_df["class_good"]
# Feature Selection
clf = ExtraTreesClassifier(random_state=0)
sfs = SequentialFeatureSelector(clf)
sfs = sfs.fit(predictor, target)
feature_names = list(
sfs.get_feature_names_out()
)  # feature names retained by sequential feature selection
feature_names
X_train, X_test, y_train, y_test = train_test_split(
predictor, target, train_size=0.8, stratify=target
)
model_1 = DecisionTreeClassifier(random_state=0)
model_1.fit(X_train[feature_names], y_train)
print(
f"Decision Tree Classifier Score : {model_1.score(X_test[feature_names], y_test )}\n"
)
y_pred = model_1.predict(X_test[feature_names])
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
f1score = f1_score(y_test, y_pred)
print(
f"True Positive: {tp}\tTrue Negative: {tn}\nFalse Positive: {fp}\tFalse Negative: {fn}\n"
)
print(f"F1 Score : {f1score}")
model_2 = RandomForestClassifier(random_state=0)
model_2.fit(X_train[feature_names], y_train)
print(
f"Random Forest Classifier Score : {model_2.score(X_test[feature_names], y_test )}\n"
)
y_pred = model_2.predict(X_test[feature_names])
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
f1score = f1_score(y_test, y_pred)
print(
f"True Positive: {tp}\tTrue Negative: {tn}\nFalse Positive: {fp}\tFalse Negative: {fn}\n"
)
print(f"F1 Score : {f1score}")
model_3 = AdaBoostClassifier(random_state=0)
model_3.fit(X_train[feature_names], y_train)
print(f"Ada Boost Classifier Score : {model_3.score(X_test[feature_names], y_test )}\n")
y_pred = model_3.predict(X_test[feature_names])
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
f1score = f1_score(y_test, y_pred)
print(
f"True Positive: {tp}\tTrue Negative: {tn}\nFalse Positive: {fp}\tFalse Negative: {fn}\n"
)
print(f"F1 Score : {f1score}")
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
# load two almost similar images to compute the MSE between them
# REMEMBER: WHEN COMPARING WITH MSE, THE TWO IMAGES MUST HAVE THE SAME DIMENSIONS
# MSE IS NOT DEFINED FOR IMAGES OF DIFFERENT DIMENSIONS
image1 = cv2.imread("../input/mse-between-two-images/image1.jpg")
image2 = cv2.imread("../input/mse-between-two-images/image2.jpg")
plt.imshow(image1)
plt.imshow(image2)
print(image1.shape)
print(image2.shape)
## convert the images into gray scale
img1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
plt.imshow(img1)
plt.imshow(img2)
print(img1.shape)
print(img2.shape)
# MSE
# 1) First compute the pixel-wise difference between the two images (note: cv2.subtract clips negative differences to 0 for uint8 images)
diff = cv2.subtract(img1, img2)
print(diff)
diff
plt.imshow(diff)
## now square the diff
tmp = diff**2
## now sum up the pixel difference
err_sum = np.sum(tmp)
print(err_sum)
## now we find the mean value
## in order to do that we need to divide the total err_sum
## by the total number of pixels
## since it is a grayscale image it is two dimensional
## so the total number of pixels is height * width
h, w = img1.shape
# you can use img2 instead; it does not matter since they have the same shape
mse = err_sum / (float(h * w))
print(mse)
# the complete MSE function is:
def mse(img1, img2):
h, w = img1.shape
diff = cv2.subtract(img1, img2)
err = np.sum(diff**2)
mse = err / (float(h * w))
return mse
mse(img1, img2)
## if you don't want to use cv2.subtract you can do it manually
def mse2(img1, img2):
err = np.sum((img1.astype("float") - img2.astype("float")) ** 2)
err /= float(img1.shape[0] * img1.shape[1])
return err
diff = img1.astype("float") - img2.astype("float")
plt.imshow(diff)  ## the signed float difference keeps negative values that cv2.subtract clips to zero
mse2(img1, img2)
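# Illustrative sanity check: the MSE of an image compared with itself is exactly 0,
# for both implementations above.
print(mse(img1, img1))  # expected: 0.0
print(mse2(img1, img1))  # expected: 0.0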
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from torch.autograd import Variable
import os
import copy  # needed by the attack() helper below for deepcopy
import numpy as np
learning_rate = 0.01
epsilon = 0.0314
k = 5
alpha = 0.00784
device = "cuda" if torch.cuda.is_available() else "cpu"
# # Data Loading
transform_train = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
train_dataset = datasets.CIFAR10(
root="dataset/", train=True, transform=transform_train, download=True
)
test_dataset = datasets.CIFAR10(
root="dataset/", train=False, transform=transform_test, download=True
)
train_loader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=128, shuffle=True)
classes = (
"plane",
"car",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
)
# # PGD attack
class LinfPGDAttack(object):
def __init__(self, model):
self.model = model
def perturb(self, x_natural, y):
x = x_natural.detach()
x = x + torch.zeros_like(x).uniform_(-epsilon, epsilon)
for i in range(k):
x.requires_grad_()
with torch.enable_grad():
logits = self.model(x)
loss = F.cross_entropy(logits, y)
grad = torch.autograd.grad(loss, [x])[0]
x = x.detach() + alpha * torch.sign(grad.detach())
x = torch.min(torch.max(x, x_natural - epsilon), x_natural + epsilon)
x = torch.clamp(x, 0, 1)
return x
def attack(x, y, model, adversary):
model_copied = copy.deepcopy(model)
model_copied.eval()
adversary.model = model_copied
adv = adversary.perturb(x, y)
return adv
# # Model
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
net = ResNet18()
net = net.to(device)
cudnn.benchmark = True
adversary = LinfPGDAttack(net)
criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0002)
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=1e-4)
rng = np.random.default_rng(12345)
def train(epoch, p):
print("\n[ Train epoch: %d ]" % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
n = 0
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
if p < 0.1:
adv = adversary.perturb(inputs, targets)
else:
adv = inputs
adv_outputs = net(adv)
loss = criterion(adv_outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = adv_outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
n += 1
# if batch_idx % 10 == 0:
# print('\nCurrent batch:', str(batch_idx))
# print('Current adversarial train accuracy:', str(predicted.eq(targets).sum().item() / targets.size(0)))
# print('Current adversarial train loss:', loss.item())
print("\nTotal adversarial train accuarcy:", correct / total)
print("Total adversarial train loss:", train_loss / n)
def test(epoch, p):
print("\n[ Test epoch: %d ]" % epoch)
net.eval()
benign_loss = 0
adv_loss = 0
benign_correct = 0
adv_correct = 0
total = 0
n = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.to(device), targets.to(device)
total += targets.size(0)
outputs = net(inputs)
loss = criterion(outputs, targets)
benign_loss += loss.item()
_, predicted = outputs.max(1)
benign_correct += predicted.eq(targets).sum().item()
# if batch_idx % 10 == 0:
# print('\nCurrent batch:', str(batch_idx))
# print('Current benign test accuracy:', str(predicted.eq(targets).sum().item() / targets.size(0)))
# print('Current benign test loss:', loss.item())
if p < 0.1:
adv = adversary.perturb(inputs, targets)
else:
adv = inputs
# adv = adversary.perturb(inputs, targets)
adv_outputs = net(adv)
loss = criterion(adv_outputs, targets)
adv_loss += loss.item()
_, predicted = adv_outputs.max(1)
adv_correct += predicted.eq(targets).sum().item()
# if batch_idx % 10 == 0:
# print('Current adversarial test accuracy:', str(predicted.eq(targets).sum().item() / targets.size(0)))
# print('Current adversarial test loss:', loss.item())
print("\nTotal benign test accuarcy:", benign_correct / total)
print("Total adversarial test Accuarcy:", adv_correct / total)
print("Total benign test loss:", benign_loss)
print("Total adversarial test loss:", adv_loss)
state = {"net": net.state_dict()}
torch.save(net.state_dict(), "resnet_adv_weights.pt")
print("Model Saved!")
def adjust_learning_rate(optimizer, epoch):
lr = learning_rate
if epoch >= 50:
lr /= 10
if epoch >= 90:
lr /= 10
for param_group in optimizer.param_groups:
param_group["lr"] = lr
for epoch in range(0, 100):
adjust_learning_rate(optimizer, epoch)
p = rng.random()
train(epoch, p)
test(epoch, p)
|
# ### Business Problem
# FLO wants to set a roadmap for its sales and marketing activities. In order for the company to make medium- to long-term plans, the potential value that existing customers will provide to the company in the future needs to be estimated.
# ### Dataset Story
# The dataset consists of information derived from the past shopping behaviour of customers who made their most recent purchases from FLO in 2020-2021 as OmniChannel customers (shopping both online and offline).
import pandas as pd
import datetime as dt
from lifetimes import BetaGeoFitter, GammaGammaFitter
pd.set_option("display.max_columns", None)
pd.set_option("display.float_format", lambda x: "%.2f" % x)
pd.set_option("display.width", 1000)
df_ = pd.read_csv("/kaggle/input/flo-data-20k/flo_data_20k.csv")
df = df_.copy()
df.head()
def outlier_tresholds(dataframe, variable):
quartile_1 = dataframe[variable].quantile(0.01)
quartile_3 = dataframe[variable].quantile(0.99)
interquartlile = quartile_3 - quartile_1
low_limit = round(quartile_1 - 1.5 * interquartlile, 0)
up_limit = round(quartile_3 + 1.5 * interquartlile, 0)
return low_limit, up_limit
def replace_with_treshold(dataframe, variable):
low_limit, up_limit = outlier_tresholds(dataframe, variable)
dataframe.loc[dataframe[variable] > up_limit, variable] = up_limit
dataframe.loc[dataframe[variable] < low_limit, variable] = low_limit
cols = [
"order_num_total_ever_online",
"order_num_total_ever_offline",
"customer_value_total_ever_offline",
"customer_value_total_ever_online",
]
for col in cols:
replace_with_treshold(df, col)
# Create new variables for each customer's total number of orders and total spend
df["total_orders"] = (
df["order_num_total_ever_offline"] + df["order_num_total_ever_online"]
)
df["total_customer_value"] = (
df["customer_value_total_ever_online"] + df["customer_value_total_ever_offline"]
)
date_columns = df.columns[df.columns.str.contains("date")]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
df.info()
# Take the date 2 days after the most recent purchase in the dataset as the analysis date.
df["last_order_date"].max()
today_date = dt.datetime(2021, 6, 1)
# Create a new cltv dataframe containing customer_id, recency_cltv_weekly, T_weekly, frequency and monetary_cltv_avg
# Monetary will be expressed as the average value per purchase, while recency and tenure (T) will be expressed in weeks.
cltv_df = pd.DataFrame()
cltv_df["customer_id"] = df["master_id"]
cltv_df["recency_cltv_weekly"] = (
(df["last_order_date"] - df["first_order_date"]).astype("timedelta64[D]")
) / 7
cltv_df["T_weekly"] = (
(today_date - df["first_order_date"]).astype("timedelta64[D]")
) / 7
cltv_df["frequency"] = df["total_orders"]
cltv_df["monetary_cltv_avg"] = df["total_customer_value"] / df["total_orders"]
cltv_df.head()
# #### Building the BG/NBD and Gamma-Gamma Models and Calculating CLTV
# Fit the BG/NBD model.
# Predict the expected purchases from customers within 3 months and add them to the cltv dataframe as exp_sales_3_month
# Predict the expected purchases from customers within 6 months and add them to the cltv dataframe as exp_sales_6_month
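# Note: recency_cltv_weekly and T_weekly are in weeks, so the first argument of
# bgf.predict() below is a horizon in weeks: 3 months ~ 4 * 3 weeks and 6 months ~ 4 * 6 weeks.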
bgf = BetaGeoFitter(penalizer_coef=0.001)
bgf.fit(cltv_df["frequency"], cltv_df["recency_cltv_weekly"], cltv_df["T_weekly"])
cltv_df["exp_sales_3_month"] = bgf.predict(
4 * 3, cltv_df["frequency"], cltv_df["recency_cltv_weekly"], cltv_df["T_weekly"]
)
cltv_df["exp_sales_6_month"] = bgf.predict(
4 * 6, cltv_df["frequency"], cltv_df["recency_cltv_weekly"], cltv_df["T_weekly"]
)
cltv_df.head()
# Fit the Gamma-Gamma model. Estimate the average value customers will leave and add it to the cltv dataframe as exp_average_value
ggf = GammaGammaFitter(penalizer_coef=0.001)
ggf.fit(cltv_df["frequency"], cltv_df["monetary_cltv_avg"])
cltv_df["exp_average_value"] = ggf.conditional_expected_average_profit(
cltv_df["frequency"], cltv_df["monetary_cltv_avg"]
)
cltv_df.head()
# Calculate 6-month CLTV and add it to the dataframe as cltv. Observe the 20 customers with the highest CLTV values.
cltv = ggf.customer_lifetime_value(
bgf,
cltv_df["frequency"],
cltv_df["recency_cltv_weekly"],
cltv_df["T_weekly"],
cltv_df["monetary_cltv_avg"],
time=6,
freq="W",
discount_rate=0.01,
)
cltv_df["cltv"] = cltv
cltv_df.sort_values("cltv", ascending=False)[:20]
# #### Creating Segments Based on CLTV
# Split all customers into 4 segments based on 6-month CLTV and add the group names to the dataset.
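# pd.qcut splits by quantiles, so each segment holds roughly a quarter of the customers;
# the labels run from D (lowest CLTV quartile) up to A (highest).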
cltv_df["segment"] = pd.qcut(cltv_df["cltv"], 4, labels=["D", "C", "B", "A"])
cltv_df.head()
|
# **Thanks for the great work from**
# 此般浅薄: https://www.kaggle.com/code/xzj19013742/simple-eda-on-time-for-targets
# **Attention**
# To see the LGBM feature importances, you must first run the code above by 此般浅薄.
# The code below cannot run independently!!!
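# A minimal, commented-out sketch (an assumption about the expected structure, not the
# original training code) of what `regs` should look like for the aggregation below:
# a list of five fitted MultiOutputRegressor models, each wrapping an LGBMRegressor over
# three targets, so that regs[i].estimators_[j].feature_importances_ exists.
# from lightgbm import LGBMRegressor
# from sklearn.multioutput import MultiOutputRegressor
# import numpy as np
# rng_demo = np.random.default_rng(0)
# X_demo = rng_demo.normal(size=(200, 8))
# y_demo = rng_demo.normal(size=(200, 3))  # three targets -> three estimators_ per model
# regs = [
#     MultiOutputRegressor(LGBMRegressor(n_estimators=10)).fit(X_demo, y_demo)
#     for _ in range(5)
# ]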
importance = pd.DataFrame()
est_colname = ["importance0", "importance1", "importance2"]
reg_colname = [
"importance_0",
"importance_1",
"importance_2",
"importance_3",
"importance_4",
]
for i in range(5):
for j in range(3):
# "regs" is not defined unless you run the codes above!
est = regs[i].estimators_[j]
name = est_colname[j]
feature_importances = pd.DataFrame(
est.feature_importances_, columns=[name]
) # .sort_values('importance')
importance = pd.concat([importance, feature_importances], axis=1)
name2 = reg_colname[i]
importance[name2] = (
importance["importance0"]
+ importance["importance1"]
+ importance["importance2"]
) / 3
importance = importance.drop(columns=["importance0", "importance1", "importance2"])
importance["importance"] = (
importance["importance_0"]
+ importance["importance_1"]
+ importance["importance_2"]
+ importance["importance_3"]
+ importance["importance_4"]
) / 5
importance = pd.DataFrame(importance["importance"], columns=["importance"]).sort_values(
"importance"
)
for i in range(len(cols)):
importance = importance.rename({i: cols[i]})
import plotly.express as px
fig = px.bar(
x=importance.index.values,
y=importance["importance"],
color_discrete_sequence=["darkslateblue"],
)
fig.update_layout(xaxis_title="Feature", yaxis_title="Importance")
fig.show()
|
# # Movie Mediapipe Background Blurred
# https://developers.google.com/mediapipe/solutions/vision/image_segmenter/web_js
import math
import urllib
import mediapipe as mp
from mediapipe.python._framework_bindings import image
from mediapipe.python._framework_bindings import image_frame
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
import os
import cv2
import shutil
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from IPython.display import HTML, Video, Image, clear_output
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import animation, rc
rc("animation", html="jshtml")
# # mp4 to frame
path_mp0 = "/kaggle/input/real-life-violence-situations-dataset/Real Life Violence Dataset/NonViolence/NV_110.mp4"
path_mp1 = "/kaggle/working/sample1/sample.mp4"
# Make sure the working directories exist before copying the video and writing frames/outputs
os.makedirs("/kaggle/working/sample1", exist_ok=True)
os.makedirs("/kaggle/working/sample2", exist_ok=True)
for d in ["original", "blurred", "output"]:
    os.makedirs(d, exist_ok=True)
shutil.copy(path_mp0, path_mp1)
def video2frames(
video_file=path_mp1, image_dir="/kaggle/working/sample2/", image_file="img_%s.png"
):
i = 0
cap = cv2.VideoCapture(video_file)
while cap.isOpened():
flag, frame = cap.read()
if flag == False:
break
cv2.imwrite(image_dir + image_file % str(i).zfill(5), frame)
i += 1
cap.release()
video2frames()
paths0 = []
for dirname, _, filenames in os.walk("/kaggle/working/sample2/"):
for filename in filenames:
if filename[-4:] == ".png":
paths0 += [(os.path.join(dirname, filename))]
paths0 = sorted(paths0)
images0 = []
for i in tqdm(range(0, len(paths0), 20)):
images0 += [cv2.imread(paths0[i])]
def create_animation(ims):
fig = plt.figure(figsize=(12, 6))
im = plt.imshow(cv2.cvtColor(ims[0], cv2.COLOR_BGR2RGB))
text = plt.text(
0.05, 0.05, f"Slide {0}", transform=fig.transFigure, fontsize=14, color="blue"
)
plt.axis("off")
plt.close()
def animate_func(i):
im.set_array(cv2.cvtColor(ims[i], cv2.COLOR_BGR2RGB))
text.set_text(f"Slide {i}")
return [im]
return animation.FuncAnimation(
fig, animate_func, frames=len(ims), interval=1000 // 2
)
create_animation(np.array(images0))
# # Background removal for frame images
IMAGE_FILENAMES = np.array(paths0)[list(range(0, len(paths0), 20))]
DESIRED_HEIGHT = 480
DESIRED_WIDTH = 480
def resize_and_show(image):
h, w = image.shape[:2]
# print(h,w)
if h < w:
img = cv2.resize(image, (DESIRED_WIDTH, math.floor(h / (w / DESIRED_WIDTH))))
else:
img = cv2.resize(image, (math.floor(w / (h / DESIRED_HEIGHT)), DESIRED_HEIGHT))
h2, w2 = img.shape[:2]
# print(h2,w2)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
def resize_and_show_write(image, filename):
h, w = image.shape[:2]
# print(h,w)
if h < w:
img = cv2.resize(image, (DESIRED_WIDTH, math.floor(h / (w / DESIRED_WIDTH))))
else:
img = cv2.resize(image, (math.floor(w / (h / DESIRED_HEIGHT)), DESIRED_HEIGHT))
h2, w2 = img.shape[:2]
# print(h2,w2)
print("./original/" + filename)
cv2.imwrite("./original/" + filename, img)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
images2 = {name: cv2.imread(name) for name in IMAGE_FILENAMES}
for name, image in images2.items():
filename = name.split("/")[-1]
resize_and_show_write(image, filename)
print()
BG_COLOR = (192, 192, 192) # gray
MASK_COLOR = (255, 255, 255) # white
OutputType = vision.ImageSegmenterOptions.OutputType
Activation = vision.ImageSegmenterOptions.Activation
base_options = python.BaseOptions(model_asset_path="deeplabv3.tflite")
options = vision.ImageSegmenterOptions(
base_options=base_options, output_type=OutputType.CATEGORY_MASK
)
# Create the image segmenter
with vision.ImageSegmenter.create_from_options(options) as segmenter:
for image_file_name in IMAGE_FILENAMES:
image = mp.Image.create_from_file(image_file_name)
category_masks = segmenter.segment(image)
image_data = image.numpy_view()
fg_image = np.zeros(image_data.shape, dtype=np.uint8)
fg_image[:] = MASK_COLOR
bg_image = np.zeros(image_data.shape, dtype=np.uint8)
bg_image[:] = BG_COLOR
condition = np.stack((category_masks[0].numpy_view(),) * 3, axis=-1) > 0.2
output_image = np.where(condition, fg_image, bg_image)
# print(f'Segmentation mask of {name}:')
filename = image_file_name.split("/")[-1]
print("./blurred/blurred" + filename)
resize_and_show(output_image)
cv2.imwrite("./blurred/blurred" + filename, output_image)
print()
# Create the segmenter
with python.vision.ImageSegmenter.create_from_options(options) as segmenter:
for i, image_file_name in enumerate(IMAGE_FILENAMES):
# print(image_file_name)
image = mp.Image.create_from_file(image_file_name)
category_masks = segmenter.segment(image)
image_data = cv2.cvtColor(image.numpy_view(), cv2.COLOR_BGR2RGB)
blurred_image = cv2.GaussianBlur(image_data, (55, 55), 0)
condition = np.stack((category_masks[0].numpy_view(),) * 3, axis=-1) > 0.1
output_image = np.where(condition, image_data, blurred_image)
filename = image_file_name.split("/")[-1]
print("./output/" + filename)
resize_and_show(output_image)
cv2.imwrite("./output/" + filename, output_image)
print()
# !ls original
# !ls blurred
# !ls output
paths3 = []
for dirname, _, filenames in os.walk("/kaggle/working/output/"):
for filename in filenames:
if filename[-4:] == ".png":
paths3 += [(os.path.join(dirname, filename))]
paths3 = sorted(paths3)
images3 = []
for i in tqdm(range(len(paths3))):
images3 += [cv2.imread(paths3[i])]
create_animation(np.array(images3))
|
# Importing modules
import numpy as np
import os
import cv2
from sklearn.metrics import confusion_matrix
import seaborn as sns
sns.set(font_scale=1.4)
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import pandas as pd
from sklearn import decomposition
from keras.layers import (
Input,
Dense,
Conv2D,
Activation,
MaxPooling2D,
Flatten,
Dropout,
GlobalAveragePooling2D,
)
from keras.callbacks import ReduceLROnPlateau
from sklearn.metrics import accuracy_score
# Dataset link: https://www.kaggle.com/datasets/puneet6060/intel-image-classification
# # Data preprocessing
# Filling in the names of image classes
class_names = ["buildings", "forest", "glacier", "mountain", "sea", "street"]
class_names_label = {class_name: i for i, class_name in enumerate(class_names)}
nb_classes = len(class_names)
IMAGE_SIZE = (150, 150)
# Function for uploading images
def load_data():
datasets = ["seg_train/seg_train", "seg_test/seg_test"]
output = []
for dataset in datasets:
images = []
labels = []
print("Loading {}".format(dataset))
for folder in os.listdir(dataset):
label = class_names_label[folder]
for file in tqdm(os.listdir(os.path.join(dataset, folder))):
img_path = os.path.join(os.path.join(dataset, folder), file)
image = cv2.imread(img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, IMAGE_SIZE)
images.append(image)
labels.append(label)
images = np.array(images, dtype="float32")
labels = np.array(labels, dtype="int32")
output.append((images, labels))
return output
# Split the images into a training set and a test set
(train_images, train_labels), (test_images, test_labels) = load_data()
# Shuffling the sets
train_images, train_labels = shuffle(train_images, train_labels, random_state=25)
# Output the information about the sets
n_train = train_labels.shape[0]
n_test = test_labels.shape[0]
print("Number of examples in the training set: {}".format(n_train))
print("Number of examples in the test set: {}".format(n_test))
print("The size of each image: {}".format(IMAGE_SIZE))
# Number of images in each category in the test and training set
_, train_counts = np.unique(train_labels, return_counts=True)
_, test_counts = np.unique(test_labels, return_counts=True)
pd.DataFrame({"train": train_counts, "test": test_counts}, index=class_names).plot.bar()
plt.show()
plt.pie(train_counts, explode=(0, 0, 0, 0, 0, 0), labels=class_names, autopct="%1.1f%%")
plt.axis("equal")
plt.title("Proportions for each category of images", y=1.05)
plt.show()
# Scaling the data
train_images = train_images / 255.0
test_images = test_images / 255.0
# Function to output multiple images from a dataset
def display_examples(class_names, images, labels):
fig = plt.figure(figsize=(10, 10))
fig.suptitle("Examples of dataset images", fontsize=16)
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[labels[i]])
plt.show()
display_examples(class_names, train_images, train_labels)
# # The first version of the VGG16 neural network
# Using the VGG16 convolutional neural network for image classification
model = VGG16(weights="imagenet", include_top=False)
# Extracting convolutional features with VGG16 (with include_top=False the model outputs feature maps, not class probabilities)
train_features = model.predict(train_images)
test_features = model.predict(test_images)
# Visualize the data using a two-dimensional representation (via PCA)
n_train, x, y, z = train_features.shape
n_test, x, y, z = test_features.shape
numFeatures = x * y * z
pca = decomposition.PCA(n_components=2)
X = train_features.reshape((n_train, x * y * z))
pca.fit(X)
C = pca.transform(X)
C1 = C[:, 0]
C2 = C[:, 1]
plt.subplots(figsize=(10, 10))
for i, class_name in enumerate(class_names):
plt.scatter(
C1[train_labels == i][:1000],
C2[train_labels == i][:1000],
label=class_name,
alpha=0.4,
)
plt.legend()
plt.title("PCA Projection")
plt.show()
# The forest class stands out clearly. Streets and buildings overlap heavily, and there is also some overlap between glaciers, mountains, and seas.
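# A quick optional check (not part of the original flow): how much variance the two
# principal components above actually capture.
print("Explained variance ratio:", pca.explained_variance_ratio_)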
# Function for visualizing the loss function and metrics
def plot_accuracy_loss(history):
fig = plt.figure(figsize=(13, 8))
plt.subplot(221)
plt.plot(history.history["accuracy"], "bo--", label="acc")
plt.plot(history.history["val_accuracy"], "ro--", label="val_acc")
plt.title("train_acc vs val_acc")
plt.ylabel("accuracy")
plt.xlabel("epochs")
plt.legend()
plt.subplot(222)
plt.plot(history.history["loss"], "bo--", label="loss")
plt.plot(history.history["val_loss"], "ro--", label="val_loss")
plt.title("train_loss vs val_loss")
plt.ylabel("loss")
plt.xlabel("epochs")
plt.legend()
plt.show()
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
# Build and train a neural network that will determine the class after VGG16
model2 = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dense(80, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2.fit(
train_features,
train_labels,
batch_size=128,
epochs=15,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
test_loss = model2.evaluate(test_features, test_labels)
# accuracy on the test set < 0.9
# # The second version of the VGG16 neural network
# We will use VGG16 as the basis
# Truncate the network at the fifth layer from the end (i.e. drop the last four layers)
model = VGG16(weights="imagenet", include_top=False)
model = Model(inputs=model.inputs, outputs=model.layers[-5].output)
train_features = model.predict(train_images)
test_features = model.predict(test_images)
# Let's complete this neural network: reapply the last four layers of VGG16 (block 5) on the truncated features and add a new classification head
model2 = VGG16(weights="imagenet", include_top=False)
input_shape = model2.layers[-4].get_input_shape_at(0)
layer_input = Input(shape=(9, 9, 512))
x = layer_input
for layer in model2.layers[-4::1]:
x = layer(x)
x = Conv2D(64, (3, 3), activation="relu")(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(95, activation="relu")(x)
# x = Dense(20, activation='relu')(x)
x = Dense(6, activation="softmax")(x)
new_model = Model(layer_input, x)
new_model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Structure of the resulting model
new_model.summary()
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
# Neural network training
history = new_model.fit(
train_features,
train_labels,
batch_size=128,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
# Final accuracy
predictions = new_model.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# Accuracy > 0.9
# # Inceptionv3 neural network
from tensorflow.keras.applications.inception_v3 import InceptionV3
# Create an InceptionV3 model without a full-link layer
model_inceptionV3 = InceptionV3(weights="imagenet", include_top=False)
# Extract features
train_features = model_inceptionV3.predict(train_images)
test_features = model_inceptionV3.predict(test_images)
_, x, y, z = train_features.shape
# Build a small, fully-connected neural network that will make predictions based on the extracted features
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
model2_inceptionV3 = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2_inceptionV3.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2_inceptionV3.fit(
train_features,
train_labels,
batch_size=256,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
# Accuracy
predictions = model2_inceptionV3.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# # Xception Neural Network
from keras.applications import Xception
# Creating the Xception model
model_Xception = Xception(include_top=False, weights="imagenet")
# Extracting features
train_features = model_Xception.predict(train_images)
test_features = model_Xception.predict(test_images)
_, x, y, z = train_features.shape
# Build a small, fully-connected neural network that will make predictions based on the extracted features
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
model2_Xception = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(1024, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2_Xception.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2_Xception.fit(
train_features,
train_labels,
batch_size=256,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
predictions = model2_Xception.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# # VGG 19 Neural Network
from keras.applications import VGG19
model_vgg19 = VGG19(include_top=False, weights="imagenet")
train_features = model_vgg19.predict(train_images)
test_features = model_vgg19.predict(test_images)
_, x, y, z = train_features.shape
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
model2_vgg19 = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dense(100, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2_vgg19.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2_vgg19.fit(
train_features,
train_labels,
batch_size=256,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
predictions = model2_vgg19.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# # The second version of the VGG 19 neural network
model_vgg19 = VGG19(weights="imagenet", include_top=False)
model_vgg19 = Model(inputs=model_vgg19.inputs, outputs=model_vgg19.layers[-5].output)
train_features = model_vgg19.predict(train_images)
test_features = model_vgg19.predict(test_images)
model2_vgg19 = VGG19(weights="imagenet", include_top=False)
input_shape = model2_vgg19.layers[-4].get_input_shape_at(0)
layer_input = Input(shape=(9, 9, 512))
x = layer_input
for layer in model2_vgg19.layers[-4::1]:
x = layer(x)
x = Conv2D(64, (3, 3), activation="relu")(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(512, activation="relu")(x)
x = Dense(6, activation="softmax")(x)
new_model = Model(layer_input, x)
new_model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
new_model.summary()
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
history = new_model.fit(
train_features,
train_labels,
batch_size=128,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
predictions = new_model.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# # Neural network ResNet50
from keras.applications import ResNet50V2
# Creating the ResNet50V2 model
model_resNet50 = ResNet50V2(include_top=False, weights="imagenet")
# Extracting features
train_features = model_resNet50.predict(train_images)
test_features = model_resNet50.predict(test_images)
_, x, y, z = train_features.shape
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
# Training the model
model2_resNet50V2 = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dense(100, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2_resNet50V2.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2_resNet50V2.fit(
train_features,
train_labels,
batch_size=256,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
# Accuracy
predictions = model2_resNet50V2.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# # ResNet101 neural network
from keras.applications import ResNet101V2
# Building the ResNet101 model
model_resNet101 = ResNet101V2(include_top=False, weights="imagenet")
# Extracting features
train_features = model_resNet101.predict(train_images)
test_features = model_resNet101.predict(test_images)
_, x, y, z = train_features.shape
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
# Training a neural network
model2_resNet101 = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(1024, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2_resNet101.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2_resNet101.fit(
train_features,
train_labels,
batch_size=256,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
# Accuracy
predictions = model2_resNet101.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# # Neural network ResNet152
from keras.applications import ResNet152V2
# Creating the ResNet152 model
model_resNet152 = ResNet152V2(include_top=False, weights="imagenet")
# Extracting features
train_features = model_resNet152.predict(train_images)
test_features = model_resNet152.predict(test_images)
_, x, y, z = train_features.shape
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
# Training the model
model2_resNet152 = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(256, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2_resNet152.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2_resNet152.fit(
train_features,
train_labels,
batch_size=256,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
# Accuracy
predictions = model2_resNet152.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
# # InceptionResNet neural network
from keras.applications import InceptionResNetV2
# Creating a model
model_incptionResNet = InceptionResNetV2(include_top=False, weights="imagenet")
# Extracting features
train_features = model_incptionResNet.predict(train_images)
test_features = model_incptionResNet.predict(test_images)
_, x, y, z = train_features.shape
lr_rate = ReduceLROnPlateau(
monitor="val_accuracy", patience=1, factor=0.25, min_lr=0.000003
)
# Training a neural network to classify on extracted features
model2_incptionResNet = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(x, y, z)),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(6, activation=tf.nn.softmax),
]
)
model2_incptionResNet.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model2_incptionResNet.fit(
train_features,
train_labels,
batch_size=256,
epochs=10,
validation_split=0.2,
callbacks=[lr_rate],
)
plot_accuracy_loss(history)
# Accuracy
predictions = model2_incptionResNet.predict(test_features)
pred_labels = np.argmax(predictions, axis=1)
print("Accuracy: {}".format(accuracy_score(test_labels, pred_labels)))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
athlete_selection = pd.read_csv("/kaggle/input/athletesat/AthleteSelection.csv")
athlete_selection.set_index("Athlete", inplace=True)
# Check whether the index has unique values
print(
    "\nDoes the Pandas index have unique values?\n", athlete_selection.index.is_unique
)
athlete_selection.head(5)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
athlete_selection = pd.read_csv("/kaggle/input/athletesat/AthleteSelection.csv")
# Create a new variable "names" that contains the DataFrame indexes
names = athlete_selection.index
# Print the "names" variable
print(names)
import pandas as pd
import numpy as np
athlete_selection = pd.read_csv("/kaggle/input/athletesat/AthleteSelection.csv")
X = np.array(athlete_selection.iloc[:, 1:5])
Y = np.array(athlete_selection.iloc[:, -1])
print(X[0, 0])
from sklearn.neighbors import NearestNeighbors
import numpy as np
# Load the data
data = np.genfromtxt(
"/kaggle/input/athletesat/AthleteSelection.csv", delimiter=",", skip_header=1
)
X = data[:, 1:3] # Features
y = data[:, 3] # Labels
# Create and fit the model
model = NearestNeighbors(n_neighbors=2, radius=0.4)
model.fit(X)
# Get the parameters of the model
params = model.get_params()
print(params)
from sklearn.neighbors import NearestNeighbors
import numpy as np
# Load the data
data = np.genfromtxt(
"/kaggle/input/athlete-selection/AthleteSelection.csv", delimiter=",", skip_header=1
)
X = data[:, 1:3] # Features
y = data[:, 3] # Labels
# Create and fit the model
model = NearestNeighbors(n_neighbors=2, radius=0.4)
model.fit(X)
# Find the k nearest neighbors of q1 and q2
q1 = [3.25, 8.25]
q2 = [0.2, 3.3]
distances, indices = model.kneighbors([q1, q2])
# Stack distances and indices vertically
results = np.vstack((distances, indices))
# Print the results as a matrix
print("Distances and Indices of the nearest neighbors:")
print(results)
from sklearn.neighbors import NearestNeighbors
import numpy as np
# Load the data
data = np.genfromtxt(
"/kaggle/input/athlete-selection/AthleteSelection.csv", delimiter=",", skip_header=1
)
X = data[:, 1:3] # Features
y = data[:, 3] # Labels
# Create and fit the model
model = NearestNeighbors(n_neighbors=2, radius=0.4)
model.fit(X)
# Find the k nearest neighbors of q1 and q2
q1 = [3.25, 8.25]
q2 = [0.2, 3.3]
distances, indices = model.kneighbors([q1, q2])
# Print the results
print(f"The 2 nearest neighbors of {q1} are:")
for i, index in enumerate(indices[0]):
print(f"{i+1}. Point {index} at distance {distances[0][i]}")
print(f"\nThe 2 nearest neighbors of {q2} are:")
for i, index in enumerate(indices[1]):
print(f"{i+1}. Point {index} at distance {distances[1][i]}")
# q = [5.0,7.5]
# q3n = athlete_neigh.kneighbors([q], n_neighbors = 3)[1][0]
# for n in q3n:
# print(names[n])
# This code finds the 3 nearest neighbors of the point q using a NearestNeighbors model called athlete_neigh. The kneighbors method returns two arrays: the first contains the distances to the nearest neighbors, and the second contains the indices of the nearest neighbors. In this case, only the indices are used, which are accessed using [1][0]. The indices are then used to access the corresponding names from a list called names, and these names are printed out.
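# A tiny self-contained illustration (toy 2-D points, not the athlete data) of the
# (distances, indices) pair that kneighbors returns:
from sklearn.neighbors import NearestNeighbors
import numpy as np
toy_X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [5.0, 5.0]])
toy_model = NearestNeighbors(n_neighbors=2).fit(toy_X)
toy_dist, toy_idx = toy_model.kneighbors([[0.2, 0.1]])
print(toy_dist)  # distances to the two closest points
print(toy_idx)  # their row positions in toy_X (here points 0 and 1)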
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
# Load the data
data = np.genfromtxt(
"/kaggle/input/athlete-selection/AthleteSelection.csv", delimiter=",", skip_header=1
)
X = data[:, 1:3] # Features
y = data[:, 3] # Labels
# Create and fit the model
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X, y)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# Load the data
data = np.genfromtxt(
"/kaggle/input/athlete-selection/AthleteSelection.csv", delimiter=",", skip_header=1
)
X = data[:, 1:3] # Features
y = data[:, 3] # Labels
# Create and fit the model
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X, y)
# Make predictions on the training data
y_pred = model.predict(X)
# Print the predicted labels and the actual labels
print("Predicted labels:", y_pred)
print("Actual labels:", y)
# Calculate the accuracy
accuracy = accuracy_score(y, y_pred)
print(f"Accuracy: {accuracy:.2f}")
# Although the model looks accurate, keep in mind that the accuracy above is measured on the same data used for training, which inflates it. Several factors can contribute to a model's performance; possible reasons why a KNeighborsClassifier might do well include: the data is well suited to a nearest-neighbours approach, the value of K was chosen appropriately, and the features used to represent the data are informative and discriminative. Without more information about the specific data and problem, it is difficult to say for certain why the model is accurate.
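# A hedged sketch (not part of the original task): estimating generalization accuracy
# with cross-validation instead of scoring on the same data used to fit the model,
# assuming the labels form a small number of classes with a few examples each.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(KNeighborsClassifier(n_neighbors=3), X, y, cv=3)
print(f"3-fold CV accuracy: {cv_scores.mean():.2f} (+/- {cv_scores.std():.2f})")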
# TASK 2:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import accuracy_score
import numpy as np
# 1. Load the test data
test_data = np.genfromtxt(
"/kaggle/input/athletesat/AthleteTest.csv", delimiter=",", skip_header=1
)
X_test = test_data[:, 1:3] # Features
y_test = test_data[:, 3] # Labels
# Load the training data
train_data = np.genfromtxt(
"/kaggle/input/athletesat/AthleteTest.csv", delimiter=",", skip_header=1
)
X_train = train_data[:, 1:3] # Features
y_train = train_data[:, 3] # Labels
# 2. Create and fit the KNeighborsClassifier model with K=3
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X_train, y_train)
# 3. Evaluate the model on the test data
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy of KNeighborsClassifier with K=3: {accuracy:.2f}")
# 4. Use StandardScaler to scale the data and fit a new KNeighborsClassifier model
std_scaler = StandardScaler()
X_train_std = std_scaler.fit_transform(X_train)
X_test_std = std_scaler.transform(X_test)
model2 = KNeighborsClassifier(n_neighbors=3)
model2.fit(X_train_std, y_train)
y_pred2 = model2.predict(X_test_std)
accuracy2 = accuracy_score(y_test, y_pred2)
print(f"Accuracy of KNeighborsClassifier with K=3 and StandardScaler: {accuracy2:.2f}")
# 5. Use MinMaxScaler to scale the data and fit a new KNeighborsClassifier model
mm_scaler = MinMaxScaler()
X_train_mm = mm_scaler.fit_transform(X_train)
X_test_mm = mm_scaler.transform(X_test)
model3 = KNeighborsClassifier(n_neighbors=3)
model3.fit(X_train_mm, y_train)
y_pred3 = model3.predict(X_test_mm)
accuracy3 = accuracy_score(y_test, y_pred3)
print(f"Accuracy of KNeighborsClassifier with K=3 and MinMaxScaler: {accuracy3:.2f}")
# 6. Evaluate the models with different values of K (unscaled, standardized and min-max scaled features)
for k in range(1, 6):
    model = KNeighborsClassifier(n_neighbors=k)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print(f"Accuracy of KNeighborsClassifier with K={k}: {accuracy:.2f}")
    model2 = KNeighborsClassifier(n_neighbors=k)
    model2.fit(X_train_std, y_train)
    y_pred2 = model2.predict(X_test_std)
    accuracy2 = accuracy_score(y_test, y_pred2)
    print(
        f"Accuracy of KNeighborsClassifier with K={k} and StandardScaler: {accuracy2:.2f}"
    )
    model3 = KNeighborsClassifier(n_neighbors=k)
    model3.fit(X_train_mm, y_train)
    y_pred3 = model3.predict(X_test_mm)
    accuracy3 = accuracy_score(y_test, y_pred3)
    print(
        f"Accuracy of KNeighborsClassifier with K={k} and MinMaxScaler: {accuracy3:.2f}"
    )
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
filmes = pd.read_csv(
"/kaggle/input/movielens-100k-dataset/ml-100k/u.item",
sep="|",
encoding="latin-1",
header=None,
index_col=False,
)
filmes.head()
filmes.columns = [
"movie_id",
"movie_title",
"release_date",
"video_release_date",
"IMDb_URL",
"unknown",
"Action",
"Adventure",
"Animation",
"Children",
"Comedy",
"Crime",
"Documentary",
"Drama",
"Fantasy",
"FilmNoir",
"Horror",
"Musical",
"Mystery",
"Romance",
"SciFi",
"Thriller",
"War",
"Western",
]
filmes.head()
filmes.shape
filmes.info()
filmes.video_release_date.unique()
filmes.drop(columns=["video_release_date"], inplace=True)
filmes.head()
notas = pd.read_csv(
"/kaggle/input/movielens-100k-dataset/ml-100k/u.data",
encoding="latin-1",
sep="\t",
header=None,
index_col=False,
)
notas.columns = ["user_id", "item_id", "rating", "timestamp"]
notas.head()
notas.shape
notas.info()
notas.describe()
# Choosing the movie ID as the index
filmes = filmes.set_index("movie_id")
# Selecting a movie by its ID (picked based on the time I am typing this line)
filmes.loc[1143]
# # Checking each movie's total number of votes:
total_de_votos = notas["item_id"].value_counts()
total_de_votos.head()
filmes["Total_de_votos"] = total_de_votos
filmes.head()
filmes.sort_values("Total_de_votos", ascending=False)
# # Checking the movies' average ratings:
notas_medias = notas.groupby("item_id").mean()["rating"]
notas_medias.head()
filmes["nota_media"] = notas_medias
filmes.head()
filmes.sort_values("nota_media", ascending=False).head(15)
filmes.query("Total_de_votos >= 10").sort_values("nota_media", ascending=False)
# Making recommendations based only on the average rating is not a good approach, because little-known movies may receive just a few ratings that rank them above the others, so the average does not adequately represent their evaluation.
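# One common remedy (just a sketch, not used in the rest of this notebook) is a
# Bayesian / weighted average that shrinks movies with few votes toward the global mean:
m = 50  # minimum number of votes used as the shrinkage strength (an arbitrary choice)
C = filmes["nota_media"].mean()  # global mean rating
v = filmes["Total_de_votos"]
R = filmes["nota_media"]
filmes["nota_ponderada"] = (v / (v + m)) * R + (m / (v + m)) * C
filmes.sort_values("nota_ponderada", ascending=False).head(5)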
filmes.Total_de_votos.min()
filmes.Total_de_votos.max()
# Knowing the minimum and maximum number of votes the movies received, I decided to work only with those that had at least 50 ratings.
filmes_com_mais_de_50_votos = filmes.query("Total_de_votos >= 50")
filmes_com_mais_de_50_votos.sort_values("nota_media", ascending=False)
# # Building a recommendation system using movie genres:
# Let's say I have watched the following movies:
# (image of the watched movies omitted)
# And I want to watch an action and drama movie.
assistido = [67, 2, 313, 132, 192, 179, 227, 501, 614, 631]
filmes_com_mais_de_50_votos.query("Action == 1 and Drama == 1")
# I created a new variable to hold the movies of these genres and view the top 5 with the best average ratings.
acao_drama = filmes_com_mais_de_50_votos.query("Action == 1 and Drama == 1")
acao_drama.sort_values("nota_media", ascending=False).head(5)
# From this result, I excluded the movies I have already watched:
acao_drama.drop(assistido, errors="ignore").sort_values(
"nota_media", ascending=False
).head(5)
# According to the chosen genres and the best average ratings, the recommendations are:
# - O Poderoso Chefão;
# - Star Wars: O Império Contra-Ataca;
# - O Barco;
# - O Poderoso Chefão 2;
# - Coração Valente.
# But remember that I did not consider movies with fewer than 50 ratings. This means that movies produced by small studios, or that have not yet caught on with the general public, will not be found by new users, which discourages the discovery of new works.
# # Building a recommendation system using the user's history:
# I share tastes with my friends: for example, we like some of the same movies, series and books, while disliking other works. In other words, my friends and I may rate these works with identical or similar scores.
# To check whether we share the same tastes, we can compute the Euclidean distance between the ratings; in short, the smaller this value, the more "compatible" and closer the tastes of the two profiles are. Then, if one user liked a movie the other profile has not watched yet, it becomes easier to make recommendations.
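# A tiny worked example (made-up ratings, just to illustrate the idea): two users who
# rate three shared movies; the closer their scores, the smaller the Euclidean distance.
ratings_a = np.array([5, 3, 4])
ratings_b = np.array([4, 3, 5])
ratings_c = np.array([1, 5, 2])
print(np.linalg.norm(ratings_a - ratings_b))  # ~1.41 -> similar tastes
print(np.linalg.norm(ratings_a - ratings_c))  # ~4.90 -> very different tastes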
# Choosing the first user:
# (image omitted)
usuario_43 = notas.query("user_id == 43")
usuario_43[["item_id", "rating"]].set_index("item_id")
def historico_usuario(usuario):
historico_usuario = notas.query("user_id == %d" % usuario)
historico_usuario = historico_usuario[["item_id", "rating"]].set_index("item_id")
return historico_usuario
historico_usuario(43)
# Choosing another user:
# (image omitted)
historico_usuario(62)
user43 = historico_usuario(43)
user62 = historico_usuario(62)
# Joining the two users' ratings and keeping in the dataframe only the movies they have in common:
user43.join(user62, lsuffix="_do_user", rsuffix="_comparacao").dropna()
diferenca = user43.join(user62, lsuffix="_do_user", rsuffix="_comparacao").dropna()
np.linalg.norm(diferenca["rating_do_user"] - diferenca["rating_comparacao"])
# From this value alone it is not yet possible to say whether the profiles are compatible or not. So I will write a function to speed up this calculation for other users.
def distancia_entre_perfis(user_id1, user_id2):
notas1 = historico_usuario(user_id1)
notas2 = historico_usuario(user_id2)
diferenca = notas1.join(notas2, lsuffix="_do_user", rsuffix="_comparacao").dropna()
distancia = np.linalg.norm(
diferenca["rating_do_user"] - diferenca["rating_comparacao"]
)
return [user_id1, user_id2, distancia]
distancia_entre_perfis(43, 62)
# ## Checking which profiles are the most compatible:
notas.user_id.unique()
print("O dataset possui %d usuarios." % len(notas.user_id.unique()))
# The ratings dataframe has 943 users in total. So I will compare profile 43's ratings against the other users and look at the first 5 comparisons:
usuario_1 = 43
distancias = []
for usuario in notas["user_id"].unique():
calculo = distancia_entre_perfis(usuario_1, usuario)
distancias.append(calculo)
distancias[:5]
# The comparison shows that the distance between user 43 and some of the other users can be smaller than the one calculated above.
# Function to calculate the distance between one specific user and all the others:
def distancia_entre_usuarios(usuario_1):
todos_os_usuarios = notas["user_id"].unique()
distancias = [
distancia_entre_perfis(usuario_1, user_id) for user_id in todos_os_usuarios
]
distancias = pd.DataFrame(
distancias, columns=["Usuario_1", "Outro_user", "Distancia"]
)
return distancias
distancia_entre_usuarios(43).head()
# Sorting the dataframe by distance:
def mais_proximos_de(usuario_1):
distancias = distancia_entre_usuarios(usuario_1)
distancias = distancias.sort_values("Distancia")
distancias = distancias.set_index("Outro_user").drop(usuario_1)
return distancias
mais_proximos_de(43)
# The function's output shows that user 43's profile is most similar to user 172, while it is farthest from user 405.
mais_proximos_de(43).head(20)
# # Generating recommendations with KNN:
# KNN stands for *K-nearest neighbors*. The method is similar to what has been done so far, where we looked for the user profiles that were closest, based on the movies they watched and the ratings they gave.
# At this stage, I reworked some of the functions. For example, **distancia_entre_perfis** received a new argument: if the number of movies both users have watched is smaller than 5, the distance between these profiles is not calculated. In **distancia_entre_usuarios**, the new argument sets the maximum number of profiles to compare. And in **mais_proximos_de**, I added the 2 arguments mentioned above.
def distancia_entre_perfis(user_id1, user_id2, minimo=5):
notas1 = historico_usuario(user_id1)
notas2 = historico_usuario(user_id2)
diferenca = notas1.join(notas2, lsuffix="_do_user", rsuffix="_comparacao").dropna()
if len(diferenca) < minimo:
return None
distancia = np.linalg.norm(
diferenca["rating_do_user"] - diferenca["rating_comparacao"]
)
return [user_id1, user_id2, distancia]
def distancia_entre_usuarios(user_1, numero_maximo_de_analise=None):
todos_os_usuarios = notas["user_id"].unique()
if numero_maximo_de_analise:
todos_os_usuarios = todos_os_usuarios[:numero_maximo_de_analise]
distancias = [
distancia_entre_perfis(user_1, user_id) for user_id in todos_os_usuarios
]
distancias = list(filter(None, distancias))
distancias = pd.DataFrame(
distancias, columns=["Usuario_1", "Outro_user", "Distancia"]
)
return distancias
def mais_proximos_de(
user_1, quantidade_user_proximos=10, numero_maximo_de_analise=None
):
distancias = distancia_entre_usuarios(
user_1, numero_maximo_de_analise=numero_maximo_de_analise
)
distancias = distancias.sort_values("Distancia")
distancias = distancias.set_index("Outro_user").drop(user_1)
return distancias.head(quantidade_user_proximos)
mais_proximos_de(196, numero_maximo_de_analise=50)
# Ao testar a última função, vemos que os 10 perfis (dentro dos 50 primeiros presentes no dataset *notas*) os que tiveram maior proximidade com o usuário 196, são os usuários 251 e 97.
# ---
# Tendo conhecimento dos perfis mais próximos, agora é possível buscar novos filmes para o usuário.
# Primeiro, criei uma nova função que possui os mesmos argumentos que a função **mais_proximos_de**, que, em resumo, retorna 5 recomendações de filmes, ao fazer uma seleção daqueles que tiveram as melhores notas nos perfis mais próximos.
def sugestoes(user1, quantidade_user_proximos=10, numero_maximo_de_analise=None):
notas_user1 = historico_usuario(user1)
historico_filmes = notas_user1.index
similares = mais_proximos_de(
user1,
quantidade_user_proximos=quantidade_user_proximos,
numero_maximo_de_analise=numero_maximo_de_analise,
)
usuarios_similares = similares.index
notas_dos_similares = notas.set_index("user_id").loc[usuarios_similares]
recomendacoes = notas_dos_similares.groupby("item_id").mean()[["rating"]]
recomendacoes = recomendacoes.sort_values("rating", ascending=False)
return recomendacoes.join(filmes).head()
sugestoes(196, quantidade_user_proximos=2, numero_maximo_de_analise=50)
# The recommendations (using only 2 users) are:
# - Jerry Maguire: A Grande Virada;
# - Horizonte Perdido;
# - The Haunted World of Edward D. Wood Jr.;
# - Quanto Mais Quente Melhor;
# - Intriga Internacional.
sugestoes(196, numero_maximo_de_analise=50)
# But when using the profiles of the 10 closest users, we get:
# - O Povo Contra Larry Flynt;
# - Shine - Brilhante;
# - The Haunted World of Edward D. Wood Jr.;
# - Gênio Indomável;
# - Juventude Transviada.
sugestoes(196)
# And when we do not set a maximum number of users for the analysis, we get:
# - Psicose;
# - 2001 - Uma Odisseia no Espaço;
# - O Piano;
# - O Sangue de Romeo;
# - Crepúsculo dos Deuses.
# # Generating recommendations for a new user:
# Now, let's say I have a new user whose movies are:
# (image of the new user's movies omitted)
# To add them to our system, I wrote a function that assigns an ID number and records the movies and the ratings. I chose this new user's ratings at random:
# (image omitted)
def novo_usuario(seus_filmes):
novo_usuario = notas["user_id"].max() + 1
notas_do_usuario_novo = pd.DataFrame(seus_filmes, columns=["item_id", "rating"])
notas_do_usuario_novo["user_id"] = novo_usuario
return pd.concat([notas, notas_do_usuario_novo])
notas = novo_usuario(
[
[723, 3],
[1094, 3],
[652, 1],
[1522, 5],
[1243, 2],
[1456, 1],
[1086, 5],
[1034, 2],
[703, 2],
[90, 3],
[217, 2],
]
)
notas.tail(12)
# For the new user we have the following recommendations:
sugestoes(944)
|
# # Project 4
# I am using the answers notebook from week 2 and changing its preprocessing to use Polars
import pandas as pd
import numpy as np
data = pd.read_parquet(
"/kaggle/input/forecasting-project2-data/project_2_data/sales_data.parquet"
)
data.head()
data.shape
data = data.loc[data.groupby("id").sales.cumsum() > 0]
def rmsse(train, val, y_pred):
train_scale = (
train.assign(
scale=train.groupby("id").sales.diff() ** 2,
)
.groupby("id")
.scale.mean()
)
score = (
val.assign(squared_error=(val.sales - y_pred) ** 2)
.groupby("id")
.squared_error.mean()
.to_frame()
.merge(train_scale, on="id")
.assign(rmsse=lambda x: np.sqrt(x.squared_error / x.scale))
.rmsse.mean()
)
return score
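# For reference, the metric above is the (M5-style) RMSSE computed per series:
#   RMSSE_id = sqrt( mean_t (y_t - yhat_t)^2 / mean_t (y_t - y_{t-1})^2 )
# where the numerator is the mean squared error on the validation window, the
# denominator (the scale) is the mean squared one-step difference on the training
# window, and the final score averages RMSSE_id over all ids.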
def test_rmsse():
test_train = pd.DataFrame(
{
"id": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
"sales": [3, 2, 5, 100, 150, 60, 10, 20, 30],
}
)
test_val = pd.DataFrame(
{
"id": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
"sales": [6, 1, 4, 200, 120, 270, 10, 20, 30],
}
)
test_y_pred = pd.Series([1, 2, 3, 180, 160, 240, 20, 30, 40])
assert np.abs(rmsse(test_train, test_val, test_y_pred) - 0.92290404515501) < 1e-6
test_rmsse()
# # Fitting models
# Don't worry about any error outputs here, unless you get the same "Retrying" error as Project 1
calendar = pd.read_parquet(
"/kaggle/input/forecasting-project2-data/project_2_data/calendar.parquet"
)
prices = pd.read_parquet(
"/kaggle/input/forecasting-project2-data/project_2_data/prices.parquet"
)
# ## Code in Pandas
from sklearn.preprocessing import OrdinalEncoder
lag_features = [1, 7, 28]
rolling_features = {
"mean": [7, 28],
"std": [7, 28],
}
seasonal_rolling_features = {
"mean": [4, 8],
"std": [4, 8],
}
def calc_lag_pd(df, shift_length, forecast_horizon, by_day_of_week=False):
group_cols = ["id"]
if by_day_of_week:
group_cols += ["day_of_week"]
feature_name = f"lag_{shift_length}_{forecast_horizon}"
return (
df.assign(day_of_week=df.index.get_level_values("date").dayofweek)
.groupby(group_cols)
.sales.shift(forecast_horizon + shift_length)
.rename(feature_name)
), feature_name
def calc_rolling_agg_pd(
df, window_length, forecast_horizon, agg_func="mean", by_day_of_week=False
):
group_cols = ["id"]
if by_day_of_week:
group_cols += ["day_of_week"]
if not by_day_of_week:
feature_name = f"rolling_{agg_func}_{window_length}_{forecast_horizon}"
else:
feature_name = f"seasonal_rolling_{agg_func}_{window_length}_{forecast_horizon}"
return (
df.assign(day_of_week=df.index.dayofweek)
.groupby(group_cols, group_keys=False)
.sales.rolling(
window_length, closed="right", min_periods=1
) # only requires 1 observation to be non-NaN
.agg({"sales": agg_func})
.reset_index()
.assign(date=lambda x: x.date + pd.Timedelta(days=28))
.set_index("date")
.rename(columns={"sales": feature_name})
), feature_name
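# Note: the rolling-feature table shifts its dates forward by 28 days before the merge,
# so each row only receives statistics computed from data at least 28 days in the past,
# which matches the 28-day forecast setup and avoids leaking future information.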
def feature_engineering_pd(df, horizon):
cont_feats = []
for lag in lag_features:
fe_table, feature_name = calc_lag_pd(df, lag, horizon)
df = df.merge(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
df = df.reset_index("id")
for agg_func, windows in rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg_pd(df, window, horizon, agg_func)
df = df.merge(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
for agg_func, windows in seasonal_rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg_pd(
df, window, horizon, agg_func, by_day_of_week=True
)
df = df.merge(
fe_table.drop(columns="day_of_week"), on=["id", "date"], how="left"
)
cont_feats.append(feature_name)
df = (
df.merge(calendar[["snap_TX"]], on="date", how="left")
.merge(prices, on=["date", "store_id", "item_id"], how="left")
.assign(
day_of_week=lambda x: x.index.dayofweek,
day_of_month=lambda x: x.index.day,
month=lambda x: x.index.month,
year=lambda x: x.index.year,
)
)
cont_feats += ["sell_price", "day_of_week", "day_of_month", "month", "year"]
cat_feats = ["id", "item_id", "dept_id", "cat_id", "store_id", "snap_TX"]
enc_cat_feats = [f"{feat}_enc" for feat in cat_feats]
df[enc_cat_feats] = OrdinalEncoder().fit_transform(df[cat_feats])
max_date = df.index.get_level_values("date").max()
train = df.loc[: max_date - pd.Timedelta(days=28), :]
val = df.loc[
max_date
- pd.Timedelta(days=28 - (horizon - 7) - 1) : max_date
- pd.Timedelta(days=28 - horizon),
:,
]
price_feats = train.groupby("id").agg(
max_price=("sell_price", "max"),
median_price=("sell_price", "median"),
)
train = train.merge(price_feats, on="id", how="left")
val = val.merge(price_feats, on="id", how="left")
cont_feats += ["max_price", "median_price"]
return (train, val, cont_feats, enc_cat_feats)
from sklearn.preprocessing import OrdinalEncoder
lag_features = [1, 7, 28]
rolling_features = {
"mean": [7, 28],
"std": [7, 28],
}
seasonal_rolling_features = {
"mean": [4, 8],
"std": [4, 8],
}
def calc_lag_pl(df, shift_length, forecast_horizon, by_day_of_week=False):
group_cols = ["id"]
if by_day_of_week:
group_cols += ["day_of_week"]
feature_name = f"lag_{shift_length}_{forecast_horizon}"
return (
df.lazy()
.with_columns(pl.col("date").dt.weekday().alias("day_of_week"))
.with_columns(
(
pl.col("sales")
.shift(forecast_horizon + shift_length)
.over(group_cols)
.alias(feature_name)
)
)
.select(["date", "id"] + [feature_name])
.collect()
), feature_name
def calc_rolling_agg_pl(
df, window_length, forecast_horizon, agg_func="mean", by_day_of_week=False
):
group_cols = ["id"]
if by_day_of_week:
group_cols += ["day_of_week"]
if not by_day_of_week:
feature_name = f"rolling_{agg_func}_{window_length}_{forecast_horizon}"
else:
feature_name = f"seasonal_rolling_{agg_func}_{window_length}_{forecast_horizon}"
if agg_func == "mean":
return (
df.lazy()
.with_columns(pl.col("date").dt.weekday().alias("day_of_week"))
.with_columns(
(
pl.col("sales")
.rolling_mean(window_size=window_length, min_periods=1)
.over(group_cols)
.alias(feature_name)
)
)
.with_columns((pl.col("date") + pl.duration(days=28)).alias("date"))
.select(group_cols + ["date", feature_name])
.collect()
), feature_name
    # ugly, but the generic rolling_apply is slower and I did not want to spend more time refactoring
elif agg_func == "std":
return (
df.lazy()
.with_columns(pl.col("date").dt.weekday().alias("day_of_week"))
.with_columns(
(
pl.col("sales")
.rolling_std(window_size=window_length, min_periods=1)
.over(group_cols)
.alias(feature_name)
)
)
.with_columns((pl.col("date") + pl.duration(days=28)).alias("date"))
.select(group_cols + ["date", feature_name])
.collect()
), feature_name
def feature_engineering_pl(df, horizon):
cont_feats = []
for lag in lag_features:
fe_table, feature_name = calc_lag_pl(df, lag, horizon)
df = df.join(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
for agg_func, windows in rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg_pl(df, window, horizon, agg_func)
df = df.join(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
for agg_func, windows in seasonal_rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg_pl(
df, window, horizon, agg_func, by_day_of_week=True
)
df = df.join(fe_table.drop("day_of_week"), on=["id", "date"], how="left")
cont_feats.append(feature_name)
df = (
df.join(
pl.from_pandas(calendar.reset_index()).select("date", "snap_TX"),
on="date",
how="left",
)
.join(
pl.from_pandas(prices.reset_index()),
on=["date", "store_id", "item_id"],
how="left",
)
.with_columns(
[
pl.col("date").dt.weekday().alias("dayofweek"),
pl.col("date").dt.day().alias("day"),
pl.col("date").dt.month().alias("month"),
pl.col("date").dt.year().alias("year"),
]
)
)
df = df.to_pandas().set_index("date")
cont_feats += ["sell_price", "day_of_week", "day_of_month", "month", "year"]
cat_feats = ["id", "item_id", "dept_id", "cat_id", "store_id", "snap_TX"]
enc_cat_feats = [f"{feat}_enc" for feat in cat_feats]
df[enc_cat_feats] = OrdinalEncoder().fit_transform(df[cat_feats])
max_date = df.index.get_level_values("date").max()
train = df.loc[: max_date - pd.Timedelta(days=28), :]
val = df.loc[
max_date
- pd.Timedelta(days=28 - (horizon - 7) - 1) : max_date
- pd.Timedelta(days=28 - horizon),
:,
]
price_feats = train.groupby("id").agg(
max_price=("sell_price", "max"),
median_price=("sell_price", "median"),
)
train = train.merge(price_feats, on="id", how="left")
val = val.merge(price_feats, on="id", how="left")
cont_feats += ["max_price", "median_price"]
return (train, val, cont_feats, enc_cat_feats)
# cont_feats += ['sell_price', 'day_of_week', 'day_of_month', 'month', 'year']
# cat_feats = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'snap_TX']
# enc_cat_feats = [f'{feat}_enc' for feat in cat_feats]
# df[enc_cat_feats] = OrdinalEncoder().fit_transform(df.select([cat_feats]))
# max_date = df.select('date').max()
# train = df.filter(pl.col('date') < max_date - pl.duration(days=28))
# val = df.filter((pl.col('date') >= max_date - pl.duration(days=28 - (horizon-7) - 1)) & (pl.col('date') < max_date - pl.duration(days=28 - horizon)))
# price_feats = (
# train
# .groupby('id')
# .agg([
# pl.col("sell_price").max().alias("max_price"),
# pl.col("sell_price").median().alias("median_price"),
# ])
# )
# train = train.join(price_feats, on='id', how='left')
# val = val.join(price_feats, on='id', how='left')
# cont_feats += ['max_price', 'median_price']
# return (
# train, val,
# cont_feats, enc_cat_feats
# )
# train_pd, val_pd, cont_feats_pd, cat_feats_pd = feature_engineering_pl(data_pl, horizon)
horizon = 28
# ## Code in polars
import polars as pl
def compare(df_pd, df_pl):
df_pd = (
pd.DataFrame(df_pd)
.reset_index()
.set_index(["date", "id"])
.sort_index()
.drop("day_of_week", axis=1, errors="ignore")
)
df_pl_aspd = (
df_pl.to_pandas()
.set_index(["date", "id"])
.sort_index()
.drop("day_of_week", axis=1, errors="ignore")
)
assert df_pd.equals(df_pl_aspd)
data_pl = pl.from_pandas(data.reset_index())
df_pd, _ = calc_lag_pd(data, 1, horizon)
df_pl, _ = calc_lag_pl(data_pl, 1, horizon)
compare(df_pd, df_pl)
fe_table_pd, feature_name = calc_rolling_agg_pd(
data.reset_index("id"), 28, horizon, "mean", by_day_of_week=True
)
fe_table_pl, feature_name = calc_rolling_agg_pl(
data_pl, 28, horizon, "mean", by_day_of_week=True
)
compare(fe_table_pd, fe_table_pl)
train_pd, val_pd, cont_feats_pd, cat_feats_pd = feature_engineering_pd(data, horizon)
train_pl, val_pl, cont_feats_pl, cat_feats_pl = feature_engineering_pl(data_pl, horizon)
|
import pandas as pd
import numpy as np
import collections
# - In the [discussion][2], it is pointed out that levels 7, 15, 20, 21 and 22 are skipped.
# [2]: https://www.kaggle.com/competitions/predict-student-performance-from-game-play/discussion/390339#2174184
# - As other [problems][1] with the dataset have been noted, an updated dataset was recently released. We analyzed the new dataset to see whether the problem of skipped levels has been resolved.
# [1]: https://www.kaggle.com/competitions/predict-student-performance-from-game-play/discussion/395250
# - From the following analysis, we see that all sessions have ```level==22```. However, some sessions lack levels 7, 15, 20, 21.
# - It has been noted that several columns in this dataset, such as index and elapsed_time, do not seem to be consistent, and the competition host indicated that this may be due to a bug in the game. Since only certain levels are missing, perhaps this missingness is also a game bug.
# - Please let me know if a similar analysis already exists. I'm new to Kaggle and this is my first competition, so please tell me how to improve this notebook and how to use Kaggle better.
train_path = "/kaggle/input/predict-student-performance-from-game-play/train.csv"
session_id = pd.read_csv(train_path, usecols=["session_id"])
level = pd.read_csv(train_path, usecols=["level"])
nlevel = pd.concat([session_id, level], axis=1).groupby("session_id").nunique()
print(nlevel[nlevel.level < 21])
print("\nNumber of unique levels are larger than 20 for all sessions.")
n23 = len(nlevel[nlevel.level == 23])
n22 = len(nlevel[nlevel.level == 22])
n21 = len(nlevel[nlevel.level == 21])
len_ = len(nlevel)
print("0 missing:".ljust(15) + f"{n23: 10d} sessions, {(n23/len_)*100: 10.3f} %")
print("1 missing:".ljust(15) + f"{n22: 10d} sessions, {(n22/len_)*100: 10.3f} %")
print("2 missing:".ljust(15) + f"{n21: 10d} sessions, {(n21/len_)*100: 10.3f} %")
session_and_level = pd.concat([session_id, level], axis=1)
full_levels = set(np.arange(0, 23))
one_missing_session = nlevel[nlevel.level == 22].index
diffs = []
for ID in one_missing_session:
tmp = session_and_level.loc[session_and_level.session_id == ID]
diff = full_levels - set(tmp.level.unique())
diffs.append(diff)
tmp = []
for diff in diffs:
for e in diff:
tmp.append(e)
print(f"Missing values distribution: {collections.Counter(tmp)} in {n22} sessions")
two_missing_session = nlevel[nlevel.level == 21].index
diffs = []
for ID in two_missing_session:
tmp = session_and_level.loc[session_and_level.session_id == ID]
diff = full_levels - set(tmp.level.unique())
diffs.append(diff)
tmp = []
for diff in diffs:
for e in diff:
tmp.append(e)
print(f"Missing values distribution: {collections.Counter(tmp)} in {n21} sessions")
|
import bz2
import lzma
import pickle
import matplotlib.pyplot as plt
import numpy as np
import shapely
import tqdm
from shapely.geometry import box
from shapely.geometry import MultiPolygon, Point, GeometryCollection, MultiLineString
from shapely import affinity
from uuid import uuid4
plans = pickle.load(open("/kaggle/input/dataset-aug-ours/outs.pkl", "rb"))
c = 0
size = 256
def buffer(p, w, bm=0.5):
p = (
affinity.scale(p, xfact=0.9, yfact=0.9, origin=(size / 2, size / 2))
if p
else Point(-100, -100)
)
if bm:
if isinstance(p, shapely.geometry.multipolygon.MultiPolygon):
return [
i.buffer(-bm * w, join_style=2)
.buffer((1.5 + bm) * w, join_style=2)
.buffer(-1.5 * w, join_style=2)
for i in p.geoms
]
return [
p.buffer(-bm * w, join_style=2)
.buffer((1.5 + bm) * w, join_style=2)
.buffer(-1.5 * w, join_style=2)
]
return p
aug_prefs = [
[0, False],
[0, True],
[90, False],
[90, True],
[180, False],
[180, True],
[270, False],
[270, True],
]
def augment(polygon, degree, flip_vertical, size=256, point=False):
if not polygon:
return Point(-100, -100) if point else box(-100, -100, -100, -100)
p = affinity.rotate(polygon, degree, origin=(size / 2, size / 2))
if flip_vertical:
p = affinity.scale(p, xfact=1, yfact=-1, origin=(size / 2, size / 2))
return p
augmented = []
for p in tqdm.tqdm(plans):
bedroom_p = p["bedroom"]
bathroom_p = p["bathroom"]
front_p = p["front"]
wall_width = p["wall_width"]
p["inner_g"] = buffer(p["inner_g"], p["wall_width"], 0.5)
if isinstance(p["inner_g"], list):
p["inner_g"] = max(p["inner_g"], key=lambda x: x.area)
p["kitchen_c"] = buffer(p["kitchen_c"], p["wall_width"], 0.5)
p["general"] = buffer(p["general"], p["wall_width"], 0.5)
p["balacony"] = buffer(p["balacony"], p["wall_width"], 0.5)
poly = bedroom_p.buffer(p["wall_width"] * 0.1, join_style=2).buffer(
-p["wall_width"] * 0.1, join_style=2
)
bedrooms = [i for i in poly.geoms] if isinstance(poly, MultiPolygon) else [poly]
bedrooms = [buffer(i, p["wall_width"])[0] for i in bedrooms]
bedrooms = sorted(bedrooms, key=lambda x: x.area, reverse=True)
p["bedroom"] = [i for i in bedrooms if i.area > 0.1 * bedroom_p.area][0:4]
polyB = bathroom_p.buffer(p["wall_width"] * 0.1, join_style=2).buffer(
-p["wall_width"] * 0.1, join_style=2
)
bathroom = [i for i in polyB.geoms] if isinstance(polyB, MultiPolygon) else [polyB]
bathroom = [buffer(i, p["wall_width"])[0] for i in bathroom]
bathroom = sorted(bathroom, key=lambda x: x.area, reverse=True)
p["bathroom"] = [i for i in bathroom if i.area > 0.1 * bathroom_p.area][0:4]
if front_p and p["inner_g"]:
front_p = buffer(front_p, p["wall_width"], 0)
front_p = front_p.buffer(wall_width).intersection(p["inner_g"].exterior)
if isinstance(front_p, (MultiPolygon, GeometryCollection, MultiLineString)):
front_p = max(front_p.geoms, key=lambda x: x.length)
front_p = front_p.centroid
p["front"] = front_p
plans[8]["front"]
import geopandas as gpd
gpd.GeoSeries(plans[0]["bedroom"] + plans[0]["bathroom"] + plans[0]["general"]).plot(
cmap="Dark2_r"
)
from typing import Iterable
import cv2, pickle
import numpy as np
import shapely
from pandas import Series
from shapely import MultiPolygon, Polygon, box, Point
from shapely.affinity import scale
def get_mask(poly, shape, point_s=5):
"""Return image contains multiploygon as a numpy array mask
Parameters
----------
poly: Polygon or MultiPolygon or Iterable[Polygon or MultiPolygon]
The Polygon/s to get mask for
shape: tuple
The shape of the canvas to draw polygon/s on
Returns
-------
ndarray
Mask array of the input polygon/s
:param point_s:
"""
try:
img = np.zeros(shape, dtype=np.uint8)
if isinstance(poly, Polygon):
if poly.is_empty:
return img
img = cv2.drawContours(img, np.int32([poly.exterior.coords]), -1, 255, -1)
elif isinstance(poly, MultiPolygon):
for p in poly.geoms:
img = cv2.drawContours(img, np.int32([p.exterior.coords]), -1, 255, -1)
elif isinstance(poly, Series):
polys = [p for p in poly.tolist() if p]
img = get_mask(polys, shape, point_s)
elif isinstance(poly, Iterable):
for p in poly:
img = (img != 0) | (get_mask(p, shape, point_s) != 0)
img = img.astype(np.uint8) * 255
elif isinstance(poly, Point):
p = poly.coords[0]
img = cv2.circle(img, (int(p[0]), int(p[1])), point_s, 255, -1)
return img.astype(np.uint8)
    except Exception:
        # if rasterisation fails for any reason, fall back to whatever has been drawn so far
        return img
def get_centroid(poly):
    # NOTE: this helper was left as an unfinished stub in the original (it only initialised
    # x1, x2, y1, y2); a minimal completion is to fall back to shapely's own centroid.
    return poly.centroid if poly else Point(-100, -100)
import random
MIN_ROOM_AREA = 8.5
MAX_ROOM_AREA = 46
MIN_BROOM_AREA = 3.5
MAX_BROOM_AREA = 15
def rand_room_area(n):
total = 0
for i in range(n):
MIDPOINT = (MIN_ROOM_AREA + MAX_ROOM_AREA) / 2
STD_DEV = (MAX_ROOM_AREA - MIN_ROOM_AREA) / 4
room_area = random.normalvariate(MIDPOINT, STD_DEV)
room_area = max(min(room_area, MAX_ROOM_AREA), MIN_ROOM_AREA)
room_area = round(room_area, 2)
total += room_area
return total
def rand_broom_area(n):
total = 0
for i in range(n):
MIDPOINT = (MIN_BROOM_AREA + MAX_BROOM_AREA) / 2
STD_DEV = (MAX_BROOM_AREA - MIN_BROOM_AREA) / 4
room_area = random.normalvariate(MIDPOINT, STD_DEV)
room_area = max(min(room_area, MAX_BROOM_AREA), MIN_BROOM_AREA)
room_area = round(room_area, 2)
total += room_area
return total
def estimate_total_area(num_bedrooms, num_bathrooms):
avg_ratio_1br_1ba = 0.6
avg_ratio_2br_2ba = 0.7
avg_ratio_3br_nba = 0.75
combined_bed_bath_area = rand_room_area(num_bedrooms) + rand_broom_area(
num_bathrooms
)
if num_bedrooms == 1 and num_bathrooms == 1:
area_ratio = avg_ratio_1br_1ba
elif num_bedrooms == 2 and num_bathrooms == 2:
area_ratio = avg_ratio_2br_2ba
else:
area_ratio = avg_ratio_3br_nba
total_area = combined_bed_bath_area / area_ratio
return total_area
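# Quick illustrative check of the area heuristic above (the numbers are random draws, so this is
# only a rough sanity check): for 2 bedrooms and 1 bathroom the combined bed/bath area is divided
# by the 0.75 ratio, giving a total somewhere between roughly 27 and 143, typically around 85.
print(estimate_total_area(num_bedrooms=2, num_bathrooms=1))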
import gc
import torch  # torch must be imported before emptying the CUDA cache
torch.cuda.empty_cache()
gc.collect()
import numpy as np
import keras, os, random
import torch
from torch.utils.data import Dataset, DataLoader
onehot = {}
for i in [1, 2, 3, 4]:
for j in [1, 2, 3, 4]:
img = np.zeros((256, 256, 8))
img[:, :, i - 1] = np.ones((256, 256))
img[:, :, j + 4 - 1] = 1
onehot[(i, j)] = img[:, :, :]
import torch
import numpy as np
from torch.utils.data import Dataset
class PlanDataset(Dataset):
def __init__(self, plans, batch_size=32, image_size=(256, 256)):
self.plans = plans
self.batch_size = batch_size
self.image_size = image_size
self.num_samples = len(self.plans)
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
plan = self.plans[idx]
deg, flip = random.choice(aug_prefs)
no_bedrooms = max(1, len(plan["bedroom"]))
no_bathrooms = max(1, len(plan["bathroom"]))
area = estimate_total_area(no_bedrooms, no_bathrooms)
case = (
random.choice([f for f in range(1, no_bedrooms + 1) for j in range(f)])
if no_bedrooms > 1
else 1
)
inbedsnum = case - 1
x = np.zeros((*self.image_size, 4))
y = np.zeros((*self.image_size, 1))
# x[:,:,:8] = onehot[(no_bedrooms, no_bathrooms)][:,:,:8]
# x[:,:,9] = get_mask(augment(plan['front'], deg, flip), (256, 256), point_s=7) > 0
# x[:,:,10] = get_mask(augment(plan['inner_g'], deg, flip), (256, 256), point_s=10) > 0
# x[:,:,11 + inbedsnum] = 1
x[:, :, 0] = (
get_mask(augment(plan["front"], deg, flip), (256, 256), point_s=7) > 0
)
x[:, :, 1] = (
get_mask(augment(plan["inner_g"], deg, flip), (256, 256), point_s=10) > 0
)
x[:, :, 2] = min(1, area / 500)
bedrooms = [x for x in plan["bedroom"]]
inbeds = []
for _ in range(inbedsnum):
bedroom = random.choice(bedrooms)
bedc = (
bedroom.minimum_rotated_rectangle.centroid
if bedroom
else Point(-100, -100)
)
bedrooms = [x for x in bedrooms if x != bedroom]
inbeds.append(augment(bedc, deg, flip))
x[:, :, 3] = get_mask(inbeds, (256, 256), point_s=15) > 0
bedroom = (
random.choice(bedrooms).minimum_rotated_rectangle.centroid
if bedrooms
else Point(-100, -100)
)
y[:, :, 0] = get_mask(augment(bedroom, deg, flip), (256, 256), point_s=15) > 0
return (
torch.from_numpy(x).cuda().permute(2, 0, 1).float(),
torch.from_numpy(y).cuda().permute(2, 0, 1).float(),
)
from sklearn.model_selection import train_test_split
# # Model
import torch
import torch.nn as nn
import torchvision.models as models
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import pickle
import numpy as np
import bz2
class RoomLayoutUNet(nn.Module):
def __init__(self, n_channels=3, n_classes=3):
super(RoomLayoutUNet, self).__init__()
self.model = models.resnet101(pretrained=True)
embedding = nn.Embedding(5, 1)
num_ftrs = self.model.fc.in_features
self.model.fc = nn.Linear(num_ftrs, 40)
def forward(self, x):
output = self.model(x)
return output
class RoomLayoutUNet(nn.Module):
def __init__(self, n_channels=4, n_classes=10):
super(RoomLayoutUNet, self).__init__()
self.model = models.segmentation.deeplabv3_resnet101(
pretrained=True, progress=True
).cuda()
print(self.model.backbone.conv1)
self.model.backbone.conv1 = nn.Conv2d(
n_channels,
64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3),
bias=False,
).cuda()
self.model.classifier[4] = nn.Conv2d(
256, n_classes, kernel_size=(1, 1), stride=(1, 1)
).cuda()
self.embedding = nn.Embedding(5, 1).cuda()
def forward(self, x):
output = self.model(x)["out"]
return output
# model = RoomLayoutUNet().cuda()
# model.model.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).cuda()
# model.model.classifier[4] = nn.Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1)).cuda()
model = torch.load("/kaggle/input/res101-bed/model_centers_bed__100.pth").cuda()
model.model.backbone.conv1 = nn.Conv2d(
4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
).cuda()
model.model.classifier[4] = nn.Conv2d(256, 1, kernel_size=(1, 1), stride=(1, 1)).cuda()
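# Hedged shape sanity check of the modified network (assumes a CUDA device is available, since the
# layers above are all moved to .cuda()): a 4-channel 256x256 input should yield a 1-channel map.
with torch.no_grad():
    _probe = torch.zeros(2, 4, 256, 256).cuda()
    print(model(_probe).shape)  # expected: torch.Size([2, 1, 256, 256])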
import tensorflow as tf
from tensorflow.keras import backend as K
def accuracy_2(y_true, y_pred):
numerator = torch.sum(((y_pred > 0.2) * y_true).bool().float())
denominator = torch.sum(y_true)
accuracy = numerator / denominator
return accuracy
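# Minimal sketch of accuracy_2 on toy tensors (illustrative only): it measures the fraction of
# positive target pixels that the prediction also marks above the 0.2 threshold (a recall-like score).
_y_true = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
_y_pred = torch.tensor([[0.1, 0.9], [0.3, 0.1]])
print(accuracy_2(_y_true, _y_pred))  # tensor(1.) -- both positive pixels are recovered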
train_plans, test_plans = train_test_split(
plans, test_size=0.02, shuffle=True, random_state=1997
)
plan_dataset = PlanDataset(train_plans)
plan_dataloader = DataLoader(plan_dataset, batch_size=29, shuffle=True)
plan_dataset_val = PlanDataset(test_plans)
plan_dataloader_val = DataLoader(plan_dataset_val, batch_size=29, shuffle=True)
import gc
try:
del inputs
del labels
del i
del d
del outputs
del train_loss
del val_loss
del train_acc
del val_acc
del loss
except:
...
gc.collect()
torch.cuda.empty_cache()
loss_fn = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
from shapely.geometry import box
import geopandas as gpd
import pygeos
from tqdm import tqdm
from pygeos.creation import box
# model = RoomLayoutUNet().to(device)
num_epochs = 1000
os.makedirs("sample_output", exist_ok=True)
train_loss = []
val_loss = []
train_acc = []
val_acc = []
every = len(plan_dataloader_val) // 6
for epoch in range(num_epochs):
for i, d in tqdm(enumerate(plan_dataloader), total=len(plan_dataloader)):
inputs, labels = d
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_fn(outputs, labels)
train_acc.append(accuracy_2(labels, outputs).detach().cpu().item())
loss.backward()
train_loss.append(loss.detach().cpu().item())
del inputs
del labels
del d
del loss
del outputs
gc.collect()
torch.cuda.empty_cache()
optimizer.step()
if i % 1000 == 0:
with torch.no_grad():
for j, (inputs, labels) in enumerate(plan_dataloader_val):
outputs = model(inputs)
loss2 = loss_fn(outputs, labels)
val_loss.append(loss2.detach().cpu().item())
val_acc.append(accuracy_2(labels, outputs).detach().cpu().item())
inp1 = (
inputs[0]
.permute(1, 2, 0)
.cpu()
.detach()
.numpy()[:, :, [0, 1, 3]]
)
inp2 = labels[0].permute(1, 2, 0).cpu().detach().numpy()[:, :, :3]
inp3 = outputs[0].permute(1, 2, 0).cpu().detach().numpy()[:, :, :3]
print(
"Epoch {} - Train Loss: {:.6f} | Val Loss: {:.6f} | Train accuracy: {:.6f} | Val accuracy {:.6f} ".format(
epoch + 1,
np.mean(train_loss),
np.mean(val_loss),
np.mean(train_acc),
np.mean(val_acc),
)
)
val_loss = []
train_loss = []
train_acc = []
val_acc = []
if i % 1000 == 0:
plt.imshow(inp1, origin="lower")
plt.show()
plt.imshow(inp2, origin="lower")
plt.show()
plt.imshow(inp3, origin="lower")
plt.show()
# inp = labels[0].permute(1, 2, 0).cpu().detach().numpy()[:,:,4:7]
# plt.imshow( inp, origin='lower' )
# plt.show()
# inp = outputs[0].permute(1, 2, 0).cpu().detach().numpy()[:,:,4:7]
# plt.imshow( inp, origin='lower' )
# plt.show()
# inp = outputs[0].permute(1, 2, 0).cpu().detach().numpy()[:,:,10]
# plt.imshow( inp, origin='lower' )
# plt.show()
# inp = labels[0].permute(1, 2, 0).cpu().detach().numpy()[:,:,10]
# plt.imshow( inp, origin='lower' )
# plt.show()
# if epoch % 3 == 0:
torch.save(model.cuda(), f"model_centers_bed__{epoch}.pth")
# i = random.randint(0, 31)
# x = next(iter(test_data))
# plt.imshow( model(x[0])[i] )
# plt.show()
# plt.imshow(x[0][i][:, :, 8:11] + 2* x[1][i][:, :, :])
# plt.show()
torch.save(model.cuda(), f"model_centers_bed__{100}.pth")
def get_rects(imgr, bounds):
img = (imgr).astype(np.uint8)
# img[bounds>0] = 0
img[np.where(bounds == 0)] = 0
plt.imshow(bounds * 0.5 + img / 255)
plt.show()
shapes = []
cnts = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i, c in enumerate(cnts[0]):
M = cv2.moments(c)
box = cv2.boundingRect(c)
x1, y1, w, h = box
x2, y2 = x1 + w, y1 + h
ii = imgr[y1:y2, x1:x2]
# y, x = np.indices(ii.shape)
# xx, yy = np.array([np.sum(x*ii)/np.sum(ii), np.sum(y*ii)/np.sum(ii)])
intensity = np.sum(ii)
if not M["m00"]:
continue
cx = M["m10"] / M["m00"]
cy = M["m01"] / M["m00"]
# if bounds[int(cy), int(cx)]:
shapes.append([cx, cy, intensity])
return [Point(*i[:2]) for i in sorted(shapes, key=lambda x: x[2], reverse=True)]
def imfy(img):
return (np.clip(img, 0, 1) * 255).astype(np.uint8)
from skimage.feature import blob_dog, blob_log, blob_doh
# NOTE: `out` must be computed before the blob post-processing below; `x` and `i` are assumed to
# come from an earlier sampling cell (e.g. the commented-out `x = next(iter(test_data))` above).
out = model(x[0])[i]
plt.imshow(out)
img = cv2.GaussianBlur(imfy(out), (15, 15), 5)
ret, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
blobs_log = blob_log(img, min_sigma=12, max_sigma=30, threshold=0.01)
kit = get_mask([Point(i[1], i[0]) for i in blobs_log], (256, 256), point_s=18)
kit = img * (kit > 0)
plt.imshow(kit)
|
# ## Practice with a new dataset
import pandas as pd
dados = pd.read_csv("//kaggle/input/top-250-anime-2023/top250_anime.csv")
dados
# Qualitative ordinal classification -> hierarchical rank
sorted(dados.Popularity.unique())
# Qualitative nominal classification
sorted(dados.Type.unique())
# Discrete classification -> contains a finite or countable set of values
print("De Score %f até %f" % (dados.Score.min(), dados.Score.max()))
# qualitative frequency distribution
dados["Type"].value_counts()
# Showing as a percentage
dados["Type"].value_counts(normalize=True) * 100
frequencia = dados["Type"].value_counts()
percentual = dados["Type"].value_counts(normalize=True) * 100
df_frequencia = pd.DataFrame({"Frequencia": frequencia, "Percentual": percentual})
df_frequencia.rename_axis("Type of Production", axis="columns", inplace=True)
df_frequencia
# ## Class exercise
dados2 = pd.DataFrame({"Profissão": [1, 2, 3, 1, 2, 2, 2, 3, 3, 2, 1, 3]})
dados2
frequencia = dados2.Profissão.value_counts()
percentual = dados2.Profissão.value_counts(normalize=True) * 100
dist_freq_qualitativas = pd.DataFrame(
{"Frequencia": frequencia, "Porcentagem(%)": percentual}
)
dist_freq_qualitativas.rename(
index={1: "Estatístico", 2: "Cientista de Dados", 3: "Programador Python"},
inplace=True,
)
dist_freq_qualitativas.rename_axis("Profissão", axis="columns", inplace=True)
dist_freq_qualitativas
pd.crosstab(dados.Type, dados.Score)  # cross-tabulating the frequency table
# ## Continuing the project
dados.head(20)
# ## Classification by Duration:
# ### Short duration - from 1 to 32
# ### Medium-short duration - from 32 to 64
# ### Medium duration - from 64 to 96
# ### Medium-long duration - from 96 to 128
# ### Long duration - 128 or more
#
dados.Duration.min()
s = pd.Series(dados.Duration.value_counts())
duration_counts = s.sort_index(ascending=True)  # renamed to avoid shadowing the built-in sorted()
duration_counts
classes = [3, 32, 64, 96, 128, 161]
labels = [
"Duração Curta",
"Duração Média-Curta",
"Duração Média",
"Duração Média-Longa",
"Duração Longa",
]
frequencia = pd.value_counts(
pd.cut(x=dados.Duration, bins=classes, labels=labels, include_lowest=True)
)
frequencia
percentual = (
pd.value_counts(
pd.cut(x=dados.Duration, bins=classes, labels=labels, include_lowest=True),
normalize=True,
)
* 100
)
percentual
dist_frequencia_quantitativa = pd.DataFrame(
{"Frequencia": frequencia, "Percentual (%)": percentual}
)
dist_frequencia_quantitativa
dist_frequencia_quantitativa.sort_index(ascending=False)
# # Using Sturges' rule
import numpy as np
n = dados.shape[0]
n
k = int(1 + (10 / 3) * np.log10(n))
print("Valor de classes => k =", k)
frequencia_k = pd.value_counts(
pd.cut(x=dados.Duration, bins=k, include_lowest=True), sort=False
)
frequencia_k
percentual_k = (
pd.value_counts(
pd.cut(x=dados.Duration, bins=k, include_lowest=True),
sort=False,
normalize=True,
)
* 100
)
percentual_k
dist_freq_quantitativa_k = pd.DataFrame(
{"Frequencia": frequencia_k, "Percentual(%)": percentual_k}
)
dist_freq_quantitativa_k.rename_axis("Classificação(K)", axis="columns", inplace=True)
dist_freq_quantitativa_k
# # Creating the analysis with a histogram
import seaborn as sns
ax = sns.distplot(dist_frequencia_quantitativa, kde=True)
ax.figure.set_size_inches(12, 6)
ax.set_title("Distribuição de Frequencias - Duração", fontsize=18)
ax.set_xlabel("Minutos", fontsize=14)
ax
dist_frequencia_quantitativa["Frequencia"].plot.bar(
width=1, color="blue", alpha=0.5, figsize=(12, 6)
)
dist_frequencia_quantitativa.hist(bins=50, figsize=(12, 6))
# ## Relationship between mean, median and mode
# ***
#
dados.head()
ax = sns.distplot(dados.Episodes)
ax.figure.set_size_inches(12, 6)
ax
Moda = dados.Episodes.mode()[0]
Moda
Mediana = dados.Episodes.median()
Mediana
Media = dados.Episodes.mean()
Media
Media > Mediana > Moda
ax = sns.distplot(dados.Score)
ax.figure.set_size_inches(12, 6)
ax
Moda = dados.Score.mode()[0]
Moda
Mediana = dados.Score.median()
Mediana
Media = dados.Score.mean()
Media
Media > Mediana > Moda
dados.query("Episodes == 24").Episodes.value_counts()
# # Quantile measures (separatrices)
#
dados.query('Type == "Music"').Popularity.quantile([0.25, 0.5, 0.75]) # quartis
dados.Score.quantile([i / 10 for i in range(1, 10)])  # deciles
dados.Score.quantile([i / 100 for i in range(1, 10)])  # percentiles (1st to 9th)
ax = sns.distplot(
dados.Score, hist_kws={"cumulative": True}, kde_kws={"cumulative": True}, bins=10
)
ax.figure.set_size_inches(14, 6)
ax.set_title("Distribuição de Frequencias Acumulada", fontsize=18)
ax.set_xlabel("Score", fontsize=14)
ax.set_ylabel("Partes", fontsize=14)
ax
# ##Box Plot
#
ax = sns.boxplot(x="Score", data=dados, orient="h")
ax.figure.set_size_inches(12, 4)
ax.set_title("Score", fontsize=18)
ax.set_xlabel("Score", fontsize=14)
ax.set_ylabel("Partes", fontsize=14)
ax = sns.boxplot(x="Popularity", y="Type", data=dados, orient="h")
ax.figure.set_size_inches(12, 4)
ax.set_title("Popularity by Type", fontsize=18)
ax.set_xlabel("Popularity", fontsize=14)
ax.set_ylabel("Partes", fontsize=14)
dados.head()
ax = sns.boxplot(x="Score", y="Type", data=dados.query("Popularity > 1000"), orient="h")
ax.figure.set_size_inches(12, 4)
ax.set_title("Popularity by Type", fontsize=18)
ax.set_xlabel("Popularity", fontsize=14)
ax.set_ylabel("Partes", fontsize=14)
|
# ### Initially importing some of the necessary libraries:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# ### Importing the dataset
df = pd.read_csv("../input/diamonds/diamonds.csv")
df.head()
# ### Understanding the data dictionary
df.columns
# * The first column "Unnamed: 0" is just an index, so we can drop it.
# * carat: weight of the diamond - continuous variable
# * cut: quality of the cut (Fair, Good, Very Good, Premium, Ideal) - categorical variable
# * color: diamond colour, from J (worst) to D (best) - categorical variable
# * clarity: a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best)) - categorical variable
# * x: length in mm (0--10.74) - continuous variable
# * y: width in mm (0--58.9) - continuous variable
# * z: depth in mm (0--31.8) - continuous variable
# * depth: total depth percentage = z / mean(x, y) - continuous variable
# * table: width of top of diamond relative to widest point (43--95) - continuous variable
# ### Understanding the target variable (Price)
df.price.head()
# * It is of continuous datatype, so a supervised regression model (linear regression) is appropriate.
# ### Data sanity check
# Dropping the 'Unnamed: 0' column.
df.drop(["Unnamed: 0"], axis=1, inplace=True)
df.head() # Rechecking the dataframe
df.shape # checking the shape of the dataframe
df.info() # To get overall information of the dataset
# * No data is missing, so we are good to go.
df.describe() # To check the statistical data
df.cut.value_counts() # to check unique values of cut variable.
df.color.value_counts() # to get unique values of color variable.
df.clarity.value_counts() # to get unique values of clarity variable.
df.columns # Viewing the columns
# #### By inspection:
# * Continuous/numerical variables are: "carat", "depth", "table", "price", "x", "y", "z"
# * Categorical variables are: "cut", "color", "clarity"
# Storing categorical columns in cat & continuous variables in cont
cont = ["carat", "depth", "table", "x", "y", "z", "price"]
cat = ["cut", "color", "clarity"]
# ### Exploratory Data Analysis:
# #### Numerical analysis (target variable vs remaining continuous variables):
sns.pairplot(
data=df,
x_vars=["carat", "depth", "table", "x", "y", "z"],
y_vars="price",
diag_kind=None,
)
# ### Takeaways from pairplot:
# * There is a reasonably good linear relationship between x, y, z, carat and the price variable.
# ### Heatmap
# #### To find the correlation between the numerical variables we will plot heatmap.
plt.figure(figsize=(15, 10))
sns.heatmap(df[cont].corr(), annot=True, cmap="Greens")
# ### Takeaways from heatmap:
# * Strongest correlation is between "x" & "y", at 0.98.
# * Good correlation between "price" and "carat", at 0.92.
# * Poor correlation between "depth" and "price", at -0.11.
# * Poor correlation between "table" and "price", at 0.13.
# ### Analysis on Categorical variables:
def plot1(i):
plt.figure(figsize=(15, 5))
ax1 = plt.subplot(121)
sns.countplot(df[i], ax=ax1)
plt.title("Count distribution of {} type in diamond dataset".format(i))
ax2 = plt.subplot(122)
sns.barplot(x=df[i], y=df["price"], ax=ax2)
plt.title("Total price distribution for each {} type".format(i))
plt.show()
for i in cat:
plot1(i)
print("*" * 75)
# ### Takeaways from the above plots-
# * __Cut type__- Even though Ideal cut diamonds are the most numerous in the dataset, they do not have the highest total price.
# * __Cut type__- Even though Fair diamonds are the fewest in the dataset, they take 2nd position in the total price distribution of the cut category.
# * __Cut type__- Premium cut diamonds have the highest total price in the dataset.
# * __Color type__- J type has the fewest diamonds in the dataset, but a high total price in the price distribution.
# * __Color type__- G type diamonds are the most numerous in the dataset, but do not have the highest total price in the price distribution.
# * __Clarity type__- I1 diamonds are the fewest in the dataset, but have a considerably high total price among the clarity types.
# * __Clarity type__- SI1 diamonds are the most numerous in the dataset, but do not have the highest total price among the clarity types.
# ### Dummy creation for categorical variables:
# Creating dummies for the 3 categorical variables and dropping the first (redundant) level.
cut = pd.get_dummies(df["cut"], drop_first=True)
color = pd.get_dummies(df["color"], drop_first=True)
clarity = pd.get_dummies(df["clarity"], drop_first=True)
# Adding the dummy variables to the dataframe.
df = pd.concat([cut, color, clarity, df], axis=1)
df.head()
# Dropping the main 3 categorical variables from dataframe
df.drop(cat, axis=1, inplace=True)
df.shape # Once again viewing the shape
# ### Train_Test_Split
# Importing scikit library
import sklearn
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(df, train_size=0.7, random_state=100)
df.shape # Viewing the shape of main dataframe
df_train.shape # Viewing the shape of train dataset.
df_test.shape # Viewing ths shape of test dataset.
# # Scaling the Continous variable in train dataset:
df_train[cont].head() # Viewing the train dataset continous variables.
# * Here we shall use the MinMax approach (also called normalization), x' = (x - min)/(max - min), to squeeze the values into the 0-1 range.
# Importing library
from sklearn.preprocessing import MinMaxScaler
Scaler = MinMaxScaler()
# Scaling the continous variables to 0-1
df_train[cont] = Scaler.fit_transform(df_train[cont])
df_train[cont].head() # Cross checking the test dataset once again.
df_train[cont].describe() # Checking the descriptive statistics of the scaled dataset.
# ### Divide train dataset into X and y datasets.
y_train = df_train.pop("price")
y_train
X_train = df_train
X_train.head()
X_train.shape
# ### Building our model
# ### Mixed feature elimination
# * As the number of independent variables is 23, which is high, we shall initially use Recursive Feature Elimination (RFE).
# * We will be using the LinearRegression function from scikit-learn for its compatibility with RFE.
# * Once the number of variables in the model is down to 13, we shall switch to manual elimination.
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
# Running RFE with the number of features to select equal to 13.
lm = LinearRegression()
lm.fit(X_train, y_train)
rfe = RFE(lm, n_features_to_select=13)
rfe = rfe.fit(X_train, y_train)
list(zip(X_train.columns, rfe.support_, rfe.ranking_))
col = X_train.columns[rfe.support_] # Storing the acceptable variables into col list
X_train.columns[~rfe.support_]
len(col) # checking the length of acceptable variables.
# ### Building model using statsmodel, for the detailed statistics
# Creating X_train_rfe dataframe with RFE selected variables
X_train_rfe = X_train[col]
# Adding a constant variable
import statsmodels.api as sm
X_train_rfe = sm.add_constant(X_train_rfe)
X_train_rfe.head()
# Running the linear model
lm = sm.OLS(y_train, X_train_rfe).fit()
# Let's see the summary of our linear model
lm.summary()
# ### Checking the multicollinearity of variables by Variance Inflation Factor:
X_vif1 = X_train_rfe.drop(["const"], axis=1)
# Calculate the VIFs for the new model
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Checking the Vif's:
vif = pd.DataFrame()
X = X_vif1
vif["Features"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# ### Vif for "x" variable is high. It means effected by mutlicollinearity. So, we shall drop "x" variable.
X_train_new1 = X_train_rfe.drop(["x"], axis=1)
X_train_new1.columns
# Running the linear model
lm = sm.OLS(y_train, X_train_new1).fit()
lm.summary()
### Checking the VIF:
X_vif2 = X_train_new1.drop(["const"], axis=1)
vif = pd.DataFrame()
X = X_vif2
vif["Features"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# ### Vif for "depth" variable is high. It means effected by mutlicollinearity. So, we shall drop "depth" variable.
X_train_new2 = X_train_new1.drop(["depth"], axis=1)
len(X_train_new2.columns)
lm = sm.OLS(y_train, X_train_new2).fit()
lm.summary()
### Checking the VIF:
X_vif3 = X_train_new2.drop(["const"], axis=1)
vif = pd.DataFrame()
X = X_vif3
vif["Features"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# ### Vif for "table" variable is high. It means effected by mutlicollinearity. So, we shall drop "table" variable.
X_train_new3 = X_train_new2.drop(["table"], axis=1)
len(X_train_new3.columns)
lm = sm.OLS(y_train, X_train_new3).fit()
lm.summary()
### Checking the VIF:
X_vif4 = X_train_new3.drop(["const"], axis=1)
vif = pd.DataFrame()
X = X_vif4
vif["Features"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# ### Now, finally, the coefficients are significant (p-values below the 5% significance level) and the VIFs are also in an acceptable range (<5). Let's write the model in equation form:
# * price=-0.3022-0.05*(I)-0.10*(J)+0.3026*(IF)+0.205*(Sl1)+0.154*(Sl2)+0.257*(VS1)+0.241*(VS2)+0.284*(VVS1)+0.2817*(VVS2)+2.27*(carat)
# ### Residual Analysis on train_dataset:
# Finding the price predicted values using the regression model built already:
y_train_pred = lm.predict(X_train_new3)
res = y_train - y_train_pred
res.head()
plt.figure(figsize=(15, 5))
ax1 = plt.subplot(121)
sns.distplot(res, ax=ax1)
plt.title(
"Error density distribution",
fontdict={"fontsize": 20, "fontweight": -0.5, "color": "Red"},
)
plt.xlabel("Error terms")
ax2 = plt.subplot(122)
sns.scatterplot(x=y_train, y=res, ax=ax2)
plt.title(
"Error terms vs price_train",
fontdict={"fontsize": 20, "fontweight": -0.5, "color": "Red"},
)
plt.ylabel("Error terms")
plt.show()
# ### Validating linear regression assumptions:
# #### Insights from the above two graphs:
# * The error terms are normally distributed (left graph).
# * The error terms are more or less randomly distributed with respect to the x values.
# * The variance is also fine, though for some data points the error variance is quite high.
# ### Making Predictions on Test data
# #### Scaling the Test dataset
df_test.head()
df_test[cont] = Scaler.transform(df_test[cont])
df_test[cont].head()
df_test[cont].describe()
# ### Creating X_test, y_test variables
y_test = df_test.pop("price")
y_test.head()
X_test = df_test
X_test.head()
# Now let's use our model to make predictions.
# Creating X_test_new dataframe by dropping variables from X_test
X_test_new = X_test[X_vif4.columns]
# Adding a constant variable
X_test_new = sm.add_constant(X_test_new)
X_test_new.head()
y_pred_lm = lm.predict(X_test_new)
X_test_new.shape
# Evaluating the r-square for test data:
from sklearn.metrics import r2_score
r2_score(y_true=y_test, y_pred=y_pred_lm)
n = 16182
r2 = 0.907
k = 10
Adjusted_R2 = 1 - float((1 - r2) * (n - 1) / (n - k - 1))
print(Adjusted_R2)
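# For reference (not in the original): the adjusted R² used above is 1 - (1 - R²)(n - 1)/(n - k - 1),
# with n test observations and k predictors; re-computing it directly from the test set gives
# essentially the same value without hard-coding n or rounding R².
r2_test = r2_score(y_true=y_test, y_pred=y_pred_lm)
adj_r2_test = 1 - (1 - r2_test) * (len(y_test) - 1) / (len(y_test) - k - 1)
print(adj_r2_test)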
# ### Model Visualization:
# Scatter plot:
sns.scatterplot(x=y_test, y=y_pred_lm)
plt.xlabel("y_actual on test data")
plt.ylabel("y_predict on test data")
plt.title(
"y_actual vs y_predict",
fontdict={"fontsize": 20, "fontweight": -0.5, "color": "Red"},
)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# importing the required libraries
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import f1_score
# ## Reading the data from csv file
data = pd.read_csv(
"/kaggle/input/ip-network-traffic-flows-labeled-with-87-apps/Dataset-Unicauca-Version2-87Atts.csv"
)
data.head()
print("Number of Rows: {}".format(data.shape[0]))
print("Number of Columns: {}".format(data.shape[1]))
# Graph of Protocol Name vs Frequency
freq_protocol = data["ProtocolName"].value_counts()
# sns.histplot(freq_protocol.values())
print(len(freq_protocol))
for key, value in freq_protocol.items():
print(key, value)
# filtering the classes which have more than 10000 rows
requiredProtocolName = []
for key, value in freq_protocol.items():
if value >= 10000:
requiredProtocolName.append(key)
print(requiredProtocolName)
listofDataFrames = []
for protocol in requiredProtocolName:
listofDataFrames.append(
pd.DataFrame(data[data["ProtocolName"] == protocol].sample(n=10000))
)
sampledData = pd.concat(listofDataFrames)
sampledData.shape
# taking random rows
data = sampledData
# remove the rows that contain NULL values
data.dropna(inplace=True)
# drop columns that contain NULL values (assign the result, since dropna is not in-place here;
# this is a no-op after the row-wise dropna above)
data = data.dropna(axis="columns")
data.reset_index(drop=True, inplace=True)
# remove columns which contains zeroes in the data
data = data.loc[:, (data != 0).any(axis=0)]
print("Shape after removing rows with NULL Values")
print("Number of Rows: {}".format(data.shape[0]))
print("Number of Columns: {}".format(data.shape[1]))
# Graph of Protocol Name vs Frequency
freq_protocol = data["ProtocolName"].value_counts()
# sns.histplot(freq_protocol.values())
freq_protocol
# Commented because target column can be text/string
# # converting the protocol name (target column) to required format (int)
# # using LabelEncoder function from sklearn.preprocession library
# encoder = LabelEncoder().fit(data['ProtocolName'])
# data['ProtocolName'] = encoder.transform(data['ProtocolName'])
# # values = encoder.inverse_transform(data['ProtocolName'])
# # values
target_column = data["ProtocolName"]
# get all the column heads
data.columns
# removing extra columns that are not useful for finding correlation
# axis = 1 because we need to drop the columns
# by default axis = 0 (drop the rows)
dataset = data.drop(
[
"Flow.ID",
"Source.IP",
"Label",
"Timestamp",
"Destination.IP",
"Source.Port",
"Destination.Port",
"Protocol",
],
axis=1,
)
dataset.head()
# ## Correlation Matrix
# finding the correlation matrix
correlation_matrix = dataset.corr()
correlation_matrix.head()
# plotting the heatmap
plt.figure(figsize=(30, 30))
sns.heatmap(correlation_matrix, cmap="viridis")
plt.plot()
# NOTE: this assumes "ProtocolName" is present in the correlation matrix, which requires it to be
# numerically encoded (e.g. via the LabelEncoder block commented out above); with a raw string
# column, pandas' corr() drops or rejects it.
sorted_corr_matrix_protocolName = correlation_matrix["ProtocolName"].sort_values(
    ascending=False
)
allKeys = list(sorted_corr_matrix_protocolName.keys())
# removing the target column
allKeys.remove("ProtocolName")
feature_map = {}
for colName in allKeys:
correlation = round(sorted_corr_matrix_protocolName[colName], 2)
if abs(correlation) >= 0.01:
if correlation in feature_map:
feature_map[correlation].append(colName)
else:
feature_map[correlation] = [colName]
print("Columns with absolute correlation greater than 0.01 with ProtocolName: \n")
print(feature_map)
final_features = []
import random
# random_columns = []
for correlation, column_list in feature_map.items():
final_features.append(random.choice(column_list))
print("Randomly selected columns for each correlation value: ")
print(final_features)
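# Note (added comment): columns are grouped by their correlation rounded to 2 decimals and one
# column is picked at random per group, which is a crude way to drop near-duplicate features;
# calling random.seed(...) before the loop would make this selection reproducible.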
pos_value_columns = final_features
# final data which would be used for prediction
data_for_prediction = data[pos_value_columns]
data_for_prediction
# ## Random Forest Classifier
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
feature_train, feature_test, target_train, target_test = train_test_split(
data_for_prediction, target_column, test_size=0.2
)
clf = RandomForestClassifier(n_estimators=200)
clf.fit(feature_train, target_train)
predictions = clf.predict(feature_test)
print("Accuracy with Dimensionaility Reduction", clf.score(feature_test, target_test))
f1Score = f1_score(target_test, predictions, average="weighted")
print("F1 score for Random Forest", f1Score)
# print(len(f1ScoreList), len(set(target_test)))
# ## Logistic Regression
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
feature_train = sc.fit_transform(feature_train)
feature_test = sc.transform(feature_test)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0, solver="lbfgs", max_iter=100)
classifier.fit(feature_train, target_train)
from sklearn.metrics import confusion_matrix, accuracy_score
target_pred = classifier.predict(feature_test)
print("Accuracy in Logistic Regression", accuracy_score(target_test, target_pred))
f1Score = f1_score(target_test, target_pred, average="weighted")
print("F1 score for Logistic Regression", f1Score)
# labels = list(set(target_column))
# f1_scores = f1_score(target_test, target_pred, average=None, labels=labels)
# f1_scores_with_labels = {label:score for label,score in zip(labels, f1_scores)}
print(target_pred)
|
numbers = [12, 7, 8, 15, 20, 10, 45]
max_even = None
i = 0
while i < len(numbers):
num = numbers[i]
i += 1
if num % 2 == 0:
if max_even is None or num > max_even:
max_even = num
if max_even is not None:
print("The max_even number is:", max_even)
else:
print("There is no max_even number")
numbers = [1, 56, 90, 100, 20, 65, 90]
max_even = None
i = 0
while i < len(numbers):
num = numbers[i]
i += 1
if num % 2 == 0:
if max_even is None or num > max_even:
max_even = num
if max_even is not None:
print("The max_even number is:", max_even)
else:
print("There is no max_even numbers")
numbers = [1, 56, 90, 100, 20, 65, 90, 1000, 1001]
max_even = None
for number in numbers:
if number % 2 == 0:
if max_even is None or number > max_even:
max_even = number
if max_even is not None:
print("The max_even number is:", max_even)
else:
print("There is no max_even number")
numbers = [1, 56, 90, 100, 20, 65, 90, 1000, 1001]
i = 0
max_even = None
while i < len(numbers):
num = numbers[i]
i += 1
if num % 2 == 0:
if max_even is None or num > max_even:
max_even = num
if max_even is not None:
print("The max_even number is:", max_even)
else:
print("There is no max_even numbers")
numbers = [1, 56, 90, 100, 20, 65, 90, 1000, 1001]
max_odd = None
for number in numbers:
if number % 2 != 0:
if max_odd is None or number > max_odd:
max_odd = number
if max_odd is not None:
print("The max_odd number is:", max_odd)
else:
print("There is no max_odd number")
def chess(x, y):
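    # note: the arguments are immediately overwritten below, so chess(1, 1) still evaluates x=10, y=23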
x = 10
y = 23
if x >= 10:
print("The number is > or == to x")
if y > 20:
print("Y is eligible to play game as his age is big")
else:
print("Y is not eligible to play a game")
else:
print("The number is > to y")
chess(1, 1)
# Functions can use two kinds of variables: local and global.
# This example shows a local variable.
def add(x=2, y=2):
return x + y
# Now an example of a global variable
a = 12
b = 1
def div(num):
global a
global b
y = num + a + b
return y
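# Illustrative call (assuming a and b keep the values 12 and 1 set above): 5 + 12 + 1 = 18.
# Note that `global a` / `global b` are only required when reassigning them; reading works without.
print(div(5))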
def total_sales(
monday_sales=0,
tuesday_sales=0,
wensday_sales=0,
thrusday_sales=0,
friday_sales=0,
saturday_sales=0,
sunday_sales=0,
):
total_sales = (
monday_sales
+ tuesday_sales
+ wensday_sales
+ thrusday_sales
+ friday_sales
+ saturday_sales
+ sunday_sales
)
return total_sales
total_sales(
monday_sales=10,
tuesday_sales=10,
wensday_sales=10,
thrusday_sales=10,
friday_sales=10,
saturday_sales=10,
sunday_sales=10,
)
total_sales(1, 1, 1, 1, 1, 1, 1)
def count(word):
n = {}
for char in word:
if char in n:
n[char] += 1
else:
n[char] = 1
return n
words = "custom Function"
result = count(words)
print(result)
|
# # Final Project
# As in all machine learning problems you should complete the following steps:
# 1. Load and explore the data (using plots and histograms)
# 2. Clean/preprocess/transform the data if necessary (first performed on training data and next on test data)
# 3. Train the machine learning model (in this assignment two models, one linear and one non-linear)
# 4. Evaluate and optimise the model
# However, while performing the steps you should consider our main goals and research questions for this project and try to address them. This can be done either in each step or afterwards in the discussion and conclusion section (your call!). Below are the research questions we are interested in:
# * What are the necessary steps to clean and prepare the dataset, given that we have categorical features and missing data.
# * What model/classifier provides the best result for this application.
# * What is the best choice of cost function and performance metrics for this problem.
# Please also note the following points during the assignment:
# * Use functions from open source libraries like sci-kit learn and keras and avoid using your own hand-written functions from previous assignments. You can also use our [cheatsheet](https://colab.research.google.com/drive/12h-QBlsaWXkjGIRoJXfF4yqi1elnX9qn?usp=sharing).
# * Feel free to contact us on Teams if you need more description.
# * This notebook is structured like a scientific paper. The text should provide a high-level overview of your approach. Please don't include any details about your code in the text but add them as comments in the code itself. Your code should be clean and readable with enough comments.
# * There are some instructions and questions in each section, remove the highlighted text in blue and replace it with your explanations and answers.
# You can delete this section before submission.
# ## 1. Introduction
# Name: Mounzir Baroud and Dennis Landman
# Username: Mounzir and Dennis Landman
#
# ## 2. Data
# ### 2.1 Dataset
# In this section, we load and explore the dataset.
#
#
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
pd.options.display.precision = 15
import os
print(os.listdir("../input"))
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# full data
# train = pd.read_csv('/kaggle/input/LANL-Earthquake-Prediction/train.csv', dtype={'acoustic_data': np.int16, 'time_to_failure': np.float64})
# first 30 million rows
train = pd.read_csv(
"/kaggle/input/LANL-Earthquake-Prediction/train.csv",
nrows=30_000_000,
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
train.head(10)
train_acoustic_data_small = train["acoustic_data"].values[::100]
train_time_to_failure_small = train["time_to_failure"].values[::100]
fig, ax1 = plt.subplots(figsize=(16, 8))
plt.title("Trends of acoustic_data and time_to_failure. 2% of data (sampled)")
plt.plot(train_acoustic_data_small, color="b")
ax1.set_ylabel("acoustic_data", color="b")
plt.legend(["acoustic_data"])
ax2 = ax1.twinx()
plt.plot(train_time_to_failure_small, color="g")
ax2.set_ylabel("time_to_failure", color="g")
plt.legend(["time_to_failure"], loc=(0.875, 0.9))
plt.grid(False)
del train_acoustic_data_small
del train_time_to_failure_small
# Create a training file with simple derived features
rows = 150_000
segments = int(np.floor(train.shape[0] / rows))
# print(segments)
def add_trend_feature(arr, abs_values=False):
"""Fit a univariate linear regression and return the coefficient."""
idx = np.array(range(len(arr)))
if abs_values:
arr = np.abs(arr)
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), arr)
return lr.coef_[0]
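# Small illustrative check of add_trend_feature (not part of the original notebook): for a perfectly
# linear ramp the fitted coefficient recovers the slope, here 2.0.
print(add_trend_feature(np.arange(10) * 2.0))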
def extract_features_from_segment(X):
"""Returns a dictionary with the features for the given segment of acoustic data."""
features = []
features.append(X.mean())
features.append(X.std())
features.append(X.max())
features.append(X.min())
features.append(X.kurtosis())
features.append(X.skew())
features.append(np.quantile(X, 0.95))
features.append(np.quantile(X, 0.90))
features.append(np.quantile(X, 0.10))
features.append(np.quantile(X, 0.01))
return pd.Series(features)
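# Illustrative usage on a synthetic segment (assumption: the input is a pandas Series, as in the
# chunked loop below); the result is a 10-element Series of summary statistics.
print(extract_features_from_segment(pd.Series(np.random.default_rng(0).normal(size=1000))))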
train = pd.read_csv(
"/kaggle/input/LANL-Earthquake-Prediction/train.csv",
iterator=True,
chunksize=150_000,
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
X_train = pd.DataFrame()
y_train = pd.Series(dtype=np.float64)
for df in train:
    ch = extract_features_from_segment(df["acoustic_data"])
    # DataFrame.append/Series.append were removed in pandas 2.0, so build the frames with pd.concat
    X_train = pd.concat([X_train, ch.to_frame().T], ignore_index=True)
    y_train = pd.concat(
        [y_train, pd.Series([df["time_to_failure"].values[-1]])], ignore_index=True
    )
X_train.describe()
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.svm import NuSVR, SVR
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
parameters = [
{
"gamma": [0.001, 0.005, 0.01, 0.02, 0.05, 0.1],
"C": [0.1, 0.2, 0.25, 0.5, 1, 1.5, 2],
}
]
#'nu': [0.75, 0.8, 0.85, 0.9, 0.95, 0.97]}]
reg1 = GridSearchCV(
SVR(kernel="rbf", tol=0.01), parameters, cv=5, scoring="neg_mean_absolute_error"
)
reg1.fit(X_train_scaled, y_train.values.flatten())
y_pred1 = reg1.predict(X_train_scaled)
print("Best CV score: {:.4f}".format(reg1.best_score_))
print(reg1.best_params_)
# features = {}
# features['ave'] = x.values.mean()
# features['std'] = x.values.std()
# features['max'] = x.values.max()
# features['min'] = x.values.min()
# features['q90'] = np.quantile(x.values, 0.90)
# features['q95'] = np.quantile(x.values, 0.95)
# features['q99'] = np.quantile(x.values, 0.99)
# features['q05'] = np.quantile(x.values, 0.05)
# features['q10'] = np.quantile(x.values, 0.10)
# features['q01'] = np.quantile(x.values, 0.01)
# features['std_to_mean'] = features['std'] / features['ave']
# features['abs_max'] = np.abs(x.values).max()
# features['abs_mean'] = np.abs(x.values).mean()
# features['abs_std'] = np.abs(x.values).std()
# features['trend'] = add_trend_feature(x.values)
# features['abs_trend'] = add_trend_feature(x.values, abs_values=True)
# # New features - rolling features
# for w in [10, 50, 100, 1000]:
# x_roll_abs_mean = x.abs().rolling(w).mean().dropna().values
# x_roll_mean = x.rolling(w).mean().dropna().values
# x_roll_std = x.rolling(w).std().dropna().values
# x_roll_min = x.rolling(w).min().dropna().values
# x_roll_max = x.rolling(w).max().dropna().values
# features['ave_roll_std_' + str(w)] = x_roll_std.mean()
# features['std_roll_std_' + str(w)] = x_roll_std.std()
# features['max_roll_std_' + str(w)] = x_roll_std.max()
# features['min_roll_std_' + str(w)] = x_roll_std.min()
# features['q01_roll_std_' + str(w)] = np.quantile(x_roll_std, 0.01)
# features['q05_roll_std_' + str(w)] = np.quantile(x_roll_std, 0.05)
# features['q10_roll_std_' + str(w)] = np.quantile(x_roll_std, 0.10)
# features['q95_roll_std_' + str(w)] = np.quantile(x_roll_std, 0.95)
# features['q99_roll_std_' + str(w)] = np.quantile(x_roll_std, 0.99)
# features['ave_roll_mean_' + str(w)] = x_roll_mean.mean()
# features['std_roll_mean_' + str(w)] = x_roll_mean.std()
# features['max_roll_mean_' + str(w)] = x_roll_mean.max()
# features['min_roll_mean_' + str(w)] = x_roll_mean.min()
# features['q05_roll_mean_' + str(w)] = np.quantile(x_roll_mean, 0.05)
# features['q95_roll_mean_' + str(w)] = np.quantile(x_roll_mean, 0.95)
# features['ave_roll_abs_mean_' + str(w)] = x_roll_abs_mean.mean()
# features['std_roll_abs_mean_' + str(w)] = x_roll_abs_mean.std()
# features['q05_roll_abs_mean_' + str(w)] = np.quantile(x_roll_abs_mean, 0.05)
# features['q95_roll_abs_mean_' + str(w)] = np.quantile(x_roll_abs_mean, 0.95)
# features['std_roll_min_' + str(w)] = x_roll_min.std()
# features['max_roll_min_' + str(w)] = x_roll_min.max()
# features['q05_roll_min_' + str(w)] = np.quantile(x_roll_min, 0.05)
# features['q95_roll_min_' + str(w)] = np.quantile(x_roll_min, 0.95)
# features['std_roll_max_' + str(w)] = x_roll_max.std()
# features['min_roll_max_' + str(w)] = x_roll_max.min()
# features['q05_roll_max_' + str(w)] = np.quantile(x_roll_max, 0.05)
# features['q95_roll_max_' + str(w)] = np.quantile(x_roll_max, 0.95)
# NOTE: at this point `train` is the chunked TextFileReader from the cell above, so .iloc is not
# available; this cell assumes `train` holds the full DataFrame (i.e. re-read train.csv without
# iterator=True before running it).
features_test = []
for i in tqdm(range(1)):
    seg = train.iloc[i * rows : i * rows + rows]
    features_test.append(extract_features_from_segment(seg.acoustic_data))
print(features_test)
features_list = []
for i in tqdm(range(segments)):
seg = train.iloc[i * rows : i * rows + rows]
features_list.append(extract_features_from_segment(seg.acoustic_data))
# print(features_list)
|
# ## Dataset:
# https://www.kaggle.com/datasets/fajarkhaswara/religion-in-indonesia
#
# ## Definition of Eastern Indonesia:
# https://indonesiatimur.co/definisi/
# ## Introduction.....
# A brief introduction: in this notebook I practice Exploratory Data Analysis (EDA) on the religion-in-indonesia dataset. As the title suggests, the EDA focuses on several provinces in the Eastern Indonesia region (not yet covering the newly created provinces in Papua). EDA has a broad definition, but in short, "EDA is the process of building assumptions, hypotheses and spotting anomalies in the data being studied"; in this notebook the EDA is framed by two questions that help analyse this dataset.
# import the libraries used
# and the dataset used
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("bmh")
import seaborn as sns
dataset = pd.read_csv(
"/kaggle/input/religion-in-indonesia/population_of_religious_adherents_in_Indonesia_by_province.csv"
)
dataset
# #### list of provinces in Eastern Indonesia
indonesia_timur = pd.DataFrame(
{
"province": [
"East Nusa Tenggara",
"Gorontalo",
"South Sulawesi",
"South East Sulawesi",
"Central Sulawesi",
"North Sulawesi",
"West Sulawesi",
"Maluku",
"North Maluku",
"Papua",
"West Papua",
"Mountain Papua",
"South Papua",
"Central Papua",
"West Southeast Papua",
]
}
)
kawasan_indonesia_timur = pd.merge(
    indonesia_timur, dataset, on="province", how="left"
)  # merge the dataset with the list of Eastern Indonesia provinces
kawasan_indonesia_timur
kawasan_indonesia_timur.dropna(axis=0, inplace=True)  # drop NaN rows
# kawasan_indonesia_timur.drop(labels='total', axis=1, inplace=True) # drop the total column
kawasan_indonesia_timur.set_index(
    "province", inplace=True
)  # set the province column as the index
# convert the data type to int
df = kawasan_indonesia_timur.applymap(int, na_action="ignore")
df = df.reset_index()
pair_plot = sns.pairplot(df, hue="province")
def recode_province(province):
if province == "West Papua":
return 1
else:
return 0
df["pb"] = df["province"].apply(recode_province)
scatter_plot = plt.figure()
axes1 = scatter_plot.add_subplot(1, 1, 1)
axes1.scatter(x=df["christian"], y=df["province"], c=df["pb"], alpha=0.5)
axes1.set_title("Agama Kristen di Indonesia Timur")
axes1.set_xlabel("Jumlah")
axes1.set_ylabel("Provinsi")
scatter_plot.show()
def recode_province(province):
if province == "West Papua":
return 1
else:
return 0
df["pb"] = df["province"].apply(recode_province)
sns.set_style("whitegrid")
bar1 = sns.barplot(x="christian", y="province", data=df, hue="pb")
bar1.legend()
bar1.set_title("Agama Kristen di Indonesia Timur")
bar1.set_xlabel("Jumlah")
bar1.set_ylabel("Provinsi")
# #### 1. Total population of each province
# assign variables
no_satu = df
no_satu = pd.DataFrame(
    no_satu.sum(axis=1), columns=["jumlah"]
)  # build a new dataframe from the row totals
jumlah = no_satu  # totals per province
total = jumlah.sum()  # grand total population across the provinces
no_satu["presentasi"] = jumlah / total * 100  # compute the percentage
no_satu = no_satu.sort_values("presentasi", ascending=False)  # sort the data
# print the result
no_satu
# #### 2. Total population for each religion
# assign variables
no_dua = df
no_dua = pd.DataFrame(
    no_dua.sum(), columns=["jumlah"]
)  # build a new dataframe from the column totals
jumlah = no_dua  # totals per religion
total = jumlah.sum()  # grand total population
no_dua["presentasi"] = jumlah / total * 100  # compute the percentage
no_dua = no_dua.sort_values("presentasi", ascending=False)  # sort the data
# print the result
no_dua
# ## Visualisation
# declare some variables
label = list(no_satu.index)
sizes = list(no_satu.jumlah)
style_1 = (0, 0, 0, 0, 0, 0, 0, 0.3, 0, 0, 0)
# pie chart
fig1, ax1 = plt.subplots(figsize=(16, 8), subplot_kw=dict(aspect="equal"))
ax1.pie(
sizes, labels=label, autopct="%1.1f%%", shadow=True, startangle=50, explode=style_1
)
# title
fig1.suptitle(
"Populasi Penduduk di Indonesia Timur berdasarkan Provinsi",
fontsize=20,
fontweight="bold",
)
plt.show()
# declare some variables
label2 = list(no_dua.index)
sizes2 = list(no_dua.jumlah)
rata2 = no_dua.jumlah.mean()
# bar chart
fig2, ax2 = plt.subplots(figsize=(16, 8))
ax2.bar(label2, sizes2) # color=bar_colors)
# title
fig2.suptitle(
"Jumlah Populasi berdasarkan suatu Agama di Indonesia Timur",
fontsize=20,
fontweight="bold",
)
# label
ax2.set_xlabel("Agama")
ax2.set_ylabel("Populasi")
# horizontal line showing the mean
ax2.axhline(rata2, ls="--", color="r")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing the required libraries
# # Import libraries
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.metrics import confusion_matrix
import seaborn as sns
# # Load the data
# Load the data
data = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
# # Encode the target variable 'diagnosis'
# Drop the 'Unnamed: 32' column
data.drop(labels="Unnamed: 32", inplace=True, axis=1)
# Encode the target variable 'diagnosis'
x = data.iloc[:, 2:].values
y = data.iloc[:, 1].values
encoder = LabelEncoder()
y = encoder.fit_transform(y)
# # Split the data into training and testing sets
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=0)
# # Scale the data using StandardScaler
# Scale the data using StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# # Define the model architecture and compile it
# Define the model architecture and compile it
model = Sequential()
model.add(Dense(16, activation="relu", input_shape=(30,)))
model.add(Dropout(0.1))
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.1))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
# # Train the model on the training set
# Train the model on the training set
model.fit(X_train, y_train, batch_size=100, epochs=150)
# # Predict on the test set and calculate confusion matrix
# Predict on the test set and calculate confusion matrix
pred = model.predict(X_test)
pred = pred > 0.5
cm = confusion_matrix(y_test, pred)
# Plot the confusion matrix using seaborn heatmap
sns.heatmap(cm, annot=True)
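# Added sketch (not in the original notebook): summary metrics computed from the same
# predictions, using scikit-learn's standard metric helpers, which are assumed to be
# available but were not imported above.
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
pred_labels = pred.astype(int)  # convert the boolean predictions to 0/1 labels
print("Accuracy :", accuracy_score(y_test, pred_labels))
print("Precision:", precision_score(y_test, pred_labels))
print("Recall   :", recall_score(y_test, pred_labels))
print("F1 score :", f1_score(y_test, pred_labels))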
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
print(os.listdir("../input/LANL-Earthquake-Prediction"))
train = pd.read_csv(
"../input/LANL-Earthquake-Prediction/train.csv",
dtype={"acoustic_data": np.int16, "time_to_failure": np.float64},
)
pd.options.display.precision = 20
train.head()
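# Added exploratory sketch (not part of the original snippet): the training file is very
# large, so we downsample heavily before plotting the acoustic signal and time_to_failure
# together on twin axes; matplotlib is assumed to be available.
import matplotlib.pyplot as plt
sample = train.iloc[::1000]  # keep every 1000th row so the plot stays tractable
fig, ax1 = plt.subplots(figsize=(12, 4))
ax1.plot(sample["acoustic_data"].values, color="tab:blue", linewidth=0.5)
ax1.set_ylabel("acoustic_data", color="tab:blue")
ax2 = ax1.twinx()
ax2.plot(sample["time_to_failure"].values, color="tab:red", linewidth=0.5)
ax2.set_ylabel("time_to_failure", color="tab:red")
plt.title("Downsampled acoustic signal vs time to failure")
plt.show()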
|
# # Preparing the MNIST Fashion dataset
import os
import time
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import tqdm
from tensorflow.keras import layers
# loading the MNIST fashion dataset
(train_images, _), (_, _) = tf.keras.datasets.fashion_mnist.load_data()
train_images.shape
# reshaping for NN
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype("float32")
train_images.shape
# example
train_images[56782, :10, :10]
# normalising the data in arrays, so the values are between -1 and 1
train_images = (train_images - 127.5) / 127.5
train_images[56782, :10, :10]
# let's check the image after our modifications
plt.imshow(train_images[2567].squeeze(), cmap="gray")
buffer_size = 60000
batch_size = 128
# Training set is prepared by dividing the whole data into batches and shuffling it.
train_dataset = (
tf.data.Dataset.from_tensor_slices(train_images)
.shuffle(buffer_size)
.batch(batch_size)
)
# # Generator
# The generator uses tf.keras.layers.Dense layers to produce an image from a seed (random noise). It starts with a Dense layer that takes this seed as input, followed by a ReLU activation after each hidden Dense layer. Note that the activation in the final layer is tanh, which outputs values in the range -1 to 1, matching the normalised training images. The 784-dimensional output is then reshaped to (Batch Size, 28, 28, 1) using the layers.Reshape function.
# Both the generator and discriminator are defined using the Keras Sequential API.
def generator_model():
model = tf.keras.Sequential()
model.add(layers.Dense(64, input_dim=100))
model.add(layers.ReLU())
model.add(layers.Dense(128))
model.add(layers.ReLU())
model.add(layers.Dense(256))
model.add(layers.ReLU())
model.add(layers.Dense(784, activation="tanh"))
model.add(layers.Reshape((28, 28, 1)))
return model
generator = generator_model()
generator.summary()
# Using the (as yet untrained) generator to create a random grayscale image.
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
generated_image.shape
plt.imshow(generated_image[0, :, :, 0], cmap="gray")
# # Discriminator
# Note that the discriminator is a binary classifier consisting only of fully-connected layers. The discriminator expects a tensor of shape (Batch Size, 28, 28, 1).
# We flatten the input to feed it into Dense layers and use LeakyReLU for better performance.
# Output is the probability score.
# discriminator model
def discriminator_model():
model = tf.keras.Sequential()
model.add(layers.Input(shape=(28, 28, 1)))
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(0.2))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(128))
model.add(layers.LeakyReLU(0.2))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(64))
model.add(layers.LeakyReLU(0.2))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(1, activation="sigmoid"))
return model
discriminator = discriminator_model()
discriminator.summary()
# Here We are using the (as yet untrained) discriminator to classify the generated images as real or fake.
# The model will be trained to output values > 0.5 for real images, and values <0.5 for fake images.
discriminator = discriminator_model()
output = discriminator(generated_image)
print(output)
# # Loss and Optimizer
# Now We are defining loss functions.
# In this case it is Binary Cross entropy as target is real or fake images
# It compares the discriminator's predictions on real images to an array of 1s,
# and the discriminator's predictions on fake (generated) images to an array of 0s.
bce = tf.keras.losses.BinaryCrossentropy()
def discriminator_loss(real_output, fake_output):
real_loss = bce(tf.ones_like(real_output), real_output)
fake_loss = bce(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
# The generator's loss quantifies how well it was able to trick the discriminator.
# Intuitively, if the generator is performing well, the discriminator will classify
# the fake images as real (or 1)
def generator_loss(fake_output):
gen_loss = bce(tf.ones_like(fake_output), fake_output)
return gen_loss
# The discriminator and the generator optimizers are different since two networks
# would be trained separately.
generator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# save and restore models, which can be helpful in case a long running training task is interrupted.
checkpoint_dir = "./training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator,
)
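# Restoring is not demonstrated in the original notebook; as a minimal sketch, a previously
# saved checkpoint could be reloaded like this before resuming training (a no-op on the
# first run, since nothing has been saved yet).
latest_ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if latest_ckpt is not None:
    checkpoint.restore(latest_ckpt)
    print("Restored checkpoint from", latest_ckpt)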
# # Training
# The training loop begins with the generator receiving a random seed as input. That seed is used to produce an image. The discriminator is then used to classify real images (drawn from the training set) and fake images (produced by the generator). The loss is calculated for each of these models, and the gradients are used to update the generator and discriminator.
epochs = 50
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# The train_step function performs a single training step for a generative adversarial network (GAN), which consists of the following steps:
# 1. Generate random noise of shape [batch_size, noise_dim] using tf.random.normal().
# 2. Use tf.GradientTape() to record the gradient computations during forward pass for both the generator and discriminator.
# 3. Generate fake images by feeding the random noise to the generator.
# 4. Compute the output of the discriminator for both real and fake images.
# 5. Compute the discriminator loss using discriminator_loss() function with the real and fake outputs.
# 6. Compute the generator loss using generator_loss() function with the fake output.
# 7. Compute the gradients of the generator and discriminator with respect to their trainable variables using the corresponding gradient tapes.
# 8. Apply the gradients to the optimizer for both the generator and discriminator.
# 9. Return the generator loss, discriminator loss, mean of the real output, and mean of the fake output.
#
# We are using tf.function. This annotation causes the function to be "compiled" into a TensorFlow graph.
@tf.function
def train_step(images):
noise = tf.random.normal([batch_size, noise_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
disc_loss = discriminator_loss(real_output, fake_output)
gen_loss = generator_loss(fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables
)
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables)
)
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables)
)
return (
gen_loss,
disc_loss,
tf.reduce_mean(real_output),
tf.reduce_mean(fake_output),
)
# Notice training is set to False. This is so all layers run in inference mode (batchnorm).
# Function for generating and plotting images is defined
def generate_and_plot_images(model, epoch, test_input):
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(8, 4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i + 1)
pred = (predictions[i, :, :, 0] + 1) * 127.5
pred = np.array(pred)
plt.imshow(pred.astype(np.uint8), cmap="gray")
plt.axis("off")
plt.savefig("image_at_epoch_{:04d}.png".format(epoch))
plt.show()
# The "train" function trains a GAN on the given dataset for the specified number of epochs by performing the following steps:
# 1. Initialize empty lists gen_loss_list, disc_loss_list, real_score_list, and fake_score_list to keep track of the losses and scores during training.
# 2. For each epoch, iterate over each batch in the dataset using a for loop.
# 3. Call the train_step function on the current batch to perform a single training step for the GAN.
# 4. The returned values of generator loss, discriminator loss, real score, and fake score are accumulated to compute the mean losses and scores for the epoch.
# 5. Print the generator and discriminator losses and real and fake scores for the current epoch.
# 6. Generate and plot sample images using the generate_and_plot_images function.
# 7. Append the mean generator and discriminator losses, real score, and fake score to their respective lists.
# 8. Every 10 epochs, save a checkpoint of the model using the save method of a tf.train.Checkpoint object.
# 9. Print the time taken to complete the epoch.
# 10. Return the gen_loss_list, disc_loss_list, real_score_list, and fake_score_list. These lists contain the losses and scores for each epoch, and can be used to analyze the performance of the GAN over time.
# train function
def train(dataset, epochs):
gen_loss_list = []
disc_loss_list = []
real_score_list = []
fake_score_list = []
for epoch in tqdm(range(epochs)):
start = time.time()
num_batches = len(dataset)
print(f"Training started with epoch {epoch + 1} with {num_batches} batches...")
total_gen_loss = 0
total_disc_loss = 0
for batch in dataset:
generator_loss, discriminator_loss, real_score, fake_score = train_step(
batch
)
total_gen_loss += generator_loss
total_disc_loss += discriminator_loss
mean_gen_loss = total_gen_loss / num_batches
mean_disc_loss = total_disc_loss / num_batches
print(
"Losses after epoch %5d: generator %.3f, discriminator %.3f, real_score %.2f%%, fake_score %.2f%%"
% (
epoch + 1,
generator_loss,
discriminator_loss,
real_score * 100,
fake_score * 100,
)
)
generate_and_plot_images(generator, epoch + 1, seed)
gen_loss_list.append(mean_gen_loss)
disc_loss_list.append(mean_disc_loss)
real_score_list.append(real_score)
fake_score_list.append(fake_score)
if (epoch + 1) % 10 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
print("Time for epoch {} is {} sec".format(epoch + 1, time.time() - start))
return gen_loss_list, disc_loss_list, real_score_list, fake_score_list
# Taking the train_dataset and epochs as the parameters, the train function calls the train_step
# function, at every new batch. At the beginning of the training, the generated images look like
# random noise. As training progresses, the generated images will look more real.
gen_loss_epochs, disc_loss_epochs, real_score_list, fake_score_list = train(
train_dataset, epochs=epochs
)
# # Plotting visualisations
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
ax1.plot(gen_loss_epochs, label="Generator loss", alpha=0.5)
ax1.plot(disc_loss_epochs, label="Discriminator loss", alpha=0.5)
ax1.legend()
ax1.set_title("Training Losses")
ax2.plot(real_score_list, label="Real score", alpha=0.5)
ax2.plot(fake_score_list, label="Fake score", alpha=0.5)
ax2.set_title("Accuracy Scores")
ax2.legend()
|
from sklearn.cluster import KMeans
import glob
from sklearn.metrics.pairwise import euclidean_distances
import csv
import os
from PIL import Image
from itertools import product
import numpy as np
PATCH_IMAGE_SIZE = 256 // 64
SOURCE_DIR = "/kaggle/input/tomato-short-dataset-5000/test"
OUTPUT_FILE = "/kaggle/working/test_weights.csv"
header = ["filename", "class", "weight", "full_path"]
with open(OUTPUT_FILE, "w", newline="") as file:
writer = csv.writer(file)
writer.writerow(header)
def tile(filename, dir_in, d):
patches = []
name, ext = os.path.splitext(filename)
try:
img = Image.open(os.path.join(dir_in, filename))
except:
print("Could not open image: ", filename)
return
w, h = img.size
grid = product(range(0, h - h % d, d), range(0, w - w % d, d))
for i, j in grid:
box = (j, i, j + d, i + d)
patches.append(img.crop(box))
return patches
def extractFeatures(files, subfolder):
features = []
labels = []
file_count = 0
for file in files:
file_count += 1
# create patches
m_patches = tile(file, subfolder, PATCH_IMAGE_SIZE)
        # if an error occurred while reading the file, skip it
if not m_patches:
continue
# convert to numpy array
m_patches = [np.array(patch) for patch in m_patches]
# extract features
m_features = [patch.flatten() for patch in m_patches]
        # we have 16 features per image, each tagged with the same (file-level) label
for f in m_features:
features.append(f)
labels.append(os.path.basename(file))
return features, labels, file_count
def getMeanDistance(kmean, features):
mean_distances = []
# Get cluster centroids
centroids = kmean.cluster_centers_
# Calculate mean distance for each cluster
for i in range(3):
points = []
# Select points in cluster
for j in range(kmean.labels_.size):
l = kmean.labels_[j]
if l == i:
points.append(features[j])
# Calculate distances between points and centroid
print("Calculating for cluster: " + str(i))
distances = euclidean_distances(points, centroids[i].reshape(1, -1))
# Calculate mean distance
mean_distance = np.mean(distances)
# Add mean distance to list
mean_distances.append(mean_distance)
return mean_distances
def saveToCSV(files, weights):
idx = 0
with open(OUTPUT_FILE, "a", newline="") as file:
writer = csv.writer(file)
for f in files:
label = os.path.basename(f)
dirname = os.path.dirname(f)
classname = dirname.split("/")[-1]
writer.writerow([label, classname, weights[idx], dirname])
idx += 1
subfolders = [f.path for f in os.scandir(SOURCE_DIR) if f.is_dir()]
for subfolder in subfolders:
files = os.listdir(subfolder)
print("-----------------------------")
print("Processing folder: ", subfolder)
    features, labels, file_count = extractFeatures(files, subfolder)
print("\nTotal Features: ", len(features))
print("Total Labels: ", len(labels))
print("Total File fetched: ", file_count)
print("Creating Clusters...")
# Creating Clusters
k = 3
kmean = KMeans(k, random_state=40)
kmean.fit(features)
clusters_labels = kmean.predict(features)
print("Clusters created.")
files = glob.glob(subfolder + "/*.JPG")
print("Total Files found: ", len(files))
files_clusters = dict()
for f in files:
filename = os.path.basename(f)
files_clusters[filename] = dict()
files_clusters[filename]["clusters"] = [0 for i in range(3)]
cluster_count = [0 for i in range(3)]
for i in range(len(labels)):
label = labels[i]
cluster = clusters_labels[i]
if label in files_clusters:
files_clusters[label]["clusters"][cluster] += 1
cluster_count[cluster] += 1
print("\nCluster Size: ", cluster_count)
file1Cluster = files_clusters[list(files_clusters.keys())[0]]
print("File 1 Cluster: ", file1Cluster)
print("Total features per image:", sum(file1Cluster["clusters"]))
print("Calculating mean distance...")
# Initialize list to store mean distances for each cluster
mean_distances = getMeanDistance(kmean, features)
print("Mean Distance is: ", mean_distances)
print("Calculating weight...")
    # cluster coefficient for each cluster: c = d / n, where d is the mean distance to the
    # centroid and n is the number of patches assigned to the cluster (normalised below)
ce = [0 for i in range(3)]
for i in range(3):
ce[i] = mean_distances[i] / cluster_count[i]
# normalize
cluster_coefficient = [float(i) / sum(ce) for i in ce]
print("Cluster_coefficients: ", cluster_coefficient)
c1 = cluster_coefficient[0]
c2 = cluster_coefficient[1]
c3 = cluster_coefficient[2]
weights = []
for f in files:
label = os.path.basename(f)
fc = files_clusters[label]["clusters"]
w = (c1 * fc[0] + c2 * fc[1] + c3 * fc[2]) / (fc[0] + fc[1] + fc[2])
weights.append(w)
# saving weights
print("Saving weights...")
saveToCSV(files, weights)
print("Weights saved.\n------------------------------\n\n")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Cleaning and Exploration
# First, I'll have a look at the data using head, tail, and info.
data = pd.read_csv(
"/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv"
)
data.head()
data.tail()
data.info()
# I'm interested in seeing what Region values there are for future analysis.
data["Region"].unique()
print("Missing values distribution: ")
print(data.isnull().mean())
# The dataset appears to be quite clean already! I'll do a check for duplicates next.
data.duplicated().sum()
# I'd like to get an idea of what the data looks like grouped by region, then I'll run describe() to see some quick stats of the data.
data.describe()
# # Visualizing Life Expectancy and its Factors
# Below I'll make a comparison of the average life expectancy by region.
avg_life_expectancies = data.groupby("Region").Life_expectancy.mean()
avg_life_expectancies = avg_life_expectancies.rename("Life Expectancy")  # groupby().mean() returns a Series, so rename it rather than setting columns
print(avg_life_expectancies)
fig = plt.figure(figsize=(10, 5))
avg_life_expectancies.plot(kind="barh")
plt.xlabel("Life expectancy")
plt.title("Average Life Expectancy per Region from 2000-2015")
plt.show()
# Based on these average life expectancies, we can see that the highest have been in the European Union and North America. However, this represents the average over 15 years, so there are more questions we can ask about this data! I'd like to know how the life expectancies have changed over time in several regions.
data_sorted_by_year = data.sort_values(by="Year")
data_sorted_by_year = data_sorted_by_year.reset_index()
data_sorted_by_year.head()
data_sorted_by_year_Africa = data_sorted_by_year[
data_sorted_by_year["Region"] == "Africa"
]
data_sorted_by_year_Africa
data_sorted_by_year_Africa.reset_index()
# Next I'll isolate the columns I want in my visualization and group by year.
df_af = data_sorted_by_year_Africa[["Year", "Life_expectancy"]]
df_af = df_af.groupby("Year").Life_expectancy.mean()
df_af
# Plot using matplotlib.
africa_plot = plt.plot(df_af, "green")
# I'm curious to see how this line plot compares to a region with a historically higher GDP per capita. I'll add another line to the graph showing the EU region's life expectancy over time.
df_eu = data_sorted_by_year[data_sorted_by_year["Region"] == "European Union"]
df_eu = df_eu.groupby("Year").Life_expectancy.mean()
df_eu
# Now that we have that information, we can compare both. The EU's life expectancy started off higher, but is plateauing. Africa's life expectancy is rising more quickly, but is still not very close to the EU's.
plt.plot(df_eu)
plt.plot(df_af, "green")
plt.xlim(2000, 2015)
plt.ylim(50, 85)
plt.margins(0.2)
plt.xlabel("Year")
plt.ylabel("Life Expectancy")
plt.legend(["EU", "Africa"], loc="lower right")
plt.title("Life Expectancies of Africa vs EU 2000-2015")
# The relationship between GDP per capita and life expectancy has long been studied and debated. I'm curious to know what it looks like based on the given dataset. To take a look at this, I'll use the year 2015 and get an average of the GDP of countries in a region, as well as the average life expectancy for countries in the region.
series_2015 = data[data.Year == 2015] # only year 2015
series_2015.reset_index()
df_agg = series_2015.groupby("Region").agg(
avg_life=pd.NamedAgg(column="Life_expectancy", aggfunc="mean"),
avg_gdp=pd.NamedAgg(column="GDP_per_capita", aggfunc="mean"),
)
df_agg.reset_index()
# Sort the regions by GDP; these region names are used to label the points in the annotated scatter below:
sorted_df = df_agg.sort_values(by=["avg_gdp"], ascending=True)
sorted_df.reset_index()
plt.scatter(df_agg.avg_gdp, df_agg.avg_life, s=90)
plt.title("Average GDP vs Life Expectancy Across Regions 2015")
plt.xscale("log")
plt.xlabel("GDP")
plt.ylabel("Life Expectancy")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Overview
# 
# In this notebook we will use a Convolutional Neural Network (CNN) to classify images from the CIFAR-10 dataset. The task is pretty hard, as the images from the dataset are not that easy to classify with great accuracy even for a person. But PyTorch lets us build a CNN pretty easily, which is of great help.
# # Introduction to Convolutional Neural Network (CNN)
# 
# A Convolutional Neural Network (CNN) is a class of artificial neural network most commonly applied to analyze visual imagery. They are specifically designed to process pixel data and are used in image recognition and processing.
# CNNs were the models that allowed computer vision to scale from simple applications to powering sophisticated products and services.
# A great intro into the subject can be found [here](https://towardsdatascience.com/intuitively-understanding-convolutions-for-deep-learning-1f6f42faee1)
# # Imports
# Let's start with our imports. We import a bunch of things from torch, some utilities, and some libraries for plotting.
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from torchvision.utils import make_grid
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.metrics import confusion_matrix
# # Loading and preparing the data
# Load the data from disk, we will use the available PyTorch functionality for this.
dataset = torchvision.datasets.CIFAR10(
root="/kaggle/input/cifar10-python/",
train=True,
download=False,
transform=transforms.ToTensor(),
)
testset = torchvision.datasets.CIFAR10(
root="/kaggle/input/cifar10-python/",
train=False,
download=False,
transform=transforms.ToTensor(),
)
# Split the train data into train and validate, create data loaders and define the classes and the batch size constant.
batchSize = 200
trainset, validateset = random_split(dataset, [45000, 5000])
train = DataLoader(trainset, batchSize, shuffle=True)
validate = DataLoader(validateset, batchSize, shuffle=True)
testLoader = DataLoader(testset, batch_size=batchSize, shuffle=False)
classes = (
"plane",
"car",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
)
# # Display some of the images
# Display some of the pictures, to verify everything is ok, and also it will make some good plots :)
# First let's display the first batch of 200 images from the training set.
for images, labels in train:
fig, ax = plt.subplots(figsize=(12, 6))
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(make_grid(images, nrow=20).permute(1, 2, 0))
break
# Now let's display a few of the images a little bigger, each with its label as a title.
for i in range(0, 12):
img, label = validateset[i]
plt.subplot(3, 4, i + 1)
plt.title(classes[label])
plt.axis("off")
plt.imshow(img.permute(1, 2, 0))
# As you can see this is a pretty hard problem, as many of the images are hard for even a human. CNNs to the rescue!
# # Creating the CNN Classification Model class
# We will define a class that will serve as our classification model. We will leverage the functionality provided to us by PyTorch.
class Cifar10Classifier(nn.Module):
def __init__(self):
super().__init__()
        # define all the layers sequentially
self.network = nn.Sequential(
            # here we define 3 channels as our input, 32 channels as the output,
# the size of the kernel, the padding and the stride
nn.Conv2d(
in_channels=3, out_channels=32, kernel_size=3, padding=1, stride=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=32, out_channels=64, kernel_size=3, padding=1, stride=1
),
nn.ReLU(),
# apply a max pool layer
nn.MaxPool2d(2, 2),
# continue the process in the next two layers
nn.Conv2d(
in_channels=64, out_channels=128, kernel_size=3, padding=1, stride=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=128, out_channels=128, kernel_size=3, padding=1, stride=1
),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(
in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1
),
nn.ReLU(),
nn.MaxPool2d(2, 2),
# final layer, we decrease the number of outputs to 10, which is our number of classes
nn.Flatten(),
nn.Linear(256 * 4 * 4, 1024),
nn.ReLU(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 10),
)
def trainingStep(self, batch):
        # unpack the images and labels from the batch
images, labels = batch
# call the model itself
out = self(images)
# compute the loss
loss = F.cross_entropy(out, labels)
return loss
def accuracy(self, outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
def validationStep(self, batch):
images, labels = batch
out = self(images)
loss = F.cross_entropy(out, labels)
accuracy = self.accuracy(out, labels)
return {"loss": loss, "accuracy": accuracy}
def validationEpochEnd(self, outputs):
batchLosses = [row["loss"] for row in outputs]
epochLosses = torch.stack(batchLosses).mean()
batchAcc = [row["accuracy"] for row in outputs]
epochAcc = torch.stack(batchAcc).mean()
return {"loss": epochLosses.item(), "accuracy": epochAcc.item()}
def forward(self, x):
return self.network(x)
# # Training the CNN classification model
# We will first define an evaluation method, a train method and then we will train our model.
# the evaluation method; note that gradient tracking is disabled here by the decorator
@torch.no_grad()
def evaluateModel(model, validationLoader):
# puts the model in eval mode
model.eval()
out = [model.validationStep(batch) for batch in validationLoader]
return model.validationEpochEnd(out)
# method for training the model
def trainModel(
epochs,
lr,
model,
trainLoader,
validationLoader,
optimizationFunction=torch.optim.SGD,
):
optimizer = optimizationFunction(model.parameters(), lr)
for epoch in range(epochs):
print(f"training epoch {epoch}")
# puts the model in train mode
model.train()
trainingLosses = []
# training
for batch in trainLoader:
loss = model.trainingStep(batch)
trainingLosses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
print(
f"after training epoch {epoch} we get results {evaluateModel(model, validationLoader)}"
)
# Now we will instantiate the model and perform an evaluation on it before training. Because the weights of the model are randomly initialised, we get an accuracy of about 10% (100% divided by the number of classes).
# instantiate the model
model = Cifar10Classifier()
# do an evaluation of the untrained model on the validation set
evaluateModel(model, validate)
# Now let's do the training of the model
trainModel(
6,
0.001,
model,
trainLoader=train,
validationLoader=validate,
optimizationFunction=torch.optim.Adam,
)
# We trained for 6 epochs; with this type of network we can achieve an accuracy of about 0.75 - 0.8. In our case we got about 0.72, which is satisfactory. Training the model for more epochs will yield progressively smaller improvements, capping below 0.8.
# # Predicting the image class with our trained model
# First predict the accuracy for the validation dataset and then the test dataset.
print(f"validation dataset accuracy: {evaluateModel(model, validate)}")
print(f"test dataset accuracy: {evaluateModel(model, testLoader)}")
# So we can see that the accuracy and the loss are fairly similar for the validation and test datasets. The accuracy on the test dataset is **0.5656**.
# Let's define a new utility function to predict the label of an image based on the model.
def predictImage(img, model):
xb = img.unsqueeze(0)
yb = model(xb)
_, pred = torch.max(yb, dim=1)
return pred[0].item()
# Now we will plot maybe the most interesting image of the notebook, several images with the predicted and actual labels. Again we can see that the CIFAR-10 dataset is pretty hard, even for humans!
with torch.no_grad():
plt.subplots(figsize=(12, 10))
for i in range(0, 12):
img, label = testset[i]
predictedValue = predictImage(img, model)
plt.subplot(3, 4, i + 1)
plt.title(f"predicted: {classes[predictedValue]} \n actual: {classes[label]}")
plt.axis("off")
plt.imshow(img.permute(1, 2, 0))
# # Confusion matrix
# No classification problem is complete without a confusion matrix, so let's make one!
predictions = np.empty((0, len(testset)), np.int32)
actualValues = np.empty((0, len(testset)), np.int32)
with torch.no_grad():
for i in range(0, len(testset)):
testImg, testLabel = testset[i]
predictedValue = predictImage(testImg, model)
predictions = np.append(predictions, predictedValue)
actualValues = np.append(actualValues, testLabel)
confusionMatrix = confusion_matrix(actualValues, predictions)
confusionMatrixDf = pd.DataFrame(
confusionMatrix, index=[i for i in classes], columns=[i for i in classes]
)
plt.figure(figsize=(10, 7))
sns.heatmap(confusionMatrixDf, annot=True, cmap="Blues", fmt="g")
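# Added sketch: per-class accuracy derived from the confusion matrix above (the diagonal
# counts divided by the row totals, i.e. recall for each class).
perClassAccuracy = confusionMatrix.diagonal() / confusionMatrix.sum(axis=1)
for className, acc in zip(classes, perClassAccuracy):
    print(f"{className:>6}: {acc:.3f}")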
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# task-1
data = pd.read_csv("/kaggle/input/automobile/automobile_data.csv")
data
# task-2
data.shape
# task-3
data[0:3]
# task-4
data[-5:]
# task-5
data.columns
# task-6
print("First column type")
print(data[data.columns[0]].dtypes)
print("First column type")
print(data[data.columns[-1]].dtypes)
# task-7
print(data["Popularity"].dtypes)
# task-8
num_cols = data.select_dtypes(include="number").columns.tolist()
print(num_cols)
# task-9
non_num_cols = data.select_dtypes(exclude="number").columns.tolist()
print(non_num_cols)
# task-10
for i in non_num_cols:
print(data[i].describe())
# task-11
for i in num_cols:
print(data[i].describe())
# task-12
data["Style"].unique()
# task-13
data["Year"].unique()
# task-14
print(data["Cylinders"].max())
print(data["Cylinders"].min())
# task-15
data["Cylinders"].isna().sum()
# task-16
data["HP"].isna().sum()
# task-17
len(data[data["Make"] == "BMW"])
# task-18
data["Doors"].unique()
# task-19
data["Cylinders"].isnull().sum()
# task-20
data[data["MSRP"] == data["MSRP"].max()]
# task-21
data = data.drop("Fuel Type", axis=1)
data = data.drop("Category", axis=1)
data = data.drop("Style", axis=1)
data
# task-22
data["Cylinders"] = data["Cylinders"].fillna(data["Cylinders"].mean())
data["Cylinders"].isna().sum()
# task-23
data["HP"] = data["HP"].fillna(data["HP"].mean())
data["HP"].isna().sum()
# task-24
data = data.astype(str)
# Print the result
print(data.dtypes)
# task-25
print(data["Transmission"].value_counts()["MANUAL"])
# task-1
data = pd.read_csv("/kaggle/input/automobile/automobile_data.csv")
# task-26
data[(data["Year"] == 2013) & (data["Fuel Type"] == "electric")]
# task-27
data[(data["Make"] == "Honda") & (data["Fuel Type"] == "electric")]
# task-28
data[data["HP"] == data["HP"].max()]
# task-29
data = data.rename(columns={"HP": "Horse Power"})
data
# task-30
data[data["Doors"] < 3]
# task-31
data.shape
# task-32
data[0:1]
# task-33
data[-1:]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from warnings import filterwarnings
filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_df = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/train.csv")
sub_df = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/sample_submission.csv")
med_df = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/med.csv")
train_df.head(10)
train_df.info()
train_df["Tarih"] = pd.to_datetime(train_df["Tarih"])
sub_df["Tarih"] = pd.to_datetime(sub_df["Tarih"])
med_df["Tarih"] = pd.to_datetime(med_df["Tarih"])
train_df.info()
train_df["date"] = train_df["Tarih"].map(lambda x: x.strftime("%Y-%m-%d"))
train_df["year"] = train_df["Tarih"].dt.year
train_df["month"] = train_df["Tarih"].dt.month
train_df["day"] = train_df["Tarih"].dt.day
train_df["hour"] = train_df["Tarih"].dt.hour
train_df["day_of_week"] = train_df["Tarih"].dt.dayofweek
# day of week 0: Monday
# day of week 6: Sunday
sub_df["date"] = sub_df["Tarih"].map(lambda x: x.strftime("%Y-%m-%d"))
sub_df["year"] = sub_df["Tarih"].dt.year
sub_df["month"] = sub_df["Tarih"].dt.month
sub_df["day"] = sub_df["Tarih"].dt.day
sub_df["hour"] = sub_df["Tarih"].dt.hour
sub_df["day_of_week"] = sub_df["Tarih"].dt.dayofweek
sub_df["Kesinti"] = 0
med_df["date"] = med_df["Tarih"].map(lambda x: x.strftime("%Y-%m-%d"))
med_df["year"] = med_df["Tarih"].dt.year
med_df["month"] = med_df["Tarih"].dt.month
med_df["day"] = med_df["Tarih"].dt.day
med_df["hour"] = med_df["Tarih"].dt.hour
med_df["day_of_week"] = med_df["Tarih"].dt.dayofweek
train_df.head()
med_df[["year", "month", "day", "hour"]] = med_df[
["year", "month", "day", "hour"]
].astype(int)
train_df["Kesinti"] = train_df.apply(
lambda row: int(
(
row[["year", "month", "day", "hour"]]
== med_df[["year", "month", "day", "hour"]]
)
.all(axis=1)
.any()
),
axis=1,
)
takvim = pd.read_csv("/kaggle/input/turkish-calendar/Turkish calendar.csv", sep=(";"))
new_sub = takvim[853:884]
new_train = takvim[884:2557]
new_sub.loc[:, "month"] = pd.to_datetime(
new_sub["CALENDAR_DATE"], format="%d.%m.%Y"
).dt.month
new_sub.loc[:, "year"] = pd.to_datetime(
new_sub["CALENDAR_DATE"], format="%d.%m.%Y"
).dt.year
new_sub.loc[:, "day"] = pd.to_datetime(
new_sub["CALENDAR_DATE"], format="%d.%m.%Y"
).dt.day
new_train.loc[:, "month"] = pd.to_datetime(
new_train["CALENDAR_DATE"], format="%d.%m.%Y"
).dt.month
new_train.loc[:, "year"] = pd.to_datetime(
new_train["CALENDAR_DATE"], format="%d.%m.%Y"
).dt.year
new_train.loc[:, "day"] = pd.to_datetime(
new_train["CALENDAR_DATE"], format="%d.%m.%Y"
).dt.day
new_train
new_sub["WEEKEND_FLAG"] = new_sub["WEEKEND_FLAG"].replace(["N"], 0)
new_sub["WEEKEND_FLAG"] = new_sub["WEEKEND_FLAG"].replace(["Y"], 1)
new_sub["RAMADAN_FLAG"] = new_sub["RAMADAN_FLAG"].replace(["N"], 0)
new_sub["RAMADAN_FLAG"] = new_sub["RAMADAN_FLAG"].replace(["Y"], 1)
new_sub["PUBLIC_HOLIDAY_FLAG"] = new_sub["PUBLIC_HOLIDAY_FLAG"].replace(["N"], 0)
new_sub["PUBLIC_HOLIDAY_FLAG"] = new_sub["PUBLIC_HOLIDAY_FLAG"].replace(["Y"], 1)
new_train["WEEKEND_FLAG"] = new_train["WEEKEND_FLAG"].replace(["N"], 0)
new_train["WEEKEND_FLAG"] = new_train["WEEKEND_FLAG"].replace(["Y"], 1)
new_train["RAMADAN_FLAG"] = new_train["RAMADAN_FLAG"].replace(["N"], 0)
new_train["RAMADAN_FLAG"] = new_train["RAMADAN_FLAG"].replace(["Y"], 1)
new_train["PUBLIC_HOLIDAY_FLAG"] = new_train["PUBLIC_HOLIDAY_FLAG"].replace(["N"], 0)
new_train["PUBLIC_HOLIDAY_FLAG"] = new_train["PUBLIC_HOLIDAY_FLAG"].replace(["Y"], 1)
new_sub.drop(
["SPECIAL_DAY_SK", "SPECIAL_DAY_SK2", "CALENDAR_DATE"], axis=1, inplace=True
)
new_train.drop(
["SPECIAL_DAY_SK", "SPECIAL_DAY_SK2", "CALENDAR_DATE"], axis=1, inplace=True
)
merged_sub = pd.merge(new_sub, sub_df, on=["year", "month", "day"])
merge_train = pd.merge(new_train, train_df, on=["year", "month", "day"])
merged_sub
merged_sub.drop(["Tarih", "date", "Dağıtılan Enerji (MWh)"], axis=1, inplace=True)
merge_train.drop(["Tarih", "date"], axis=1, inplace=True)
merge_train.info()
x = merge_train.drop(["Dağıtılan Enerji (MWh)"], axis=1)
y = merge_train["Dağıtılan Enerji (MWh)"]
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
import xgboost as xgb
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.ensemble import VotingRegressor
from sklearn.neighbors import KNeighborsRegressor
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
# rf_model = RandomForestRegressor(n_estimators=100, random_state=42)
# rf_model.fit(X_train, y_train)
# # Make predictions on the validation set
# y_pred = rf_model.predict(X_test)
#
from sklearn.metrics import accuracy_score
knn_model = KNeighborsRegressor().fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
MAPE = []
for k in range(15):
k = k + 1
knn_model = KNeighborsRegressor(n_neighbors=k).fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
mape = mean_absolute_percentage_error(y_test, y_pred)
MAPE.append(mape)
print("k =", k, "için RMSE değeri: ", mape)
y_pred1 = knn_model.predict(merged_sub)
subm = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/sample_submission.csv")
subm["Dağıtılan Enerji (MWh)"] = y_pred1
subm
subm.to_csv("KNN.csv", index=None)
|
# importing libraries for working with the dataset
import pandas as pd
from prettytable import PrettyTable, ALL
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from wordcloud import WordCloud
pd.options.mode.chained_assignment = None # default='warn'
# function to find correlation
def correlation(col1, col2):
return round(df_airport[col1].corr(df_airport[col2]), 3)
# function to print table
def create_table(df, col1, col2, table_title):
table = PrettyTable()
table.title = table_title
table.field_names = [col1, col2]
for i in range(0, len(df[col1])):
table.add_row([df[col1][i], "{:,}".format(df[col2][i])])
print(f"** {table_title} **")
print(table)
# read csv from google drive
df_airport = pd.read_csv("/kaggle/input/usa-airport-dataset/Airports2.csv")
print("null values count per column\n", df_airport.isnull().sum())
print("shape of dataset: ", df_airport.shape)
without_duplicates = df_airport.drop_duplicates()
print("shape of dataset without duplicates: ", without_duplicates.shape)
print("total duplicate rows =", df_airport.shape[0] - without_duplicates.shape[0])
# I will not be using the last 4 columns, so I don't need to fill their null values
# Finding correlations between numerical columns
print(
    f"There is a high positive correlation of {correlation('Passengers','Flights')} between number of flights and passengers"
)
print(
    f"There is a high positive correlation of {correlation('Passengers','Seats')} between passengers and seats"
)
print(
    f"There is a high positive correlation of {correlation('Seats','Flights')} between seats and flights"
)
print(
    f"There is a low negative correlation of {correlation('Distance','Flights')} between number of flights and distance"
)
print(
    f"There is a low correlation of {correlation('Origin_population','Passengers')} between origin population and passengers"
)
print(
    f"There is a low correlation of {correlation('Destination_population','Passengers')} between destination population and passengers"
)
# print correlation matrix
ax = sns.heatmap(
df_airport[["Passengers", "Seats", "Flights"]].corr(), annot=True
).set_title("Correlation matrix")
# drop unused columns to speed up computation
df_airport = df_airport[
[
"Origin_airport",
"Destination_airport",
"Origin_city",
"Destination_city",
"Passengers",
"Flights",
"Fly_date",
]
]
# Splitting state and city
df_airport[["Origin_city", "Origin_state"]] = df_airport["Origin_city"].str.split(
",", expand=True
)
df_airport[["Destination_city", "Destination_state"]] = df_airport[
"Destination_city"
].str.split(",", expand=True)
# Convert date type to be ready for time series analysis and correlation
df_airport["Fly_date"] = pd.to_datetime(df_airport["Fly_date"], errors="coerce")
df_airport["Fly_year"] = df_airport["Fly_date"].dt.strftime("%Y").astype("int")
df_airport["Fly_month"] = df_airport["Fly_date"].dt.strftime("%m").astype("int")
# Create a word cloud
# Import the wordcloud library
# long_string = a,b,c...
def world_cloud_generate(long_string):
    # create a WordCloud object
wordcloud = WordCloud(
background_color="white",
max_words=500000,
contour_width=3,
contour_color="steelblue",
width=500,
height=300,
repeat=False,
include_numbers=False,
collocations=False,
)
    # Generate a word cloud
wordcloud.generate(long_string)
return wordcloud
long_string_title = ",".join(list(df_airport["Destination_city"][0:1000000].values))
wordcloud1 = world_cloud_generate(long_string_title)
# Visualize the word cloud
wordcloud1.to_image()
print(
    f"There is almost no correlation ({correlation('Passengers','Fly_year')}) between the number of passengers and the year"
)
# I find it hard to believe there is no relationship, so I tracked the number of passengers annually
Passengers_count_annualy = df_airport.groupby("Fly_year")["Passengers"].agg(["sum"])
Passengers_count_annualy.reset_index(inplace=True)
Passengers_count_annualy["sum"] = Passengers_count_annualy["sum"].apply(
lambda x: "{:,}".format(x)
)
Passengers_count_annualy.columns = ["Fly_year", "Total passengers"]
Passengers_count_annualy
# There was a positive increase in the number of passengers annually until 2001-2002, after which it dropped dramatically
# due to the 9/11 event. The number of passengers started to increase again in 2003, but 2008 marked the onset
# of the Great Recession, which explains the second decrease.
# Find the popular destinations for each year
Passengers_dest_annualy = df_airport.groupby(["Fly_year", "Destination_state"])[
"Passengers"
].agg(["sum"])
Passengers_dest_annualy.reset_index(inplace=True)
annual_grouped = Passengers_dest_annualy.groupby("Fly_year")["sum"].agg(["max"])
annual_grouped.reset_index(inplace=True)
print("Most travelled to State per year:")
Passengers_dest_annualy = pd.merge(
Passengers_dest_annualy, annual_grouped, left_on="sum", right_on="max"
)
Passengers_dest_annualy.drop(["Fly_year_y", "sum"], axis=1, inplace=True)
Passengers_dest_annualy.columns = [
"Fly_year",
"Destination_state",
"Max_no_of_passengers",
]
Passengers_dest_annualy[
"Max_no_of_passengers"
] = Passengers_dest_annualy.Max_no_of_passengers.apply(lambda x: "{:,}".format(x))
Passengers_dest_annualy
# Texas and California are the most visited states!
# Top arrival and departure cities
create_table(
df_airport["Origin_city"]
.value_counts()
.head(3)
.rename_axis("Origin_city")
.reset_index(name="No of flights"),
"Origin_city",
"No of flights",
"Top departure cities: ",
)
create_table(
df_airport["Destination_city"]
.value_counts()
.head(3)
.rename_axis("Destination_city")
.reset_index(name="No of flights"),
"Destination_city",
"No of flights",
"Top arrival cities: ",
)
# Make plot to compare passengers and flights monthly trends
Flights_count = df_airport.groupby("Fly_month")["Flights"].agg(["mean"])
Flights_count.reset_index(inplace=True)
Passengers_count = df_airport.groupby("Fly_month")["Passengers"].agg(["mean"])
Passengers_count.reset_index(inplace=True)
fig, axs = plt.subplots(2)
fig.suptitle("Passengers vs Flights monthly trend")
axs[0].plot(Passengers_count["Fly_month"], Passengers_count["mean"])
axs[1].plot(Flights_count["Fly_month"], Flights_count["mean"])
# plot annual flight counts in bar chart
Flights_count_annual = df_airport.groupby("Fly_year")["Flights"].agg(["sum"])
Flights_count_annual.reset_index(inplace=True)
ax = Flights_count_annual.plot.bar(
x="Fly_year",
y="sum",
rot=70,
title="Annual Flights count",
color=(240 / 255, 83 / 255, 101 / 255),
ylabel="No of Flights",
fontsize="large",
figsize=(10, 4),
)
current_values = plt.gca().get_yticks()
plt.gca().set_yticklabels(["{:,.0f}".format(x) for x in current_values])
plt.show()
# plot annual passengers counts in bar chart
passengers_count_annual = df_airport.groupby("Fly_year")["Passengers"].agg(["sum"])
passengers_count_annual.reset_index(inplace=True)
ax = passengers_count_annual.plot.bar(
x="Fly_year",
y="sum",
rot=70,
title="Passengers count",
color=(240 / 255, 83 / 255, 101 / 255),
ylabel="No of Passengers",
fontsize="large",
figsize=(10, 4),
)
current_values = plt.gca().get_yticks()
plt.gca().set_yticklabels(["{:,.0f}".format(x) for x in current_values])
plt.show()
# we can see the 9/11 and Great Recession effects more clearly
# drop unused columns to speed up computation
df_airport["Path"] = (
df_airport["Origin_airport"] + "-" + df_airport["Destination_airport"]
)
df_airport = df_airport[
["Fly_year", "Path", "Origin_city", "Destination_city", "Passengers"]
]
table = PrettyTable()
table.hrules = ALL
table.field_names = [
"Fly_year",
"Path",
"Origin_city",
"Destination_city",
"Passengers",
]
for i in range(1990, 2010):
df1 = df_airport.loc[df_airport.Fly_year == i]
df1 = df1.sort_values(by="Passengers", ascending=False).head(n=1)
df1.reset_index(inplace=True)
df1["Passengers"] = df1.Passengers.apply(lambda x: "{:,}".format(x))
table.add_row(
[
df1["Fly_year"][0],
df1["Path"][0],
df1["Origin_city"][0],
df1["Destination_city"][0],
df1["Passengers"][0],
]
)
print(f"\t\t\t** Most Used flight path **")
print(table)
|
# Predicting Stock Prices
# How to use LSTMs
# **Context**
# > This notebook is designed to demonstrate a concise script for predicting stock prices utilizing a Long Short-Term Memory (LSTM) model. I have provided an introduction to Time Series Analysis, which I encourage you to review if you have not already done so.
# **Reminder**
# > A Time Series is a **time-indexed** series of data. In Finance, a time series tracks the movement of the chosen data points, such as a *security’s price*, over a specified period of time with data points recorded at **regular intervals**.
# **Why is it used for ?**
# > Time series analysis can be useful to see how a given asset, security, or economic variable changes over time. It can also be used to examine how the changes associated with the chosen data point compare to shifts in other variables over the same time period.
# >
# > For example, suppose you wanted to analyze a time series of daily closing stock prices for a given stock over a period of one year. You would obtain a list of all the closing prices for the stock from each day for the past year and list them in chronological order.
# ## Summary
# **1. Import libraries**
# **2. Preprocessing**
# **3. Build LSTM model**
# **4. Training**
# **5. Predictions**
# Imports
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import pandas_datareader as web
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
# **Back to summary**
# ----
# Preprocessing
# Load csv
df = pd.read_csv(
"../input/cac40-stocks-dataset/preprocessed_CAC40.csv", parse_dates=["Date"]
)
def load_data(company, start, end):
"""
Load data for the specified company and date range.
:param company: The company's stock symbol (str)
:param start: The starting date for the data range (str or datetime)
:param end: The ending date for the data range (str or datetime)
:return: A dataframe containing the relevant stock data (pandas.DataFrame)
"""
dataframe = df.copy()
dataframe = dataframe.loc[dataframe.Name == company, :]
dataframe = dataframe.loc[
(dataframe["Date"] > start) & (dataframe["Date"] < end), :
]
dataframe = dataframe.rename(columns={"Closing_Price": "Close"})
return dataframe
COMPANY = "Accor"
START_DATE = dt.datetime(2015, 1, 1)
END_DATE = dt.datetime(2020, 1, 1)
START_DATE_TEST = END_DATE
data = load_data(company=COMPANY, start=START_DATE, end=END_DATE)
# Normalize data
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data["Close"].values.reshape(-1, 1))
# Set the number of days used for prediction
prediction_days = 60
# Initialize empty lists for training data input and output
x_train = []
y_train = []
# Iterate through the scaled data, starting from the prediction_days index
for x in range(prediction_days, len(scaled_data)):
# Append the previous 'prediction_days' values to x_train
x_train.append(scaled_data[x - prediction_days : x, 0])
# Append the current value to y_train
y_train.append(scaled_data[x, 0])
# Convert the x_train and y_train lists to numpy arrays
x_train, y_train = np.array(x_train), np.array(y_train)
# Reshape x_train to a 3D array with the appropriate dimensions for the LSTM model
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
# **Back to summary**
# ----
# LSTM Model
# **What is a LSTM ?**
# > Long Short Term Memory networks – usually just called “LSTMs” – are a special kind of RNN, capable of learning long-term dependencies. They were introduced by Hochreiter & Schmidhuber (1997) and refined and popularized by many people in subsequent work. They work tremendously well on a large variety of problems and are now widely used.
# >
# > LSTMs are explicitly designed to avoid the long-term dependency problem. Remembering information for long periods of time is practically their default behavior, not something they struggle to learn!
# >
# > All recurrent neural networks have the form of a chain of repeating modules of neural network.
# You can find more details here: http://colah.github.io/posts/2015-08-Understanding-LSTMs/
def LSTM_model():
"""
Create and configure an LSTM model for stock price prediction.
:return: The configured LSTM model (keras.Sequential)
"""
# Initialize a sequential model
model = Sequential()
# Add the first LSTM layer with 50 units, input shape, and return sequences
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
# Add dropout to prevent overfitting
model.add(Dropout(0.2))
# Add a second LSTM layer with 50 units and return sequences
model.add(LSTM(units=50, return_sequences=True))
# Add dropout to prevent overfitting
model.add(Dropout(0.2))
# Add a third LSTM layer with 50 units
model.add(LSTM(units=50))
# Add dropout to prevent overfitting
model.add(Dropout(0.2))
# Add a dense output layer with one unit
model.add(Dense(units=1))
return model
# **Back to summary**
# ----
# Training
model = LSTM_model()
model.summary()
model.compile(optimizer="adam", loss="mean_squared_error")
# Define callbacks
# Save weights only for best model
checkpointer = ModelCheckpoint(
filepath="weights_best.hdf5", verbose=2, save_best_only=True
)
model.fit(x_train, y_train, epochs=25, batch_size=32, callbacks=[checkpointer])
# **Back to summary**
# ----
# Inference
# Load test data for the specified company and date range
test_data = load_data(company=COMPANY, start=START_DATE_TEST, end=dt.datetime.now())
# Extract the actual closing prices from the test data
actual_prices = test_data["Close"].values
# Concatenate the training and test data along the 'Close' column
total_dataset = pd.concat((data["Close"], test_data["Close"]), axis=0)
# Extract the relevant portion of the dataset for model inputs
model_inputs = total_dataset[
len(total_dataset) - len(test_data) - prediction_days :
].values
# Reshape the model inputs to a 2D array with a single column
model_inputs = model_inputs.reshape(-1, 1)
# Apply the same scaling used for training data to the model inputs
model_inputs = scaler.transform(model_inputs)
# Initialize an empty list for test data input
x_test = []
# Iterate through the model inputs, starting from the prediction_days index
for x in range(prediction_days, len(model_inputs)):
# Append the previous 'prediction_days' values to x_test
x_test.append(model_inputs[x - prediction_days : x, 0])
# Convert the x_test list to a numpy array
x_test = np.array(x_test)
# Reshape x_test to a 3D array with the appropriate dimensions for the LSTM model
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
# Generate price predictions using the LSTM model
predicted_prices = model.predict(x_test)
# Invert the scaling applied to the predicted prices to obtain actual values
predicted_prices = scaler.inverse_transform(predicted_prices)
# Plot the actual prices using a black line
plt.plot(actual_prices, color="black", label=f"Actual {COMPANY} price")
# Plot the predicted prices using a green line
plt.plot(predicted_prices, color="green", label=f"Predicted {COMPANY} price")
# Set the title of the plot using the company name
plt.title(f"{COMPANY} share price")
# Set the x-axis label as 'time'
plt.xlabel("time")
# Set the y-axis label using the company name
plt.ylabel(f"{COMPANY} share price")
# Display a legend to differentiate the actual and predicted prices
plt.legend()
# Show the plot on the screen
plt.show()
# Extract the last 'prediction_days' values from the model inputs
real_data = [
    model_inputs[len(model_inputs) - prediction_days : len(model_inputs), 0]
]
# Convert the real_data list to a numpy array
real_data = np.array(real_data)
# Reshape real_data to a 3D array with the appropriate dimensions for the LSTM model
real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1], 1))
# Generate a prediction using the LSTM model with the real_data input
prediction = model.predict(real_data)
# Invert the scaling applied to the prediction to obtain the actual value
prediction = scaler.inverse_transform(prediction)
# Print the prediction result to the console
print(f"Prediction: {prediction[0][0]}")
|
# # CS6220 Sprint 2023 Final Project (Peter Liu)
# ## A. Preparation part
# ### 1. Check data paths, initial format and decide what data to use
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
# To display all columns of dataframe
pd.set_option("display.max_columns", None)
import os
for dirname, _, filenames in os.walk(
"/kaggle/input/house-prices-advanced-regression-techniques"
):
for filename in filenames:
print(os.path.join(dirname, filename))
# print data description:
with open(
"/kaggle/input/house-prices-advanced-regression-techniques/data_description.txt",
"r",
) as f:
desc = f.readlines()
print("".join(desc))  # join the lines so the description prints as readable text
# Exploring shapes of dataset - train
raw_train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
raw_train = raw_train.set_index("Id")
print(raw_train.shape)
raw_train.head()
# Exploring shapes of dataset - test
raw_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
raw_test = raw_test.set_index("Id")
print(raw_test.shape)
raw_test.head()
# Exploring shapes of dataset - sample_submission
# We know that this is not useful data, but an indication of
# what is to be submitted if we are in the contest
sub = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
sub = sub.set_index("Id")
print(sub.shape)
sub.head()
# ### 2. Data cleaning
# The following are the 19 columns with missing data that need special processing
# Check na, see if they are missing data or real value
na_cols = raw_train.isna().sum()[raw_train.isna().sum() != 0]
na_cols
for col in na_cols.index:
print(f"\nInpsecting {col}, {na_cols.loc[col]} nos. of na value")
print(pd.unique(raw_train[col]))
# real na means the data is actually missing
real_na_cols = ["LotFrontage", "MasVnrType", "MasVnrArea", "Electrical"]
# useful na means the na is a category and actually represents a meaning
useful_na_cols = [
"Alley",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"FireplaceQu",
"GarageType",
"GarageYrBlt",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
]
# #### 2.1 Function for alerting NAs and print the columns with na values
"""
alert_na: identify any na values within the dataframe,
and print out any column name that has at least one na value as an alert
This function does not modify data
input: dataframe
return: none
"""
def alert_na(df):
# Check whole table has any na
if df.isna().any().any():
print("There are still na values, should be removed")
        print(df.isna().any()[df.isna().any()].index)
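# A quick check of alert_na on a hypothetical toy frame (not project data): a frame
# containing a NaN triggers the alert and prints the offending column.
alert_na(pd.DataFrame({"a": [1, None], "b": [1, 2]}))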
# #### 2.2 Function for data cleaning and get rid of NAs
"""
data_cleaning: after summarizing the data in section 2, all columns with na values are analyzed. Two groups are presented.
Firstly, those with useful na are replaced with text "NA" to signal that it is meant to be one of the categories
Secondly, for those na values that signals missing value, mean or mode is used to replace the values
Finally, checking is done to make sure there are no na values
This function modifies data inplace
input: dataframe
return: none
"""
def data_cleaning(df):
# Replace useful na with another value so it can be distinguished later
for col in useful_na_cols:
df[col].fillna("NA", inplace=True)
# Checking if na values are all replaced
if df[col].isna().any():
print(f"{col} na values not fully replaced")
# Fill in real na data case by case and check for na afterwards
# LotFrontage replace with mean
df["LotFrontage"].fillna(df["LotFrontage"].mean().round(0), inplace=True)
# MasVnrType replace with most popular type
df["MasVnrType"].fillna(df["MasVnrType"].mode().tolist()[0], inplace=True)
# MasVnrArea replace with mean
df["MasVnrArea"].fillna(df["MasVnrArea"].mean().round(0), inplace=True)
# Electrical replace with most popular type
df["Electrical"].fillna(df["Electrical"].mode().tolist()[0], inplace=True)
for col in real_na_cols:
# Checking if na values are all replaced
if df[col].isna().any():
print(f"{col} na values not fully replaced")
alert_na(df)
# ### 3. Function for feature engineering to make the best use of data
"""
feature_engineering: Given all the attributes, it is natural that some attributes can be combined
into meaningful combinations. Added features bring additional context to the model and tend to improve the accuracy.
This function modifies data inplace
input: dataframe
return: none
"""
def feature_engineering(df):
# total
df["TotalPorchArea"] = df[
["WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "3SsnPorch", "ScreenPorch"]
].sum(axis=1)
df["TotalBath"] = df[["BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath"]].sum(
axis=1
)
# booleans
df["CentralAir"] = np.where(
df["CentralAir"] == "N", 0, 1
    )  # CentralAir itself is a boolean
df["hasBsmt"] = np.where(df["TotalBsmtSF"] == 0, 0, 1)
df["has2ndFlr"] = np.where(df["2ndFlrSF"] == 0, 0, 1)
df["hasFirePlace"] = np.where(df["FireplaceQu"] == "NA", 0, 1)
df["hasAlley"] = np.where(df["Alley"] == "NA", 0, 1)
df["hasFence"] = np.where(df["Fence"] == "NA", 0, 1)
df["Street"] = np.where(df["Street"] == "Pave", 0, 1) # Street itself is a boolean
df["hasPool"] = np.where(df["PoolArea"] == 0, 0, 1)
df["hasMasVnr"] = np.where(df["MasVnrArea"] == 0, 0, 1)
df["hasGarage"] = np.where(df["GarageArea"] == 0, 0, 1)
df["hasPorch"] = np.where(df["TotalPorchArea"] == 0, 0, 1)
# ### 4. Encoding categorical data, which preserves the order of categories
"""
rank_encode: Going through the description, many categorical columns have hidden rankings.
For instance, ExterQual is split into the highest Ex, then Gd, TA, Fa, and the lowest Po.
This ordering must be captured on a scale following the description.
This preserves crucial information that would be lost if categories were assigned arbitrary codes
This function modifies data inplace
input: dataframe
return: none
"""
def rank_encode(df):
ranked_categorical = [
"Alley",
"LotShape",
"LandContour",
"LotConfig",
"LandSlope",
"HouseStyle",
"RoofStyle",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"HeatingQC",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageFinish",
"GarageQual",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"SaleType",
"SaleCondition",
]
    # Reading the category order from the desc variable and assigning rankings accordingly
cat_dict = dict()
counter = 0
for line in desc:
try:
# column names only has ":" without any tabs
if ":" in line and "\t" not in line:
# set dict to be filled
# resets zero every time new column comes along
counter = 0
temp_col = line.split(":")[0]
cat_dict[temp_col] = dict()
# It means parse and fill in dictionary
elif "\t" in line and "".join(e for e in line if e.isalnum()) != "":
cat_dict[temp_col][line.split("\t")[0].strip()] = counter
counter += 1
except:
# skip in case abnormal line structure
pass
for cat in ranked_categorical:
df[cat] = df[cat].map(cat_dict[cat])
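# A small illustration of the kind of ordinal mapping rank_encode builds (hypothetical values,
# assuming data_description.txt lists the ExterQual levels in the order Ex, Gd, TA, Fa, Po):
_example_mapping = {"Ex": 0, "Gd": 1, "TA": 2, "Fa": 3, "Po": 4}
print(pd.Series(["Gd", "TA", "Ex"]).map(_example_mapping).tolist())  # -> [1, 2, 0]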
# ### 5. Standardization of data
"""
normalize: Normalization is performed so that the regression model achieves higher accuracy and converges faster
It makes each column have mean = 0 and standard deviation = 1
It then fills NaN because a column can contain a single constant value (e.g. only 0), in which case dividing by a zero standard deviation returns NaN
This function modifies data inplace
input: dataframe
return: none
"""
def normalize(df):
df -= df.mean()
df /= df.std()
# Sometimes the data has only one value (i.e. only 0),
# and it makes the above operation return nan, replace nan with zero
df.fillna(0, inplace=True)
assert df.mean().sum() < 1e-10, "dataframe not normalized"
assert (df.std() - 1).sum() < 1e-10, "dataframe not normalized"
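# A tiny sanity-check sketch on a hypothetical toy frame (not project data): after normalize,
# numeric columns have mean 0 and std 1, and constant columns are filled with 0.
_demo = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [0.0, 0.0, 0.0]})
normalize(_demo)
print(_demo.round(3))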
# ### 6. Function for filtering top relevant factors
"""
getTopVar: uses the correlation matrix to measure the importance of each feature against SalePrice, takes
absolute values (since strongly negative correlations are also highly informative), and ranks them in descending order
It picks the top nosOfTopVar features and returns them as the factors with the highest importance in price prediction
This function does not modify data inplace
input:
X: dataframe without target value
y: Series of target value
nosOfTopVar: int the number of top factors to be picked and to be returned
return_corr: boolean value stating if correlation matrix is to be returned
    excludeSalePrice: boolean value stating whether SalePrice itself is excluded from the returned list; set to False only for plotting
return:
topVar: list of top factor that are the most correlated to saleprice as determined by correlation matrix
corr_series: the correlation list with respect to sales price for top picked attributes
"""
def getTopVar(X, y, nosOfTopVar, return_corr=False, excludeSalePrice=True):
corr_series = X.join(y).corr()["SalePrice"].sort_values(ascending=False)
    # Pick the most relevant variables with respect to sale price.
    # If excludeSalePrice is True (True * 1 = 1) the slice starts at 1 and skips SalePrice itself;
    # if False (False * 1 = 0) the slice starts at 0 and SalePrice is included
topVar = (
corr_series.abs()
.sort_values(ascending=False)
.index[excludeSalePrice * 1 : nosOfTopVar + 1]
)
if return_corr:
return topVar, corr_series
return topVar
# ### 7. Function for plotting
"""
plot: plotting the graphs:
    1. histogram of SalePrice vs count
    2. a heatmap showing the top contributing variables with respect to sale price
    3. bar chart showing the actual values of topVar correlation with SalePrice in detail
    4. pairplot of each topVar against SalePrice
input:
    X: dataframe without target value
    y: Series of target value
    nosOfTopVar: int, the number of top factors to plot
return: none
"""
def plot(X, y, nosOfTopVar):
topVar = getTopVar(X, y, nosOfTopVar)
# Sale price histogram
plt.hist(y, edgecolor="red", bins=20)
plt.title("SalePrice histogram")
plt.xlabel("Price")
plt.ylabel("Count")
    plt.show()
# heatmap
topVar, corr_series = getTopVar(
X, y, nosOfTopVar, return_corr=True, excludeSalePrice=False
) # SalePrice necessary for plot
plot_corr_series = corr_series[topVar].sort_values(ascending=False)
# pick the top vars on index and columns respectively
top_corr_df = X.join(y).corr()[topVar].loc[topVar]
# To sort the correlation matrix on x and y axis
top_corr_df = (
top_corr_df.sort_values("SalePrice", ascending=False)
.transpose()
.sort_values("SalePrice", ascending=False)
)
plt.figure(figsize=(12, 10))
sns.heatmap(top_corr_df, cmap="coolwarm", annot=True, annot_kws={"fontsize": 8})
plt.title(
f"Heatmap of top {nosOfTopVar} nos. of factor's correlation with SalePrice"
)
plt.show()
    # correlation ranking plot as a bar chart, reuses plot_corr_series
plt.figure(figsize=(12, 6))
plt.grid()
# cutting the first item since it must be saleprice, i.e. saleprice is 100% correlating with saleprice, which is not useful to plot
plot_corr_series = plot_corr_series.iloc[1:]
plt.bar(plot_corr_series.index, plot_corr_series.values)
plt.xticks(rotation=90)
plt.title(f"Top {nosOfTopVar} factor correlated to sale price")
plt.xlabel("Factors")
plt.ylabel("Deg of corr")
    plt.show()
# pairplot of topVar against Saleprice
g = sns.pairplot(X.join(y), x_vars=["SalePrice"], y_vars=topVar, height=4)
g.fig.suptitle("Pairplot of top factors", y=1.01)
# ### 8. Constants definition
TOPVAR = 20
TEST_SIZE = 0.2
MAX_TOP_VAR = 60
MAX_SEED = 50
random_seed = 0
# ### 9. Function for data Preprocessing
"""
prepare_data: gets the data (with all attribute and target value) and all other input, perform all preprocessing
and return the desired value to be put into model
input:
input_df: dataframe to be preprocessed
_test_size: portion of test size to all data to be analyzed
_random_seed: the seed to perform train_test_split on input_df
    plot_graph: boolean indicating whether the EDA plots (section 7) are drawn
return:
    _X_train, _X_test, _y_train, _y_test: the preprocessed train/test splits
"""
def prepare_data(input_df, _test_size, _random_seed, plot_graph=False):
_X_train, _X_test, _y_train, _y_test = train_test_split(
input_df.iloc[:, :-1],
input_df.iloc[:, -1],
test_size=_test_size,
random_state=_random_seed,
)
# -------- Preparation X_train --------
data_cleaning(_X_train)
feature_engineering(_X_train)
# Only run ONCE
rank_encode(_X_train)
# Remove all categorical data
_X_train = _X_train[_X_train.describe().columns]
if plot_graph:
plot(_X_train, _y_train, TOPVAR)
normalize(_X_train)
# Make sure again we do not have any na after mapping categorical values
alert_na(_X_train)
# -------- Preparation X_test --------
data_cleaning(_X_test)
feature_engineering(_X_test)
# Only run ONCE
rank_encode(_X_test)
# Remove all categorical data
_X_test = _X_test[_X_test.describe().columns]
normalize(_X_test)
# Make sure again we do not have any na after mapping categorical values
alert_na(_X_test)
return _X_train, _X_test, _y_train, _y_test
# ## B. Exploratory Data Analysis
raw_train.info()
raw_train.describe()
_, _, _, _ = prepare_data(raw_train, TEST_SIZE, random_seed, plot_graph=True)
# ## C. Modelling
# ### 1. Linear Regression modelling
# #### 1.1 Linear Regression modelling based on certain train_test_split
linear = LinearRegression()
random_seed = 42
# Use raw_train only as it is the only part of data that has corresponding true y value in it
X_train, X_test, y_train, y_test = prepare_data(raw_train, TEST_SIZE, random_seed)
# Tuning parameters: so far only the nos. of top factors included in the regression is adjusted
score_dict = dict()
for i in range(1, MAX_TOP_VAR + 1):
topVar = getTopVar(X_train, y_train, i)
_reg = linear.fit(X_train[topVar], y_train)
score_dict[i] = list()
score_dict[i].append(_reg.score(X_train[topVar], y_train))
score_dict[i].append(_reg.score(X_test[topVar], y_test))
score_df = pd.DataFrame.from_dict(
score_dict, orient="index", columns=["train_R2", "test_R2"]
)
plt.plot(score_df.index, score_df["train_R2"], label="Train R2")
plt.plot(score_df.index, score_df["test_R2"], label="Test R2")
plt.legend(loc="lower left")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("R2 score")
plt.title("Number of top factor on accuracy of model with random seed = 42")
plt.grid()
plt.show()
# #### 1.2 Regression modelling based on another train_test_split
random_seed = 3
# Use raw_train only as it is the only part of data that has corresponding true y value in it
X_train, X_test, y_train, y_test = prepare_data(raw_train, TEST_SIZE, random_seed)
# Tuning parameters: so far only the nos. of top factors included in the regression is adjusted
score_dict = dict()
for i in range(1, MAX_TOP_VAR + 1):
topVar = getTopVar(X_train, y_train, i)
_reg = linear.fit(X_train[topVar], y_train)
score_dict[i] = list()
score_dict[i].append(_reg.score(X_train[topVar], y_train))
score_dict[i].append(_reg.score(X_test[topVar], y_test))
score_df = pd.DataFrame.from_dict(
score_dict, orient="index", columns=["train_R2", "test_R2"]
)
plt.plot(score_df.index, score_df["train_R2"], label="Train R2")
plt.plot(score_df.index, score_df["test_R2"], label="Test R2")
plt.legend(loc="lower left")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("R2 score")
plt.title("Number of top factor on accuracy of model with random seed = 3")
plt.grid()
plt.show()
# #### 1.3 Cross validation with different random seeds, averaging across all seeds for mean R2 score for train and test data for regression model
_train_dict_linear = dict()
_test_dict_linear = dict()
_test_dict_linear_mae = dict()
for j in range(1, MAX_TOP_VAR + 1):
_train_dict_linear[j] = list()
_test_dict_linear[j] = list()
_test_dict_linear_mae[j] = list()
# Looping through different train_test_split randomness to perform cross validation on accuracy score
for i in range(MAX_SEED):
X_train, X_test, y_train, y_test = prepare_data(raw_train, TEST_SIZE, i)
    # Gradually populate the score dictionaries so the averages can be plotted later
for j in range(1, MAX_TOP_VAR + 1):
topVar = getTopVar(X_train, y_train, j)
_reg = linear.fit(X_train[topVar], y_train)
_train_dict_linear[j].append(_reg.score(X_train[topVar], y_train))
_test_dict_linear[j].append(_reg.score(X_test[topVar], y_test))
_test_dict_linear_mae[j].append(
mean_absolute_error(y_test, _reg.predict(X_test[topVar]))
)
# Average the train and test R2 scores across all seeds
overall_score_df_linear_avg = pd.concat(
[
pd.DataFrame.from_dict(_train_dict_linear, orient="index").mean(axis=1),
pd.DataFrame.from_dict(_test_dict_linear, orient="index").mean(axis=1),
],
axis=1,
)
overall_score_df_linear_avg.columns = ["overall_train_R2", "overall_test_R2"]
plt.plot(
overall_score_df_linear_avg.index,
overall_score_df_linear_avg["overall_train_R2"],
label="Overall Train R2",
)
plt.plot(
overall_score_df_linear_avg.index,
overall_score_df_linear_avg["overall_test_R2"],
label="Overall Test R2",
)
plt.legend(loc="lower right")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Overall R2 score with cross validation")
plt.title(
"Number of top factor on accuracy of linear regression model\nCross validation with different random seeds, averaging across all seeds for mean R2 score"
)
plt.grid()
plt.show()
# Checking the degree of overfitting
plt.plot(
overall_score_df_linear_avg["overall_train_R2"]
- overall_score_df_linear_avg["overall_test_R2"]
)
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Overfitting-caused-divergence (R2 score)\nwith cross validation")
plt.title(
"Degree of overfitting (R2 score) with respect to\nnos. of top factor involved on linear regression"
)
plt.grid()
plt.show()
# Checking the std of train/test scores across all random-seed cross validation
overall_score_df_linear_std = pd.concat(
[
pd.DataFrame.from_dict(_train_dict_linear, orient="index").std(axis=1),
pd.DataFrame.from_dict(_test_dict_linear, orient="index").std(axis=1),
],
axis=1,
)
overall_score_df_linear_std.columns = ["overall_train_R2", "overall_test_R2"]
plt.plot(
overall_score_df_linear_std.index,
overall_score_df_linear_std["overall_train_R2"],
label="Overall Train R2",
)
plt.plot(
overall_score_df_linear_std.index,
overall_score_df_linear_std["overall_test_R2"],
label="Overall Test R2",
)
plt.legend(loc="lower right")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Overall R2 score standard deviation\nwith cross validation")
plt.title(
"Standard deviation of accuracy of linear regression model\nCross validation with different random seeds for R2 score"
)
plt.grid()
plt.show()
plt.plot(pd.DataFrame.from_dict(_test_dict_linear_mae, orient="index").mean(axis=1))
plt.ylabel("MAE of price prediction")
plt.xlabel("Nos. of top variable")
plt.title(
"Mean absolute error for SalePrice out-of-sample prediction\nLinear Regression"
)
plt.grid()
plt.show()
# ### 2. Gradient Boosting Regression (referred to below as XG Boost) modelling, repeating the Linear Regression modelling steps above
xg_boost = GradientBoostingRegressor(random_state=0)
random_seed = 42
# Use raw_train only as it is the only part of data that has corresponding true y value in it
X_train, X_test, y_train, y_test = prepare_data(raw_train, TEST_SIZE, random_seed)
# Tuning parameters: so far only the nos. of top factors included in the regression is adjusted
score_dict = dict()
for i in range(1, MAX_TOP_VAR + 1):
topVar = getTopVar(X_train, y_train, i)
    _reg = GradientBoostingRegressor(random_state=0).fit(X_train[topVar], y_train)
score_dict[i] = list()
score_dict[i].append(_reg.score(X_train[topVar], y_train))
score_dict[i].append(_reg.score(X_test[topVar], y_test))
score_df = pd.DataFrame.from_dict(
score_dict, orient="index", columns=["train_R2", "test_R2"]
)
plt.plot(score_df.index, score_df["train_R2"], label="Train R2")
plt.plot(score_df.index, score_df["test_R2"], label="Test R2")
plt.legend(loc="lower left")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("R2 score")
plt.title("Number of top factor on accuracy of model with random seed = 42")
plt.grid()
plt.show()
random_seed = 3
# Use raw_train only as it is the only part of data that has corresponding true y value in it
X_train, X_test, y_train, y_test = prepare_data(raw_train, TEST_SIZE, random_seed)
# Tuning parameters: so far only the nos. of top factors included in the regression is adjusted
score_dict = dict()
for i in range(1, MAX_TOP_VAR + 1):
topVar = getTopVar(X_train, y_train, i)
    _reg = GradientBoostingRegressor(random_state=0).fit(X_train[topVar], y_train)
score_dict[i] = list()
score_dict[i].append(_reg.score(X_train[topVar], y_train))
score_dict[i].append(_reg.score(X_test[topVar], y_test))
score_df = pd.DataFrame.from_dict(
score_dict, orient="index", columns=["train_R2", "test_R2"]
)
plt.plot(score_df.index, score_df["train_R2"], label="Train R2")
plt.plot(score_df.index, score_df["test_R2"], label="Test R2")
plt.legend(loc="lower left")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("R2 score")
plt.title("Number of top factor on accuracy of model with random seed = 3")
plt.grid()
plt.show()
# =====================================================================================================
_train_dict_xg = dict()
_test_dict_xg = dict()
_test_dict_xg_mae = dict()
for j in range(1, MAX_TOP_VAR + 1):
_train_dict_xg[j] = list()
_test_dict_xg[j] = list()
_test_dict_xg_mae[j] = list()
# Looping through different train_test_split randomness to perform cross validation on accuracy score
for i in range(MAX_SEED):
X_train, X_test, y_train, y_test = prepare_data(raw_train, TEST_SIZE, i)
    # Gradually populate the score dictionaries so the averages can be plotted later
for j in range(1, MAX_TOP_VAR + 1):
topVar = getTopVar(X_train, y_train, j)
        _reg = GradientBoostingRegressor(random_state=0).fit(X_train[topVar], y_train)
_train_dict_xg[j].append(_reg.score(X_train[topVar], y_train))
_test_dict_xg[j].append(_reg.score(X_test[topVar], y_test))
_test_dict_xg_mae[j].append(
mean_absolute_error(y_test, _reg.predict(X_test[topVar]))
)
# Average the train and test R2 scores across all seeds
overall_score_df_xg_avg = pd.concat(
[
pd.DataFrame.from_dict(_train_dict_xg, orient="index").mean(axis=1),
pd.DataFrame.from_dict(_test_dict_xg, orient="index").mean(axis=1),
],
axis=1,
)
overall_score_df_xg_avg.columns = ["overall_train_R2", "overall_test_R2"]
plt.plot(
overall_score_df_xg_avg.index,
overall_score_df_xg_avg["overall_train_R2"],
label="Overall Train R2",
)
plt.plot(
overall_score_df_xg_avg.index,
overall_score_df_xg_avg["overall_test_R2"],
label="Overall Test R2",
)
plt.legend(loc="lower right")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Overall R2 score with cross validation")
plt.title(
"Number of top factor on accuracy of XG Boost regression model\nCross validation with different random seeds, averaging across all seeds for mean R2 score"
)
plt.grid()
plt.show()
# Checking the degree of overfitting
plt.plot(
overall_score_df_xg_avg["overall_train_R2"]
- overall_score_df_xg_avg["overall_test_R2"]
)
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Overfitting-caused-divergence (R2 score)\nwith cross validation")
plt.title(
"Degree of overfitting (R2 score) with respect to\nnos. of top factor involved on XG Boost regression"
)
plt.grid()
plt.show()
# Checking the std of train/test scores across all random-seed cross validation
overall_score_df_xg_std = pd.concat(
[
pd.DataFrame.from_dict(_train_dict_xg, orient="index").std(axis=1),
pd.DataFrame.from_dict(_test_dict_xg, orient="index").std(axis=1),
],
axis=1,
)
overall_score_df_xg_std.columns = ["overall_train_R2", "overall_test_R2"]
plt.plot(
overall_score_df_xg_std.index,
overall_score_df_xg_std["overall_train_R2"],
label="Overall Train R2",
)
plt.plot(
overall_score_df_xg_std.index,
overall_score_df_xg_std["overall_test_R2"],
label="Overall Test R2",
)
plt.legend(loc="lower right")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Overall R2 score std\nwith cross validation")
plt.title(
"Standard deviation of accuracy of XS Boost regression model\nCross validation with different random seeds for R2 score"
)
plt.grid()
plt.show()
plt.plot(pd.DataFrame.from_dict(_test_dict_xg_mae, orient="index").mean(axis=1))
plt.ylabel("MAE of price prediction")
plt.xlabel("Nos. of top variable")
plt.title(
"Mean absolute error for SalePrice out-of-sample prediction\nXG Boost Regression"
)
plt.grid()
plt.show()
# ### 3. Cross model comparison
plt.plot(overall_score_df_linear_avg["overall_test_R2"], label="Linear")
plt.plot(overall_score_df_xg_avg["overall_test_R2"], label="XG Boost")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Overall R2 score with cross validation")
plt.title(
"Cross-validated average for out-of-sample R2 score\nfor Linear and XG Boost Regression"
)
plt.legend(loc="lower right")
plt.grid()
plt.show()
plt.plot(overall_score_df_linear_std["overall_test_R2"], label="Linear")
plt.plot(overall_score_df_xg_std["overall_test_R2"], label="XG Boost")
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("Std of R2 score with cross validation")
plt.title(
"Cross-validated standard deviation for out-of-sample R2 score\nfor Linear and XG Boost Regression"
)
plt.legend(loc="lower right")
plt.grid()
plt.show()
plt.plot(
pd.DataFrame.from_dict(_test_dict_linear_mae, orient="index").mean(axis=1),
label="Linear",
)
plt.plot(
pd.DataFrame.from_dict(_test_dict_xg_mae, orient="index").mean(axis=1),
label="XG Boost",
)
plt.xlabel("Number of factor (most relevant) used in model")
plt.ylabel("MAE score with cross validation")
plt.title(
"Cross-validated average for out-of-sample MAE score\nfor Linear and XG Boost Regression"
)
plt.legend(loc="lower right")
plt.grid()
plt.show()
|
import riiideducation
env = riiideducation.make_env()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import dask.dataframe as dd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
dtypes = {
"row_id": "int64",
"timestamp": "int64",
"user_id": "int32",
"content_id": "int16",
"content_type_id": "boolean",
"task_container_id": "int16",
"user_answer": "int8",
"answered_correctly": "int8",
"prior_question_elapsed_time": "float32",
"prior_question_had_explanation": "boolean",
}
# Training data is in the competition dataset as usual
# train_df = pd.read_csv('../input/riiid-test-answer-prediction/train.csv', dtype=dtypes)
train_df = dd.read_csv(
"../input/riiid-test-answer-prediction/train.csv", dtype=dtypes
).compute()
train_df.head()
y = train_df[["answered_correctly"]]
X = train_df.drop(["row_id", "answered_correctly"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15)
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X_test, y_test)
iter_test = env.iter_test()
for test_df, sample_prediction_df in iter_test:
    # Assumes the test frame exposes the same feature columns used for training (minus the target)
    test_df["answered_correctly"] = model.predict(test_df.drop(["row_id"], axis=1))
    env.predict(test_df[["row_id", "answered_correctly"]])
|
# # Celeb Faces Mediapipe Images
# Mediapipe face detection
import cv2
import os
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mediapipe as mp
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
paths0 = []
for dirname, _, filenames in os.walk(
"/kaggle/input/celeba-dataset/img_align_celeba/img_align_celeba"
):
for filename in filenames[0:10]:
if filename[-4:] == ".jpg":
            paths0 += [os.path.join(dirname, filename)]
print(paths0[0:3])
paths = random.sample(paths0, 2)
paths2 = []
for i, path in enumerate(paths):
if i % 10 == 0:
print("i=", i)
file = path.split("/")[-1]
label = path.split("/")[-2]
image = cv2.imread(path)
image = cv2.resize(image, dsize=(400, 400))
with mp_face_detection.FaceDetection(
model_selection=1, min_detection_confidence=0.2
) as face_detection:
try:
results = face_detection.process(cv2.flip(image, 1))
if results.detections:
image_hight, image_width, _ = image.shape
annotated_image = cv2.flip(image.copy(), 1)
                # draw_landmarks expects a landmark list; face-detection results are drawn
                # one detection at a time with draw_detection (bounding box + key points)
                for detection in results.detections:
                    mp_drawing.draw_detection(annotated_image, detection)
anno_img = cv2.flip(annotated_image, 1)
cv2.imwrite(file, anno_img)
paths2 += [file]
except:
continue
# Display a random sample of the annotated images (at most 9, limited by what was saved)
n_show = min(9, len(paths2))
selected_num = random.sample(range(len(paths2)), n_show)
fig, axes = plt.subplots(3, 3, figsize=(10, 10))
for i, ax in enumerate(axes.flat):
    ax.axis("off")
    if i < n_show:
        j = selected_num[i]
        img = plt.imread(paths2[j])
        ax.imshow(img)
plt.tight_layout()
plt.show()
#!rm *
|
# This notebook deals with problems from the structy.net platform
# ## 0. Introduction
# ### max value
# Write a function, max_value, that takes in a list of numbers as an argument. The function should return the largest number in the list.
# Solve this without using any built-in list methods.
# You can assume that the list is non-empty.
# **test_00:**
# > max_value([4, 7, 2, 8, 10, 9]) # -> 10
# **test_01:**
# > max_value([10, 5, 40, 40.3]) # -> 40.3
# **test_02:**
# > max_value([-5, -2, -1, -11]) # -> -1
# **test_03:**
# > max_value([42]) # -> 42
# **test_04:**
# > max_value([1000, 8]) # -> 1000
# **test_05:**
# > max_value([1000, 8, 9000]) # -> 9000
# **test_06:**
# > max_value([2, 5, 1, 1, 4]) # -> 5
def max_value(nums):
    maximum = float("-inf")
    for n in nums:
        if n > maximum:
            maximum = n
    return maximum
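# Quick checks mirroring the test cases listed above:
assert max_value([4, 7, 2, 8, 10, 9]) == 10
assert max_value([10, 5, 40, 40.3]) == 40.3
assert max_value([-5, -2, -1, -11]) == -1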
# ### is prime
# Write a function, is_prime, that takes in a number as an argument. The function should return a boolean indicating whether or not the given number is prime.
# A prime number is a number that is only divisible by two distinct numbers: 1 and itself.
# For example, 7 is a prime because it is only divisible by 1 and 7. For example, 6 is not a prime because it is divisible by 1, 2, 3, and 6.
# You can assume that the input number is a positive integer.
# **test_00:**
# > is_prime(2) # -> True
# **test_01:**
# > is_prime(3) # -> True
# **test_02:**
# > is_prime(4) # -> False
# **test_03:**
# > is_prime(5) # -> True
# **test_04:**
# > is_prime(6) # -> False
# **test_05:**
# > is_prime(7) # -> True
# **test_06:**
# > is_prime(8) # -> False
# **test_07:**
# > is_prime(25) # -> False
# **test_08:**
# > is_prime(31) # -> True
# **test_09:**
# > is_prime(2017) # -> True
# **test_10:**
# > is_prime(2048) # -> False
# **test_11:**
# > is_prime(1) # -> False
# **test_12:**
# > is_prime(713) # -> False
#
from math import sqrt, floor
def is_prime(n):
if n <= 1:
isprime = False
else:
isprime = True
for i in range(2, floor(sqrt(n)) + 1):
if n % i == 0:
isprime = False
return isprime
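# Quick checks mirroring a few of the test cases listed above:
assert is_prime(2) and is_prime(31) and is_prime(2017)
assert not is_prime(1) and not is_prime(25) and not is_prime(713)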
# ## 1. Array and String
# ### uncompress
# Write a function, uncompress, that takes in a string as an argument. The input string will be formatted into multiple groups according to the following pattern: a number followed by a character,
# for example, '2c' or '3a'.
# The function should return an uncompressed version of the string where each 'char' of a group is repeated 'number' times consecutively. You may assume that the input string is well-formed according to the previously mentioned pattern.
# **test_00:**
# > uncompress("2c3a1t") # -> 'ccaaat'
#
# **test_01:**
# > uncompress("4s2b") # -> 'ssssbb'
#
# **test_02:**
# > uncompress("2p1o5p") # -> 'ppoppppp'
#
# **test_03:**
# > uncompress("3n12e2z") # -> 'nnneeeeeeeeeeeezz'
#
# **test_04:**
# > uncompress("127y") # -> 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy....
# First solution
def uncompress(s):
    result = ""
    num = ""
    for i in range(len(s)):
        if s[i].isnumeric():
            num += s[i]
        else:
            result += s[i] * int(num)
            num = ""
    return result
# Second Solution
def uncompress(s):
    result = ""
    j = 0  # start index of the current number group
    for i in range(len(s)):
        if s[i].isalpha():
            num = int(s[j:i])
            result += num * s[i]
            j = i + 1
    return result
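# Quick checks mirroring the test cases listed above (both solutions behave the same):
assert uncompress("2c3a1t") == "ccaaat"
assert uncompress("2p1o5p") == "ppoppppp"
assert uncompress("3n12e2z") == "nnn" + "e" * 12 + "zz"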
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
## Importing required libraries
import numpy as np, gc
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import sklearn
import seaborn as sns
from sklearn.metrics import confusion_matrix
import random
from sklearn.model_selection import KFold, GroupKFold
from xgboost import XGBClassifier
from sklearn.metrics import f1_score
import pandas as pd
import pyarrow.parquet as pq
# Load the parquet file into a PyArrow table
table = pq.read_table("/kaggle/input/how-to-get-32gb-ram/train.parquet")
# Convert the PyArrow table to a Pandas dataframe
train_df = table.to_pandas()
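# Note: pd.read_parquet("/kaggle/input/how-to-get-32gb-ram/train.parquet") would do the same
# load-and-convert in one call (pandas uses the pyarrow engine when it is installed).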
train_df.describe()
# Rename the columns in the dataframe using the labels vector
# labels = ['label_1', 'label_2', 'label_3', ...] # replace with your own labels
# df.columns = labels
train_df.columns
ROOMS = train_df.room_fqid.unique()
print("Number of rooms:", len(ROOMS))
print(ROOMS)
len(train_df)
train_df.shape[1]
train_df.columns
train_df = train_df.drop(columns=["fullscreen", "hq", "music"])
train_df.columns
train_df["event_name"].unique()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tqdm import tqdm
from math import ceil
from itertools import product
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Functions
# Several attempts to preprocess train_detailed.csv haven't revealed any advantages of using the detailed table, so train.csv was used for training. Below are the functions I used for the analysis of train_detailed.csv
def df_modification(df):
"""
Modification of primary dataframe with potential useful values
input - pandas dataframe
output - pandas dataframe with added columns
"""
# choosing the columns with dates
subset = [x for x in df.columns if "Date" in x]
# Deleting -0400 string and changing format to datetime
df[subset] = df[subset].applymap(lambda x: x.replace(" -0400", ""))
df[subset] = df[subset].apply(lambda x: pd.to_datetime(x), axis=1)
# Selecting the category labeled as InBed by the device analyser
df = df.query('value == "HKCategoryValueSleepAnalysisInBed"')
# Column with sleeping hours as time difference of end and start
df["sh"] = (df["endDate"] - df["startDate"]).dt.total_seconds() / 3600
    # The gap between this row's endDate and the next row's startDate. It could identify regions where sleeping hours should be combined
df["h_next"] = (
np.roll(df["startDate"], -1) - df["endDate"]
).dt.total_seconds() / 3600
df.loc[df.index[-1], "h_next"] = 0
    # Label if the previous row has the same device. It could be useful in combination with short sleeping hours
df["same_prev_dev"] = np.roll(df["sourceName"], 1) == df["sourceName"]
# Label regions that should be combined together
df["combine"] = False
df.loc[
df.query(
"sh <= 5 and same_prev_dev == True and h_next < 2 and h_next > 0"
).index,
"combine",
] = True
return df
def plotting_colors(df):
"""
    Simple function to observe the order of devices in the unsorted dataframe.
    It could be useful for understanding whether signals from two different devices reached the analyser. In that case the data should be filtered
"""
plt.scatter(
y=[_ for _ in range(df.shape[0])],
x=[1] * df.shape[0],
c=df["sourceName"]
.replace(dict(zip(df["sourceName"].unique(), ["yellow", "black", "blue"])))
.values,
)
    plt.show()
def one_hot(df):
"""
    One-hot encoding of the non-date columns
"""
return pd.get_dummies(df.loc[:, ["Date" not in x for x in df.columns]])
def decompose(df, period):
"""
    Seasonal decomposition of sleep hours
"""
res = seasonal_decompose(df["sleep_hours"].values, period=period)
res.plot()
plt.show()
def topNcorr(df, N):
"""
    Function returns the top N correlations with 'sleep_hours' in the one-hot dataframe
"""
df_corr = (df.join(df_onehot)).corr()
return df_corr["sleep_hours"].sort_values(ascending=False, key=abs)[1 : N + 1]
def cond_plot(window, out_vals, conv, shuffle, name):
class_preds = prediction_generation(
df_data,
df_submission,
window=window,
out_vals=out_vals,
conv=conv,
shuffle=shuffle,
)
class_preds.training(name)
class_preds.prediction(name)
class_preds.sub = class_preds.sub.assign(
window=window, out_vals=out_vals, conv=conv, shuffle=shuffle
)
return class_preds.sub
class prediction_generation:
"""The class for datasets generation, choosing of model hyperparameters and submission data generation"""
def __init__(
self,
df_data,
df_submission,
scale=True,
window=100,
out_vals=1,
train_val=0.8,
loss="mse",
conv=False,
epochs=100,
shuffle=False,
):
"""
        window - length of the sample used to predict the next values
        out_vals - the number of values to predict
        train_val - the fraction of the whole data used for the train dataset
        loss - loss function during training
        conv - whether to use convolution before the LSTM part
        epochs - number of training epochs
        shuffle - whether to shuffle the samples before splitting
"""
self.data = df_data
self.sub = df_submission
self.scale = scale
self.window = window
self.out_vals = out_vals
self.train_val = train_val
self.loss = loss
self.epochs = epochs
self.shuffle = shuffle
self.conv = conv
self.train, self.val = self.dataset()
self.model = self.get_model()
        self.best_weights = self.model.get_weights()  # will be chosen during training
def dataset(self):
"""
Creation of train and validation datasets.
"""
self.data_arr = self.data["sleep_hours"].values
if self.scale:
self.scale_factor = self.data["sleep_hours"].max()
self.data_arr /= self.scale_factor
        # Making windows of data through the whole array
X, y = [], []
for i in range(len(self.data_arr) - self.window - self.out_vals + 1):
X.append(self.data_arr[i : i + self.window].reshape(self.window, 1))
y.append(self.data_arr[i + self.window : i + self.window + self.out_vals])
dataset = tf.data.Dataset.from_tensor_slices((X, y))
self.train_ln = round(len(X) * self.train_val)
self.val_ln = len(X) - self.train_ln
if self.shuffle:
dataset = dataset.shuffle(len(X))
train = dataset.take(self.train_ln).batch(self.train_ln)
val = dataset.skip(self.train_ln).batch(self.val_ln)
return train, val
def get_model(self):
input_model = tf.keras.Input(shape=(self.window, 1))
if self.conv:
def conv_dil(x, dilation):
return tf.keras.layers.Conv1D(
1,
kernel_size=7,
dilation_rate=dilation,
padding="same",
activation="relu",
data_format="channels_last",
)(x)
concat = tf.keras.layers.Concatenate()(
[conv_dil(input_model, dil) for dil in [1, 7, 31, 50]]
)
out = tf.keras.layers.Dropout(0.1)(concat)
out = tf.keras.layers.Dense(1)(out)
else:
out = input_model
out = tf.keras.layers.LSTM(500)(out)
out = tf.keras.layers.Dropout(0.1)(out)
out = tf.keras.layers.Dense(self.out_vals)(out)
model = tf.keras.models.Model(input_model, out)
model.compile(optimizer="adam", loss=tf.keras.losses.MeanSquaredError())
return model
def training(self, name):
"""Training model"""
val_loss = self.model.evaluate(self.val, verbose=0)
best_epoch = 0
for epoch in tqdm(
range(self.epochs), position=0, leave=True, desc=f"training {name}"
):
history = self.model.fit(
self.train, validation_data=self.val, epochs=1, verbose=0
)
if history.history["val_loss"][0] < val_loss:
val_loss = history.history["val_loss"][0]
self.best_weights = self.model.get_weights()
best_epoch = epoch + 1
self.model.set_weights(self.best_weights)
print(
f"The best results was achieved on the {best_epoch} epoch with val_loss {val_loss}"
)
def prediction(self, name):
y_pred = []
# data_arr = self.data['sleep_hours'].values / self.data['sleep_hours'].max()
fst_X = self.data_arr[-self.window :].reshape(1, self.window, 1)
n_preds = ceil(self.sub.shape[0] / self.out_vals)
for _ in tqdm(
range(n_preds), position=0, leave=True, desc=f"prediction {name}"
):
pred = self.model.predict(fst_X, verbose=0)
y_pred.append(pred)
fst_X = np.concatenate([fst_X.flatten(), pred.flatten()])[
-self.window :
].reshape(1, self.window, 1)
self.sub["sleep_hours"] = np.array(y_pred).flatten()[: self.sub.shape[0]]
if self.scale:
self.sub["sleep_hours"] *= self.scale_factor
# ### Dataset creation
# Dataset was created from train.csv file
# Once the network parameters were approximately chosen in version 3 of this notebook, the next step is to choose a way of preprocessing the data
# There are many ways:
#
# Use raw data
#
# Divide outlier values by two
#
# Replace outliers with the distribution of the non-outlier data
#
# Use the difference between this day and the next day
#
# Subtract the mean of the data
#
# ...
# Let's try several of them and see if we can spot the difference
# Reading the dataframe
df_data = pd.read_csv("../input/kaggle-pog-series-s01e04/train.csv")
df_submission = pd.read_csv("../input/kaggle-pog-series-s01e04/sample_submission.csv")
def cleaning_data(df):
df["sleep_hours"] = df["sleep_hours"].apply(lambda x: x / 2 if x > 10 else x)
q25, q75 = np.percentile(df["sleep_hours"], 25), np.percentile(
df["sleep_hours"], 75
)
iqr = q75 - q25
lower, upper = q25 - 1.5 * iqr, q75 + 1.5 * iqr
df["outlier"] = df["sleep_hours"].apply(lambda x: x < lower or x > upper)
mean, std = (
df.query("outlier == False")["sleep_hours"].describe()["mean"],
df.query("outlier == True")["sleep_hours"].describe()["std"],
)
df.loc[df.query("outlier == True").index, "sleep_hours"] = mean
df = df.drop("outlier", axis=1)
return df
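# A quick numeric illustration (toy values, not project data) of the IQR rule used in
# cleaning_data: values outside [q25 - 1.5*IQR, q75 + 1.5*IQR] are flagged as outliers.
_v = np.array([6, 7, 7, 8, 8, 9, 20])
_q25, _q75 = np.percentile(_v, 25), np.percentile(_v, 75)
_iqr = _q75 - _q25
print(_q25 - 1.5 * _iqr, _q75 + 1.5 * _iqr)  # 4.75 10.75 -> 20 is flagged as an outlier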
data_option = {}
data_option["raw"] = df_data
df_diff = pd.read_csv("../input/kaggle-pog-series-s01e04/train.csv")
df_diff["sleep_hours"] = df_data["sleep_hours"].diff()
df_diff = df_diff.dropna()
data_option["diff"] = df_diff
df_data_clean = cleaning_data(df_data)
df_data_clean.plot()
pred = prediction_generation(
df_data_clean,
df_submission,
scale=True,
window=1000,
out_vals=100,
conv=True,
shuffle=True,
epochs=250,
)
pred.training("diff1")
pred.prediction("diff1")
pred.sub.plot()
pred.sub.to_csv("sub11.csv", index=False)
def cond_plot_cleaning(
name,
df_data,
df_submission=df_submission,
window=1000,
out_vals=10,
conv=True,
shuffle=False,
):
class_preds = prediction_generation(
df_data,
df_submission,
window=window,
out_vals=out_vals,
conv=conv,
shuffle=shuffle,
)
class_preds.training(name)
class_preds.prediction(name)
class_preds.sub = class_preds.sub.assign(name=name)
return class_preds.sub
# df_data = cleaning_data(df_data)
vals_product = product(
[10, 30, 100, 300, 1000], [10, 30, 100, 300, 1000], [True, False], [True, False]
)
total = len(list(vals_product))
vals_product = product(
[10, 30, 100, 300, 1000], [10, 30, 100, 300, 1000], [True, False], [True, False]
)
dataframe_list = []
for i, (window, out_vals, conv, shuffle) in enumerate(vals_product):
name = (
f"{i+1}/{total} win_{window} out_vals_{out_vals} conv_{conv} shuffle_{shuffle}"
)
dataframe_list.append(cond_plot(window, out_vals, conv, shuffle, name))
df_res = pd.concat(dataframe_list)
df_res.to_csv("results.csv", index=False)
df_res["index"] = list(range(419)) * 100
g = sns.FacetGrid(data=df_res.query("shuffle == True"), col="window", row="out_vals")
g.map_dataframe(sns.pointplot, y="sleep_hours", x="index", hue="conv")
g = sns.FacetGrid(data=df_res.query("shuffle == False"), col="window", row="out_vals")
g.map_dataframe(sns.pointplot, y="sleep_hours", x="index", hue="conv")
df_res.query("shuffle == False and conv == True and window == 1000 and out_vals == 10")[
["date", "sleep_hours"]
].to_csv("submission9.csv", index=False)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
import plotly.express as px
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
recall_score,
precision_score,
f1_score,
)
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
import statsmodels.api as sm
from scipy import stats
from sklearn.model_selection import cross_val_score
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False
# Function for drawing Sankey plots
def get_sankey_df(df, cat_cols=[], value_cols=""):
colorNumList = []
labelList = []
for catCol in cat_cols:
labelListTemp = list(set(df[catCol].values))
colorNumList.append(len(labelListTemp))
labelList = labelList + labelListTemp
labelList = list(dict.fromkeys(labelList))
for i in range(len(cat_cols) - 1):
if i == 0:
sourceTargetDf = df[[cat_cols[i], cat_cols[i + 1], value_cols]]
sourceTargetDf.columns = ["source", "target", "count"]
else:
tempDf = df[[cat_cols[i], cat_cols[i + 1], value_cols]]
tempDf.columns = ["source", "target", "count"]
sourceTargetDf = pd.concat([sourceTargetDf, tempDf])
sourceTargetDf = (
sourceTargetDf.groupby(["source", "target"])
.agg({"count": "sum"})
.reset_index()
)
# add index for source-target pair
sourceTargetDf["sourceID"] = sourceTargetDf["source"].apply(
lambda x: labelList.index(x)
)
sourceTargetDf["targetID"] = sourceTargetDf["target"].apply(
lambda x: labelList.index(x)
)
return {"label_list": labelList, "df": sourceTargetDf}
# # About
# This analysis is based on the IBM Watson Marketing Customer Value Data and has two main goals:
# 1. Customer persona profiling
#    - Personas help stakeholders understand the characteristics and behaviour of their product's users.
#    - Precision marketing: different product types may have different audiences.
#    - Insights reveal customers' latent needs, enabling more attentive and better service.
# 2. Renewal prediction
#    - Build a classification model with machine learning to judge whether a current customer will renew the current service or plan.
#    - Spotting renewal intention early allows filtering out customers unwilling to renew and investing the cost in other high-value customers.
#    - Offer more customized services to customers with high renewal intention to increase stickiness.
# ## Variable descriptions
# - Customer, customer id
# - State, the state of residence
# - Customer Lifetime Value, the customer's lifetime value
# - Response, whether the customer is willing to renew (Yes, No)
# - Coverage, product coverage (Basic, Extended, Premium)
# - Education, education level (High School or Below, College, Master, Doctor)
# - Effective To Date, effective date of the policy
# - EmploymentStatus, employment status (Employed, Unemployed, Medical Leave, Disabled, Retired)
# - Gender, gender
# - Income, annual income
# - Location Code, location code (Suburban, Rural, Urban)
# - Marital Status, marital status (Single, Married, Divorced)
# - Monthly Premium Auto, average monthly premium paid
# - Months Since Last Claim, months since the last claim
# - Months Since Policy Inception, months since the policy took effect
# - Number of Open Complaints, number of unresolved complaints
# - Number of Policies, number of policies
# - Policy Type, policy type (Personal, Corporate, Special)
# - Policy, level within each policy type (L1, L2, L3)
# - Renew Offer Type, renewal offer type (Offer1, Offer2, Offer3, Offer4)
# - Sales Channel, sales channel (Agent, Branch, Call Center, Web)
# - Total Claim Amount, cumulative claim amount
# - Vehicle Class, vehicle class (Four-Door Car, Two-Door Car, SUV, Sports Car, Luxury SUV, Luxury Car)
# - Vehicle Size, vehicle size (Large, Medium, Small)
# # Load packages and data
df = pd.read_csv(
"../input/ibm-watson-marketing-customer-value-data/WA_Fn-UseC_-Marketing-Customer-Value-Analysis.csv"
)
df.head()
df.shape
pd.set_option("display.max_columns", None)
df.head()
# # Part 1. Customer Persona Analysis
# ## Data cleaning and validation
# The data has no duplicated rows or duplicated customers, and no missing values were found.
# check whether there is any duplicated customer
df["Customer"].duplicated().any()
# percentage of missing value
df.apply(lambda x: sum(x.isnull()) / len(x), axis=0)
# ### Outlier detection
# Outliers are detected with the box-plot method: values below Q1 - 3xIQR or above Q3 + 3xIQR are treated as outliers
# Some variables contain outliers; they are excluded later depending on the situation.
def outlier_dectect(df, column, cutoff_rate):
global lower, upper
q1, q3 = np.quantile(df[column], 0.25), np.quantile(df[column], 0.75)
IQR = q3 - q1
cut_off = IQR * cutoff_rate
lower, upper = q1 - cut_off, q3 + cut_off
print("The lower bound value is", lower)
print("The upper bound value is", upper)
df1 = df[df[column] > upper]
df2 = df[df[column] < lower]
return print("Total number of outliers are", df1.shape[0] + df2.shape[0])
df_remove_outlier = df.copy()
df_remove_outlier.loc[
df_remove_outlier["Customer Lifetime Value"] >= 16414.04, "Customer Lifetime Value"
] = 16414
df_remove_outlier.loc[
df_remove_outlier["Monthly Premium Auto"] >= 170.5, "Monthly Premium Auto"
] = 170.5
df_remove_outlier.loc[
df_remove_outlier["Monthly Premium Auto"] <= 6.5, "Monthly Premium Auto"
] = 6.5
df_remove_outlier.loc[
df_remove_outlier["Total Claim Amount"] >= 960.3997, "Total Claim Amount"
] = 960.3997
out_index = df[
(df["Customer Lifetime Value"] >= 16414.04)
| (df["Monthly Premium Auto"] >= 170.5)
| (df["Monthly Premium Auto"] <= 6.5)
| (df["Total Claim Amount"] >= 960.3997)
].index
df_remove_outlier = df.drop(axis=0, labels=out_index)
for col in df.columns[(df.dtypes == "int64") | (df.dtypes == "float64")]:
print(f"Outlier detection for `{col}`:")
outlier_dectect(df, col, 1.5)
print("\n")
# ## Quick EDA
# ### Customer gender
# There is no obvious difference between the proportions of the two genders
gender_df = df.groupby("Gender")["Customer"].count()
gender_df = gender_df.to_frame()
gender_df.plot(kind="pie", subplots=True, autopct="%1.1f%%")
plt.title("Customer gender distribution")
plt.show()
# ### Customer residence area
# Most customers live in suburban areas, with California being the most common state.
loc_df = df.groupby(["Location Code", "State"])["Customer"].count()
loc_df = loc_df.to_frame()
loc_df = loc_df.reset_index()
loc_data_for_sankey = get_sankey_df(loc_df, ["Location Code", "State"], "Customer")
colors = [
"rgb(249, 226, 175)",
"rgb(0, 159, 189)",
"rgb(33, 0, 98)",
"rgb(119, 3, 123)",
"rgb(250, 112, 112)",
"rgb(251, 242, 207)",
"rgb(198, 235, 197)",
"rgb(178, 164, 255)",
]
fig = go.Figure(
data=[
go.Sankey(
node=dict(
pad=15,
thickness=20,
line=dict(color="black", width=0.5),
label=loc_data_for_sankey["label_list"],
color=colors,
),
link=dict(
source=loc_data_for_sankey["df"]["sourceID"],
target=loc_data_for_sankey["df"]["targetID"],
value=loc_data_for_sankey["df"]["count"],
),
)
]
)
fig.update_layout(title_text="Sankey diagram - 客戶居住地區分佈", font_size=15, width=600)
fig.show()
# ### Customer education level
# Customers with Master or Doctor degrees are a minority; most customers have a Bachelor degree or below.
edu_df = df.groupby("Education")["Customer"].count()
edu_df = edu_df.to_frame().reset_index()
edu_df
fig = px.bar(edu_df, x="Education", y="Customer")
fig.update_layout(
title_text="客戶教育程度分佈",
barmode="stack",
xaxis={"categoryorder": "total descending"},
width=600,
)
fig.show()
# ### Vehicle types owned
# Midsize vehicles dominate, mostly Four-Door Cars, Two-Door Cars and SUVs.
#
vehicel_df = df.groupby(["Vehicle Size", "Vehicle Class"])["Customer"].count()
vehicel_df = vehicel_df.to_frame()
vehicel_df = vehicel_df.reset_index()
vehicel_df
fig = px.histogram(
vehicel_df, x="Vehicle Class", y="Customer", color="Vehicle Size", barmode="group"
)
fig.update_layout(
title_text="客戶持有汽車類型分佈",
barmode="stack",
xaxis={"categoryorder": "total descending"},
width=600,
)
fig.show()
# ### Income
# The income distribution is extremely right-skewed; about 25% of customers earn less than USD 2,000
fig = px.histogram(df, x="Income", width=600)
fig.show()
# ### Marital status
marriage_df = df.groupby("Marital Status")["Customer"].count()
marriage_df = marriage_df.to_frame().reset_index()
fig = px.bar(marriage_df, x="Marital Status", y="Customer")
fig.update_layout(
title_text="客戶婚姻狀態分佈",
barmode="stack",
xaxis={"categoryorder": "total descending"},
width=600,
)
fig.show()
# ### Employment status
employstatus_df = df.groupby("EmploymentStatus")["Customer"].count()
employstatus_df = employstatus_df.to_frame().reset_index()
fig = px.bar(employstatus_df, x="EmploymentStatus", y="Customer")
fig.update_layout(
title_text="客戶職業類型分佈",
barmode="stack",
xaxis={"categoryorder": "total descending"},
width=600,
)
fig.show()
# Because these categorical variables are ordinal and some continuous variables are heavily right-skewed, all variables are label-encoded with pd.factorize and Spearman correlation is used to inspect the relationships between variables
df_corr = df.drop(["Response", "Customer"], axis=1).apply(lambda x: pd.factorize(x)[0])
df_corr = df_corr.corr("spearman").round(3)
# The correlations reveal some interesting facts:
# - Customer Lifetime Value has a low-to-moderate correlation with **Income** and **Total Claim Amount**
# - Total Claim Amount has a moderate correlation with **Location Code**
# - Monthly Premium Auto has a moderate correlation with **Coverage** and **Vehicle Class**
plt.figure(figsize=(15, 12))
ax = sns.heatmap(df_corr, annot=True, cmap="Blues")
plt.title("Spearman correlation between variables")
plt.show()
# Customer Lifetime Value (CLV) helps decide how much cost should be invested in each customer; combined with Months Since Policy Inception it reflects customer loyalty. Next, these two variables are used for market segmentation.
df_cus_seg = df.copy()
df_cus_seg["CLV seg"] = df_cus_seg["Customer Lifetime Value"].apply(
lambda x: "high" if x > df_cus_seg["Customer Lifetime Value"].median() else "low"
)
df_cus_seg["policy duration seg"] = df_cus_seg["Months Since Policy Inception"].apply(
lambda x: "high"
if x > df_cus_seg["Months Since Policy Inception"].median()
else "low"
)
fig = df_cus_seg.loc[
(df_cus_seg["CLV seg"] == "high") & (df_cus_seg["policy duration seg"] == "high")
].plot.scatter(
x="Months Since Policy Inception", y="Customer Lifetime Value", c="red", logy=True
)
df_cus_seg.loc[
(df_cus_seg["CLV seg"] == "low") & (df_cus_seg["policy duration seg"] == "high")
].plot.scatter(
ax=fig,
x="Months Since Policy Inception",
y="Customer Lifetime Value",
c="blue",
logy=True,
)
df_cus_seg.loc[
(df_cus_seg["CLV seg"] == "high") & (df_cus_seg["policy duration seg"] == "low")
].plot.scatter(
ax=fig,
x="Months Since Policy Inception",
y="Customer Lifetime Value",
c="yellow",
logy=True,
)
df_cus_seg.loc[
(df_cus_seg["CLV seg"] == "low") & (df_cus_seg["policy duration seg"] == "low")
].plot.scatter(
ax=fig,
x="Months Since Policy Inception",
y="Customer Lifetime Value",
c="grey",
logy=True,
)
fig.set_ylabel("log(CLV)")
fig.set_xlabel("Months Since Policy Inception")
fig.set_title("Customer Segmentation - Based on CLV and Months Since Policy Inception")
plt.show()
response_rate_by_cus_seg = (
df_cus_seg.loc[df_cus_seg["Response"] == "Yes"]
.groupby(["CLV seg", "policy duration seg"])
.count()["Customer"]
/ df_cus_seg.groupby(["CLV seg", "policy duration seg"]).count()["Customer"]
)
response_rate_by_cus_seg = (
response_rate_by_cus_seg.to_frame()
.reset_index()
.rename(columns={"Customer": "Response (%)"})
)
response_rate_by_cus_seg["Response (%)"] = (
response_rate_by_cus_seg["Response (%)"] * 100
)
# After dividing customers into 4 segments and checking the response rate of each, customers with more months since policy inception respond more often; in other words, long-term policyholders have higher response rates.
# In addition, customers with low lifetime value but a long time since policy inception have the highest response rate.
fig = px.bar(
response_rate_by_cus_seg,
x="CLV seg",
y="Response (%)",
color="policy duration seg",
width=600,
barmode="group",
title="分群後 - 客戶回應率",
)
fig.show()
# ### Customer segmentation - based on FAMD + KMeans
from prince import FAMD
from sklearn.cluster import KMeans
for_cluster_X = df.drop(axis=1, labels=["Customer", "Response", "Effective To Date"])
famd = FAMD(n_components=3).fit(for_cluster_X)
famd_X = famd.row_coordinates(for_cluster_X)
famd_X.columns = ["comp1", "comp2", "comp3"]
model = KMeans(n_clusters=3, random_state=42).fit(famd_X)
famd_X["cluster"] = pd.Categorical(model.labels_)
famd_X["cluster"].value_counts()
sns.scatterplot(x="comp1", y="comp2", hue="cluster", data=famd_X)
kmean_df = df.copy()
kmean_df["cluster"] = pd.Categorical(model.labels_)
kmean_df.head()
kmean_df.groupby("cluster").agg(
{
"Customer Lifetime Value": np.mean,
"Income": np.mean,
"Monthly Premium Auto": np.mean,
"Months Since Last Claim": np.mean,
"Months Since Policy Inception": np.mean,
"Number of Open Complaints": np.mean,
"Number of Policies": np.mean,
"Total Claim Amount": np.mean,
}
)
kmean_df.groupby("cluster")["Location Code"].value_counts()
kmean_df.groupby("cluster")["Coverage"].value_counts()
kmean_df.groupby("cluster")["Marital Status"].value_counts()
kmean_df.groupby("cluster")["EmploymentStatus"].value_counts()
# # Part 2. Customer Renewal Prediction
# ## Quick EDA
# ## Target variable: Response
response_df = df.groupby("Response")["Customer"].count()
response_df = response_df.to_frame().reset_index()
response_df = response_df.assign(percentage=lambda x: x.Customer / len(df) * 100)
response_df["percentage"] = response_df["percentage"].apply(
lambda x: "{0:1.2f}%".format(x)
)
fig = px.bar(response_df, x="Response", y="Customer", text="percentage")
fig.update_layout(
title_text="客戶回應狀況分佈",
barmode="stack",
xaxis={"categoryorder": "total descending"},
width=600,
)
fig.show()
# ### Response V.S. Education
fig = px.histogram(
df,
x="Response",
y="Customer",
color="Education",
barmode="group",
histfunc="count",
width=600,
)
fig.update_layout(title_text="客戶回應狀況 V.S. 教育程度")
fig.show()
# ### Response V.S. Sales Channel
# The agent channel has the highest response rate, but agents also account for the most customers overall, so we cannot yet conclude whether the sales channel affects the response rate.
fig = px.histogram(
df,
x="Response",
y="Customer",
color="Sales Channel",
barmode="group",
histfunc="count",
width=600,
)
fig.update_layout(title_text="客戶回應狀況 V.S. 銷售渠道")
fig.show()
# ### Response V.S. EmploymentStatus
# Employed customers respond the most, which is expected since they are also the largest group; notably, for retired customers the number of responders exceeds the number of non-responders.
# This suggests that retired customers are more attached to the policy service.
fig = px.histogram(
df,
x="Response",
y="Customer",
color="EmploymentStatus",
barmode="group",
histfunc="count",
width=600,
)
fig.update_layout(title_text="客戶回應狀況 V.S. 就業狀況")
fig.show()
# ### Response V.S. Renew Offer Type
# A key point here: Offer1 and Offer2 account for the vast majority of renewals, while almost nobody renews with Offer3 or Offer4.
fig = px.histogram(
df,
x="Response",
y="Customer",
color="Renew Offer Type",
barmode="group",
histfunc="count",
width=600,
)
fig.update_layout(title_text="客戶回應狀況 V.S. 產品類型")
fig.show()
# ### Response rate V.S. Policy
fig = px.histogram(
df,
x="Response",
y="Customer",
color="Policy",
barmode="group",
histfunc="count",
width=600,
)
fig.update_layout(title_text="客戶回應狀況 V.S. 產品類型的級別")
fig.show()
# ### Among responding customers, what is the distribution of policy types and levels?
# From the Sankey diagram, the top five policy levels by number of responders are:
# Personal L3 > Personal L2 > Personal L1 > Corporate L2 > Corporate L3
policy_df = (
df[df["Response"] == "Yes"]
.groupby(["Renew Offer Type", "Policy"])["Customer"]
.count()
)
policy_df = policy_df.to_frame()
policy_df = policy_df.reset_index()
policy_data_for_sankey = get_sankey_df(
policy_df, ["Renew Offer Type", "Policy"], "Customer"
)
fig = go.Figure(
data=[
go.Sankey(
node=dict(
pad=15,
thickness=20,
line=dict(color="black", width=0.5),
label=policy_data_for_sankey["label_list"],
),
link=dict(
source=policy_data_for_sankey["df"]["sourceID"],
target=policy_data_for_sankey["df"]["targetID"],
value=policy_data_for_sankey["df"]["count"],
),
)
]
)
fig.update_layout(title_text="Sankey diagram - 客戶購買保單類型與層級分佈", font_size=15, width=600)
fig.show()
# ### Response V.S. Income
# The box plot suggests that income is related to whether a customer responds.
fig = px.box(df, x="Response", y="Income", width=600)
fig.update_layout(title_text="客戶回應狀況 V.S. 收入")
fig.show()
# ### Response V.S. Total Claim Amount
# The box plot suggests that the cumulative claim amount is related to whether a customer responds.
outlier_dectect(df, "Total Claim Amount", 1.5)
total_claim_remove_out_df = df[df["Total Claim Amount"] <= 960.3997]
fig = px.box(total_claim_remove_out_df, x="Response", y="Total Claim Amount", width=600)
fig.update_layout(title_text="客戶回應狀況 V.S. 累積索賠金額")
fig.show()
df.loc[df["Response"] == "Yes", "Response_label"] = 1
df.loc[df["Response"] == "No", "Response_label"] = 0
numeric_df = df.select_dtypes(["int64", "float64"]).drop(
axis=1, labels=["Response_label"]
)
cat_df = df.select_dtypes("object").drop(axis=1, labels=["Customer", "Response"])
lb = LabelEncoder()
for col in cat_df.columns:
cat_df[col] = lb.fit_transform(cat_df[col])
all_df = pd.concat([cat_df, numeric_df], axis=1)
X = all_df
y = df["Response_label"]
lr_model = sm.Logit(y, X)
lr_model_fit = lr_model.fit()
lr_model_fit.summary()
lr_model_fit.pvalues[lr_model_fit.pvalues < 0.05]
# ## Categorical feature selection - Chi-square test of independence
# Test whether each categorical variable is associated with customer response; this can serve as a categorical feature-selection step.
def get_chi_square_res(df, x="", y=""):
res = pd.crosstab(df[x], df[y], margins=False)
chi2, p, dof, ex = stats.chi2_contingency(res, correction=False)
return p
lst = []
for i in cat_df.columns:
p = get_chi_square_res(df, i, "Response")
lst.append(p)
chi_square_test_res = pd.Series(lst, index=cat_df.columns).to_frame(
name="Chi-square test's p-value"
)
chi_imp_cat_col = chi_square_test_res[
chi_square_test_res["Chi-square test's p-value"] < 0.05
].index
print(f"important categorical features via chi-square test : {list(chi_imp_cat_col)}")
# ### Continuous feature selection - Two-sample Kolmogorov-Smirnov Test
# According to the KS test, each continuous variable has a significantly different distribution when Response = 'Yes' versus Response = 'No',
# so these variables may also help in classifying whether a customer renews.
from sklearn.preprocessing import MinMaxScaler
# Min-max scaling
scaler = MinMaxScaler()
numeric_X_scaled = scaler.fit_transform(numeric_df)
numeric_X_scaled = pd.DataFrame(numeric_X_scaled, columns=numeric_df.columns)
numeric_lr = sm.Logit(y, numeric_X_scaled)
numeric_lr.fit().summary()
from scipy.stats import ks_2samp
lst = []
for i in numeric_X_scaled.columns:
    # compare each feature's distribution for responders vs. non-responders
    res = ks_2samp(
        numeric_X_scaled[i][(y == 1).values],
        numeric_X_scaled[i][(y == 0).values],
    )
    lst.append(res.pvalue.round(3))
pd.Series(lst, index=numeric_X_scaled.columns).to_frame("ks test pvalue")
# ## Continuous feature selection - Logistic regression
# Based on the Wald tests, all continuous variables are significant at the 0.05 level. The coefficients show that, except for Monthly Premium Auto, every variable is negatively related to Response.
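# A minimal sketch of how the statement above can be read off (an addition; it re-fits the
# scaled numeric logistic regression so the coefficients and Wald-test p-values sit in one
# table; disp=0 only silences the optimizer output):
numeric_fit = sm.Logit(y, numeric_X_scaled).fit(disp=0)
coef_table = pd.DataFrame({"coef": numeric_fit.params, "p_value": numeric_fit.pvalues})
print(coef_table.round(3))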
# Based on the feature-selection results, take the important variables as the model inputs X, with Response as the target variable.
imp_X = X[chi_imp_cat_col.append(numeric_df.columns)]
scaler = MinMaxScaler()
imp_X_scaled = scaler.fit_transform(imp_X)
imp_X_scaled = pd.DataFrame(imp_X_scaled, columns=imp_X.columns)
X_train, X_test, y_train, y_test = train_test_split(
imp_X_scaled, y, test_size=0.2, random_state=42
)
import warnings
warnings.filterwarnings("ignore")
Classifiers = [
["LogisticRegression", LogisticRegression(random_state=42)],
["Random Forest", RandomForestClassifier(random_state=42)],
["Support Vector Machine", SVC(random_state=42)],
["XGBClassifier", xgb.XGBClassifier(random_state=42)],
]
Classify_result = []
names = []
prediction = []
for name, classifier in Classifiers:
recall = cross_val_score(classifier, X, y, scoring="recall", cv=5).mean()
precision = cross_val_score(classifier, X, y, scoring="precision", cv=5).mean()
f1 = cross_val_score(classifier, X, y, scoring="f1", cv=5).mean()
class_eva = pd.DataFrame([recall, precision, f1])
Classify_result.append(class_eva)
name = pd.Series(name)
names.append(name)
names = pd.DataFrame(names)
names = names[0].tolist()
# names
result = pd.concat(Classify_result, axis=1)
result.columns = names
result.index = ["recall", "precision", "F1score"]
warnings.filterwarnings("default")
result
# #### Random Forest
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
rfc_pred = rfc.predict(X_test)
print(confusion_matrix(y_test, rfc_pred))
print("Accuracy score:", accuracy_score(y_test, rfc_pred))
print(classification_report(y_test, rfc_pred))
scores = cross_val_score(rfc, X, y, cv=5)
print("\n")
print(
"%0.2f accuracy with a standard deviation of %0.2f" % (scores.mean(), scores.std())
)
feature_imp = rfc.feature_importances_.round(3)
feature_imp_ = pd.Series(feature_imp, index=imp_X.columns).sort_values(ascending=False)
plt.figure(figsize=(8, 8))
sns.barplot(x=feature_imp_.values, y=feature_imp_.index)
plt.title("Random forest feature importance")
plt.xlabel("Gini importance")
plt.show()
# #### Xgboost
import xgboost as xgb
xgb_clf = xgb.XGBClassifier(objective="binary:logistic", random_state=42)
xgb_clf.fit(X_train, y_train)
xgb_pred = xgb_clf.predict(X_test)
print(confusion_matrix(y_test, xgb_pred))
print("Accuracy score:", accuracy_score(y_test, xgb_pred))
print(classification_report(y_test, xgb_pred))
feature_imp = xgb_clf.feature_importances_.round(3)
feature_imp_ = pd.Series(feature_imp, index=imp_X.columns).sort_values(ascending=False)
plt.figure(figsize=(8, 8))
sns.barplot(x=feature_imp_.values, y=feature_imp_.index)
plt.title("Xgboost feature importance")
plt.xlabel("Average gain")
plt.show()
|
import os
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import numpy as np
import pandas as pd
# In this Kaggle project we try to predict a bird's species (class) from its image.
# The data consist of 224x224x3 pixel images (RGB).
# There are 450 bird classes, which makes for a very large dataset.
# The data are arranged in train, valid and test folders, each of which is split into 450 species/class subfolders.
# The training set has 70626 images; this notebook visualizes the class distribution and some random images.
# The validation set has 5 images per class, i.e. 2250 images in total.
# The test set also has 5 images per class, 2250 images in total.
#
train_dir = "/kaggle/input/100-bird-species/train/"
val_dir = "/kaggle/input/100-bird-species/valid/"
test_dir = "/kaggle/input/100-bird-species/test/"
num_of_bird_species = len(os.listdir(train_dir))
num_of_bird_species
# # 1.Data Visualisation
# get the number of images per species folder in the train data
species_count = pd.Series(dtype="float64")
for dirpath, dirnames, filenames in os.walk(train_dir):
species = dirpath.split("/")[-1]
number_image = len(filenames)
species_count[species] = number_image
species_count
data = species_count.to_frame(name="number_image")
data = data.reset_index()
data.columns = ["species", "number_image"]
data = data.sort_values("number_image", ascending=False)
figure = plt.figure(figsize=(9, 90))
ax = sns.barplot(x="number_image", y="species", data=data)
for i in ax.containers:
ax.bar_label(
i,
)
def view_random_image(target_dir, target_class=None, verbose=0):
    if target_class is None:
        target_class = random.sample(os.listdir(target_dir), 1)[0]
# setting up the image directory
target_folder = target_dir + "/" + target_class
# get a random image path
random_image = random.sample(os.listdir(target_folder), 1)
# read image and plotting it
img = mpimg.imread(target_folder + "/" + random_image[0])
plt.imshow(img)
plt.title(target_class)
plt.axis("off")
if verbose == 1:
print(f"Image shape: {img.shape}")
return img
img = view_random_image(
target_dir=train_dir, target_class="VICTORIA CROWNED PIGEON", verbose=1
)
def view_9_random_image(target_dir, target_class=None, verbose=0):
plt.figure(figsize=(12, 12))
for x in range(1, 10):
plt.subplot(3, 3, x)
img = view_random_image(
target_dir=target_dir, target_class=target_class, verbose=verbose
)
view_9_random_image(train_dir, "HOUSE FINCH")
view_9_random_image(train_dir)
# # 2.CNN Model
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import Sequential
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
SpatialDropout2D,
Dropout,
Flatten,
Dense,
BatchNormalization,
)
from tensorflow.keras.callbacks import EarlyStopping
# ## 2.1 Preprocessing
# Rescale generator
general_datagen = ImageDataGenerator(rescale=1 / 255)
# ImageDataGenerator is used because the directories are not linked directly to the image data.
# It consolidates the images for each train/test/validation set.
# The default parameters of flow_from_directory are left as follows: batch_size = 32, class_mode = 'categorical' (one-hot encoding), shuffle = True (in flow).
train_data = general_datagen.flow_from_directory(
directory=train_dir, target_size=(224, 224), seed=42
)
val_data = general_datagen.flow_from_directory(
directory=val_dir, target_size=(224, 224), seed=42
)
test_data = general_datagen.flow_from_directory(
directory=test_dir, target_size=(224, 224), shuffle=False
)
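# A quick sanity check of the defaults mentioned above (an added sketch; it assumes the
# iterator returned by flow_from_directory exposes these attributes, as tf.keras's does):
print(train_data.batch_size, train_data.class_mode, train_data.num_classes)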
# tuple of len 2 with X and y respectively
len(train_data[0])
# shape of batch of entries ( aka X)
train_data[0][0].shape
train_data[0][0][[0]].shape
# shape of a batch of targets ( aka y)
train_data[0][1].shape
# ## 2.2 Model Creation
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3), activation="relu")) # 222X222
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation="relu")) # 220x220
model.add(MaxPooling2D(pool_size=(2, 2))) # 110x110
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.35))
model.add(Conv2D(128, (3, 3), activation="relu")) # 108x108
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), activation="relu")) # 106x106
model.add(MaxPooling2D(pool_size=(2, 2))) # 53x53
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.35))
model.add(Conv2D(512, (3, 3), activation="relu")) # 51x51
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(num_of_bird_species, activation="softmax")) # 450
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="Adam", metrics=["accuracy"])
# ## 2.3 Model fit
# fit model
history = model.fit(
train_data,
epochs=1,
validation_data=val_data,
verbose=1,
callbacks=[
EarlyStopping(monitor="val_loss", patience=5, restore_best_weights=True)
],
)
def plot_history_metrics(history):
# plot accuracy = f(epoch)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Train", "Valid"])
plt.show()
# Plot loss = f(epoch)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.legend(["Train", "Valid"])
plt.show()
plot_history_metrics(history)
# ## 2.4 Model Evaluation
model.evaluate(test_data)
# save the model so it can be reused easily
model.save("CNN_birds_model.h5")
# array of probabilities: 450 class-membership probabilities for each image
proba_pred = model.predict(test_data)
proba_pred
# get the index of the maximum probability; it is the predicted species class
y_pred = np.argmax(proba_pred, axis=-1)
y_pred[0:50]
y_true = test_data.classes
y_true[0:50]
from sklearn.metrics import multilabel_confusion_matrix
multi_confu = multilabel_confusion_matrix(y_true, y_pred)
dico = test_data.class_indices
inv_dico = {value: key for key, value in dico.items()}
# Print the per-species confusion matrices and save the indices with no correct prediction
zero_predict = []
for index, matrix in enumerate(multi_confu):
print(inv_dico[index])
print(matrix)
if matrix[1][1] == 0:
zero_predict.append(index)
species = list(dico.keys())
# species with not a single good prediction
for ele in zero_predict:
print(species[ele])
from sklearn.metrics import confusion_matrix
confu = confusion_matrix(y_true, y_pred)
confu
# example: the upper-left corner of the confusion matrix for the first 25 species
plt.figure(figsize=(10, 8))
sns.heatmap(
confu[0:25, 0:25], annot=True, xticklabels=species[0:25], yticklabels=species[0:25]
)
plt.ylabel("True species")
plt.xlabel("Predicted species")
plt.show()
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
import tensorflow as tf
# **import data**
df_train = pd.read_csv("/kaggle/input/titanic/train.csv")
df_test = pd.read_csv("/kaggle/input/titanic/test.csv")
# **a quick glance**
df_train.info()
df_test.info()
# I concatenate the two dataframes to make data preprocessing easier
df_test["is_train"] = 0
df_train["is_train"] = 1
df_sum = pd.concat([df_test, df_train], axis=0)
df_sum.info()
df_sum.sample(10)
df_sum.drop(["Ticket", "Cabin", "PassengerId"], axis=1, inplace=True)
df_sum.isnull().sum()
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="mean", missing_values=np.nan)
imputer2 = SimpleImputer(strategy="most_frequent", missing_values=np.nan)
df_sum["Age"] = imputer.fit_transform(df_sum["Age"].values.reshape(-1, 1))
df_sum["Embarked"] = imputer2.fit_transform((df_sum["Embarked"].values.reshape(-1, 1)))
df_sum.isnull().sum()
df_sum.head()
df_sum["title"] = [i.split(",")[1].split(".")[0] for i in df_sum["Name"]]
df_sum.drop("Name", axis=1, inplace=True)
df_sum["title"].value_counts()
for index, value in df_sum["title"].value_counts().items():
if value < 7:
df_sum.loc[df_sum["title"] == index, "title"] = "re"
df_sum["title"].value_counts()
df_sum["is_alone"] = df_sum["SibSp"] + df_sum["Parch"] > 1
df_sum["is_alone"] = df_sum["is_alone"].apply(lambda x: 1 if x == True else 0)
df_sum.drop(["SibSp", "Parch"], axis=1, inplace=True)
df_sum["Age"] = pd.cut(
df_sum["Age"], bins=[0, 7, 15, 25, 40, 120], labels=[0, 1, 2, 3, 4]
)
le = LabelEncoder()
df_sum["Sex"] = le.fit_transform(df_sum["Sex"])
df_sum
ct = ColumnTransformer(
transformers=[("encoder", OneHotEncoder(), [0, 2, 4, 7])], remainder="passthrough"
)
df_sum = ct.fit_transform(df_sum)
df_sum
df_train = pd.DataFrame(df_sum[df_sum[:, 20] == 1]).drop(20, axis=1)
df_test = pd.DataFrame(df_sum[df_sum[:, 20] == 0]).drop([20, 21], axis=1)
X = df_train.drop(21, axis=1).values
y = df_train.iloc[:, 21].values.reshape(-1, 1)
x_train, x_test, y_train, y_test = train_test_split(X, y)
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
# **Here I develop my model. I think there is a problem but I cannot solve it; if you know what it is, please contact me.**
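# A small diagnostic sketch (an addition) that may help locate the issue mentioned above:
# the target should contain only 0/1 survival labels; anything else points to the column
# indexing after the ColumnTransformer.
print("X shape:", X.shape, "y shape:", y.shape)
print("unique target values:", np.unique(y))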
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units=6, activation="relu"))
model.add(tf.keras.layers.Dense(units=6, activation="relu"))
model.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
r = model.fit(
x_train, y_train, batch_size=32, epochs=100, validation_data=(x_test, y_test)
)
print(f"train Score : {model.evaluate(x_train,y_train)[1]}")
print(f"test Score : {model.evaluate(x_test,y_test)[1]}")
import matplotlib.pyplot as plt
plt.plot(r.history["loss"], label="loss")
plt.plot(r.history["val_loss"], label="val_loss")
plt.legend()
plt.plot(r.history["accuracy"], label="accuracy")
plt.plot(r.history["val_accuracy"], label="val_accuracy")
|
import re
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# #### Import txt files
with open("../input/barbican-words/Barbican-Words-Left.txt", "r") as f:
barb_l = f.readlines()[0]
with open("../input/barbican-words/Barbican-Words-Right.txt", "r") as f:
barb_r = f.readlines()[0]
print(barb_l)
print(barb_r)
# ## Letter Counts
# Count each occurrence of a unique character (converted to lowercase), then drop any that are not alpha
letter_cnt_l = {}
letter_cnt_r = {}
for letter in barb_l:
    if letter.isalpha():
        letter_cnt_l[letter.lower()] = letter_cnt_l.get(letter.lower(), 0) + 1
for letter in barb_r:
    if letter.isalpha():
        letter_cnt_r[letter.lower()] = letter_cnt_r.get(letter.lower(), 0) + 1
print(f"Left counts: {letter_cnt_l}", end="\n\n")
print(f"Right counts: {letter_cnt_r}")
# #### Let's make a graph!
l_x, l_y = zip(*sorted(letter_cnt_l.items()))
r_x, r_y = zip(*sorted(letter_cnt_r.items()))
# ##### Left Graph
sns.set_style("dark")
plt.figure(figsize=(12, 8))
plt.bar(l_x, l_y, color="m")
plt.show()
# ##### Right Graph
sns.set_style("dark")
plt.figure(figsize=(12, 8))
plt.bar(r_x, r_y, color="m")
plt.show()
# ## Word Counts
# Now we will count each occurrence of a unique word and store these in a dict
# This time we will use Counter!
from collections import Counter
# #### Creating lists of all words
word_list_l = barb_l.split()
word_list_r = barb_r.split()
print(
f"First ten words from left: {word_list_l[0:10]} \n\nFirst ten words from right: {word_list_r[0:10]}"
)
word_cnt_l = Counter(word_list_l)
word_cnt_r = Counter(word_list_r)
word_cnt_l = {k.lower(): v for k, v in word_cnt_l.items()}
word_cnt_r = {k.lower(): v for k, v in word_cnt_r.items()}
sorted(word_cnt_l)
# #### Look at this ~~photo~~graph!
# Too many categories for a line plot, but the topology looks interesting!
l_x_w, l_y_w = zip(*sorted(word_cnt_l.items()))
r_x_w, r_y_w = zip(*sorted(word_cnt_r.items()))
# ##### Left Graph
plt.figure(figsize=(32, 20))
plt.plot(l_x_w, l_y_w, color="m")
plt.xticks(size=5, rotation=90)
plt.title("Barbican Walk Left Word Frequency", fontsize=22, weight="bold")
plt.xlabel("Word", fontsize=14, weight="semibold")
plt.ylabel("Frequency", rotation=90, fontsize=14, weight="semibold")
# plt.savefig('Barbican-Walk-Left-Freq-by-Word')
plt.show()
# ##### Right Graph
plt.figure(figsize=(32, 20))
plt.plot(r_x_w, r_y_w, color="m")
plt.xticks(size=5, rotation=90)
plt.title("Barbican Walk Right Word Frequency", fontsize=22, weight="bold")
plt.xlabel("Word", fontsize=14, weight="semibold")
plt.ylabel("Frequency", rotation=90, fontsize=14, weight="semibold")
# plt.savefig('Barbican-Walk-Right-Freq-by-Word')
plt.show()
# ##### Both!
# Crazy, right?
plt.figure(figsize=(32, 20))
plt.plot(l_x_w, l_y_w, color="m")
plt.plot(r_x_w, r_y_w, color="r")
plt.xticks(size=5, rotation=90)
plt.legend()
plt.title("Barbican Walk (All) Word Frequency", fontsize=22, weight="bold")
plt.xlabel("Word", fontsize=14, weight="semibold")
plt.ylabel("Frequency", rotation=90, fontsize=14, weight="semibold")
plt.savefig("Barbican-Walk-(All)-Freq-by-Word")
plt.show()
# ## Playing with Natural Language Toolkit (NLTK)
# #### Importing libraries
import nltk
from nltk.corpus import treebank
import random as rand
# #### Create lists of words
# This time using the nltk.word_tokenize() method
# This time we shall concat the two sentences and then add the words to a list.
barb_words = nltk.word_tokenize(barb_l + barb_r)
tagged = nltk.pos_tag(barb_words)
tagged
# #### Convert to Dict
word_dict = {}
for i in range(len(tagged)):
word_dict[tagged[i][0]] = tagged[i][1]
word_dict
# #### Looking at the unique speech codes
word_types = Counter(word_dict.values())
word_types
# #### Defining a function to create a sentence based on the grammar we specify
def make_sentance(num=1):
grammar = ["VB", "DT", ["NN", "NNP", "NNS"], "IN", "DT", ["NN", "NNP", "NNS"]]
for i in range(num):
sent = ""
for i in grammar:
word = rand.choice(list(word_dict.keys()))
while word_dict[word] not in i:
word = rand.choice(list(word_dict.keys()))
sent += word + " "
print(sent)
make_sentance(25)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **SL Technique - Model**
# **Without HyperParameter Tuning**
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# Load the dataset
df = pd.read_csv("/kaggle/input/viintage-analysis/dwd.csv")
# Split the dataset into training and test sets
train_df = df[df["OCCUPATION_TYPE"].notnull()]
test_df = df[df["OCCUPATION_TYPE"].isnull()]
train_df1, test_df1 = train_test_split(train_df, test_size=0.25, random_state=42)
# Encode the categorical variables to numerical labels
categorical_cols = [
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
]
encoders = {}
for col in categorical_cols:
le = LabelEncoder()
train_vals = train_df1[col].astype(str).unique()
le.fit(train_vals)
train_df1[col] = le.transform(train_df1[col].astype(str))
test_df1[col] = le.transform(test_df1[col].astype(str))
encoders[col] = le
# Split the training data into input and target variables
X_train = train_df1.drop("OCCUPATION_TYPE", axis=1)
y_train = train_df1["OCCUPATION_TYPE"]
# Train a random forest classifier
clf = RandomForestClassifier(
n_estimators=200, max_depth=13, min_samples_leaf=14, max_features=6
)
clf.fit(X_train, y_train)
# Evaluate the performance on the test data
X_test = test_df1.drop("OCCUPATION_TYPE", axis=1)
y_test = test_df1["OCCUPATION_TYPE"]
X_test = X_test.fillna(
X_train.mean()
) # Fill missing values with the mean of the training data
accuracy = clf.score(X_test, y_test)
print(f"Accuracy: {accuracy}")
# # **HyperParameter Tuning in RFC**
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.preprocessing import LabelEncoder
# Load the dataset
df = pd.read_csv("/kaggle/input/viintage-analysis/dwd.csv")
# Split the dataset into training and test sets
train_df = df[df["OCCUPATION_TYPE"].notnull()]
test_df = df[df["OCCUPATION_TYPE"].isnull()]
train_df1, test_df1 = train_test_split(train_df, test_size=0.25, random_state=42)
# Encode the categorical variables to numerical labels
categorical_cols = [
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
]
encoders = {}
for col in categorical_cols:
le = LabelEncoder()
train_vals = train_df1[col].astype(str).unique()
le.fit(train_vals)
train_df1[col] = le.transform(train_df1[col].astype(str))
test_df1[col] = le.transform(test_df1[col].astype(str))
encoders[col] = le
# Split the training data into input and target variables
X_train = train_df1.drop("OCCUPATION_TYPE", axis=1)
y_train = train_df1["OCCUPATION_TYPE"]
# Define the hyperparameter space
# hyperparameters = {
# 'n_estimators': [100, 200, 300, 400],
# 'max_depth': [10, 12, 14, 16],
# 'min_samples_leaf': [10, 12, 14, 16],
# 'max_features': [5, 6, 7, 8],
# }
# Create the random forest classifier
rf = RandomForestClassifier(
n_estimators=300, min_samples_leaf=10, max_features=8, max_depth=16
)
# Perform randomized search CV to find the best hyperparameters
# clf = RandomizedSearchCV(rf, random_state=42)
rf.fit(X_train, y_train)
# Print the best hyperparameters
# print(f"Best hyperparameters: {clf.best_params_}")
# Evaluate the performance on the test data using the best hyperparameters
X_test = test_df1.drop("OCCUPATION_TYPE", axis=1)
y_test = test_df1["OCCUPATION_TYPE"]
X_test = X_test.fillna(
X_train.mean()
) # Fill missing values with the mean of the training data
accuracy = rf.score(X_test, y_test)
print(f"Accuracy: {accuracy}")
# ***Best hyperparameters: {'n_estimators': 300, 'min_samples_leaf': 10, 'max_features': 8, 'max_depth': 16}***
# ***Accuracy: 0.9568741276487756***
# Get the test data with null values
X_missing = df[df["OCCUPATION_TYPE"].isnull()].drop("OCCUPATION_TYPE", axis=1)
# Encode the categorical variables to numerical labels
for col in categorical_cols:
le = encoders[col]
X_missing[col] = le.transform(X_missing[col].astype(str))
# Fill missing values with the mean of the training data
X_missing = X_missing.fillna(X_train.mean())
# Predict the missing occupation types for the test set
y_pred = rf.predict(X_missing)
# Create a new dataframe with the imputed data
imputed_df = df.copy()
imputed_df.loc[df["OCCUPATION_TYPE"].isnull(), "OCCUPATION_TYPE"] = y_pred
print(imputed_df)
# Get the count of each category in the original OCCUPATION_TYPE column
orig_counts = df["OCCUPATION_TYPE"].value_counts()
# Get the count of each category in the imputed OCCUPATION_TYPE column
imputed_counts = imputed_df["OCCUPATION_TYPE"].value_counts()
# Merge the two dataframes on the index (which is the occupation type)
merged = pd.merge(orig_counts, imputed_counts, left_index=True, right_index=True)
# Rename the columns for clarity
merged.columns = ["Original Count", "Imputed Count"]
# Print the merged dataframe
print(merged)
imputed_df.to_csv("imputed using RFC.csv")
# Get the count of unique values in both dataframes
original_count = df["OCCUPATION_TYPE"].value_counts()
imputed_count = imputed_df["OCCUPATION_TYPE"].value_counts()
# Calculate the absolute difference between the counts for each unique value
diff = (original_count - imputed_count).abs()
# Calculate the sum of differences
sum_diff = diff.sum()
print("Sum of differences:", sum_diff)
sum(df["OCCUPATION_TYPE"].isna())
df["OCCUPATION_TYPE"] = imputed_df["OCCUPATION_TYPE"]
import pandas as pd
df1 = pd.read_csv("/kaggle/working/imputed using RFC.csv")
df1 = df1.drop("Unnamed: 0.1", axis=1)
df1 = df1.drop("Unnamed: 0", axis=1)
df1
df1.to_csv("imputed-RFC-df1.csv")
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
# Load the dataset
print(df1.isna().sum())
# Split the dataset into training and test sets
train_df, test_df = train_test_split(df1, test_size=0.25, random_state=42)
# Encode the categorical variables to numerical labels
categorical_cols = [
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
]
encoders = {}
for col in categorical_cols:
le = LabelEncoder()
train_vals = train_df[col].astype(str).unique()
le.fit(train_vals)
train_df[col] = le.transform(train_df[col].astype(str))
test_df[col] = le.transform(test_df[col].astype(str))
encoders[col] = le
# Split the training data into input and target variables
X_train = train_df.drop("OCCUPATION_TYPE", axis=1)
y_train = train_df["OCCUPATION_TYPE"]
# Train a random forest classifier
clf = RandomForestClassifier(
n_estimators=300, max_depth=16, min_samples_leaf=10, max_features=8
)
clf.fit(X_train, y_train)
# Evaluate the performance on the test data
X_test = test_df.drop("OCCUPATION_TYPE", axis=1)
y_test = test_df["OCCUPATION_TYPE"]
# X_test = X_test.fillna(X_train.mean()) # Fill missing values with the mean of the training data
accuracy = clf.score(X_test, y_test)
print(f"Accuracy: {accuracy}")
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
# create a random forest classifier with the best hyperparameters
rf = RandomForestClassifier(
n_estimators=300, min_samples_leaf=10, max_features=8, max_depth=16
)
# perform 10-fold cross-validation
scores = cross_val_score(rf, X_train, y_train, cv=10)
# print the mean and standard deviation of the scores
print(f"Accuracy: {scores.mean():.2f} (+/- {scores.std():.2f})")
df1.loc[df1["DAYS_EMPLOYED"] > 0, "DAYS_EMPLOYED"] = np.nan
df1
df1[df1["DAYS_EMPLOYED"].isnull()]
df1["YEARS_BIRTH"] = (df1["DAYS_BIRTH"] / -365.25).round(1)
df1["YEARS_EMPLOYED"] = (df1["DAYS_EMPLOYED"] / -365.25).round(1)
# Drop the original columns
df1 = df1.drop(["DAYS_BIRTH", "DAYS_EMPLOYED"], axis=1)
df1
df1[df1["YEARS_EMPLOYED"].isnull()]
df1
# # Random Forest Regressor & Lasso To Predict Years Employed
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_absolute_error
# load the data
# df = pd.read_csv('data.csv')
# define the categorical columns
categorical_cols = [
"OCCUPATION_TYPE",
"CODE_GENDER",
"FLAG_OWN_CAR",
"FLAG_OWN_REALTY",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
]
# create a copy of the original dataset
df_imputed = df1.copy()
# split the dataset into training and testing sets
train_df = df_imputed[df_imputed["YEARS_EMPLOYED"].notnull()]
test_df = df_imputed[df_imputed["YEARS_EMPLOYED"].isnull()]
train_df1, test_df1 = train_test_split(train_df, test_size=0.2, random_state=42)
# Encode the categorical variables to numerical labels
# categorical_cols = ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY', 'NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE', 'NAME_FAMILY_STATUS', 'NAME_HOUSING_TYPE']
encoders = {}
for col in categorical_cols:
le = LabelEncoder()
train_vals = train_df1[col].astype(str).unique()
le.fit(train_vals)
train_df1[col] = le.transform(train_df1[col].astype(str))
test_df1[col] = le.transform(test_df1[col].astype(str))
encoders[col] = le
# extract the target variable from the training set
y_train = train_df1["YEARS_EMPLOYED"]
# extract the features from the training and testing sets
X_train = train_df1.drop(["YEARS_EMPLOYED"], axis=1)
X_test = test_df1.drop(["YEARS_EMPLOYED"], axis=1)
# impute missing values using a RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
# fit a Lasso model to the imputed data to apply L1 regularization
lasso = Lasso(alpha=0.1, tol=0.01, random_state=42)
lasso.fit(X_train, y_train)
# predict using the Lasso model
y_pred_lasso = lasso.predict(X_test)
# calculate MAE for the Lasso model
mae_lasso = mean_absolute_error(test_df1["YEARS_EMPLOYED"], y_pred_lasso)
print("MAE with Lasso regularization:", mae_lasso)
# calculate MAE for the random forest model
mae_rf = mean_absolute_error(test_df1["YEARS_EMPLOYED"], y_pred)
print("MAE without regularization:", mae_rf)
# **HyperParameter Tuning**
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
# define the parameter grid
param_grid = {
"alpha": [0.001, 0.01, 0.1, 1.0, 10.0, 100.0],
"tol": [0.0001, 0.001, 0.01, 0.1],
"max_iter": [100, 500, 1000, 5000],
}
# create the Lasso model
lasso = Lasso()
# create the grid search object
grid_search = GridSearchCV(lasso, param_grid, cv=5, scoring="neg_mean_absolute_error")
# fit the grid search to the training data
grid_search.fit(X_train, y_train)
# print the best parameters and cross-validation score
print("Best parameters: ", grid_search.best_params_)
print("Cross-validation MAE: ", -grid_search.best_score_)
from sklearn.model_selection import cross_val_score
# define the model with Lasso regularization
lasso_model = Lasso(alpha=0.001, max_iter=500, tol=0.0001)
# calculate the cross-validation scores
cv_scores = cross_val_score(
lasso_model, X_train, y_train, cv=5, scoring="neg_mean_absolute_error"
)
# convert the scores to positive values
cv_scores = -cv_scores
# calculate the mean and standard deviation of the scores
mean_cv_score = cv_scores.mean()
std_cv_score = cv_scores.std()
# print the results
print(f"Cross-validation MAE: {mean_cv_score:.3f} +/- {std_cv_score:.3f}")
# A cross-validation MAE of 4.617 +/- 0.019 could be considered reasonable: since the target variable ranges from 0 to 43 years, an MAE of about 4.6 years means the model is off by roughly 10% of the total range on average
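# The ~10% figure above, computed explicitly (an added check; it assumes the observed,
# non-missing range of YEARS_EMPLOYED is the right denominator):
target_range = df1["YEARS_EMPLOYED"].max() - df1["YEARS_EMPLOYED"].min()
print(f"MAE as a share of the target range: {mean_cv_score / target_range:.1%}")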
# Encode categorical variables in the full dataset
for col in categorical_cols:
le = encoders[col]
df_imputed[col] = le.transform(df_imputed[col].astype(str))
# Extract the features from the full dataset
X_full = df_imputed.drop(["YEARS_EMPLOYED"], axis=1)
# Impute missing values using the trained model
y_pred_full = rf.predict(X_full)
df_imputed.loc[df_imputed["YEARS_EMPLOYED"].isnull(), "YEARS_EMPLOYED"] = y_pred_full[
df_imputed["YEARS_EMPLOYED"].isnull()
]
# Decode categorical variables back to original values
for col in categorical_cols:
le = encoders[col]
df_imputed[col] = le.inverse_transform(df_imputed[col])
df_imputed["YEARS_EMPLOYED"]
nulls = df1["YEARS_EMPLOYED"].isnull()
comparison_df = pd.DataFrame(
{
"Original": df1.loc[nulls, "YEARS_EMPLOYED"],
"Imputed": df_imputed.loc[nulls, "YEARS_EMPLOYED"],
}
)
print(comparison_df)
df_imputed[df_imputed["YEARS_EMPLOYED"] == 0]
# **There are widows; they should be removed as they are outliers**
df_imputed.to_csv("imputed using RFR - latest.csv")
|
from keras.datasets import mnist, imdb, reuters
import numpy as np
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
(train_data, train_labels), (test_data, test_labels) = mnist.load_data()
train_data = train_data.reshape((60000, 28 * 28))
test_data = test_data.reshape((10000, 28 * 28))
train_data = train_data.astype("float32") / 255
test_data = test_data.astype("float32") / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model1 = Sequential(
[
Dense(512, activation="relu", input_shape=(28 * 28,)),
Dense(10, activation="softmax"),
]
)
model1.compile(
optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
history1 = model1.fit(train_data, train_labels, batch_size=128, epochs=5)
(loss_value, accuracy_value) = model1.evaluate(test_data, test_labels)
print("accuracy_value: ", accuracy_value * 100)
# # IMDB model
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
word_index = imdb.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
decoded_newswire = " ".join([reverse_word_index.get(i - 3, "?") for i in train_data[0]])
print(decoded_newswire)
def one_hot(x, dim=10000):
results = np.zeros((len(x), dim))
for i, sequence in enumerate(x):
results[i, sequence] = 1.0
return results
train_data = one_hot(train_data)
test_data = one_hot(test_data)
train_labels = np.asarray(train_labels).astype("float32")
test_labels = np.asarray(test_labels).astype("float32")
model2 = Sequential(
[
Dense(16, activation="relu", input_shape=(10000,)),
Dense(16, activation="relu"),
Dense(1, activation="sigmoid"),
]
)
model2.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history2 = model2.fit(
partial_x_train,
partial_y_train,
batch_size=512,
epochs=20,
validation_data=(x_val, y_val),
)
loss_his = history2.history["loss"]
loss_val = history2.history["val_loss"]
epochs = range(1, len(loss_his) + 1)
plt.plot(epochs, loss_his, "bo", label="loss")
plt.plot(epochs, loss_val, "b", label="val_loss")
plt.title("Training and validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
plt.show()
plt.clf()
acc = history2.history["accuracy"]
val_acc = history2.history["val_accuracy"]
epochs = range(1, len(loss_his) + 1)
plt.plot(epochs, acc, "bo", label="acc")
plt.plot(epochs, val_acc, "b", label="val_acc")
plt.title("Training and validation acc")
plt.xlabel("epochs")
plt.ylabel("acc")
plt.legend()
plt.show()
# # **reuters model**
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(
num_words=10000
)
word_index = reuters.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
decoded_newswire = " ".join([reverse_word_index.get(i - 3, "?") for i in train_data[0]])
print("decoded_newswire: ", decoded_newswire)
x_train = one_hot(train_data)
x_test = one_hot(test_data)
one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)
model3 = Sequential(
[
Dense(64, activation="relu", input_shape=(10000,)),
Dense(64, activation="relu"),
Dense(46, activation="softmax"),
]
)
model3.compile(
optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]
history3 = model3.fit(
partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val),
)
results = model3.evaluate(x_test, one_hot_test_labels)
loss = history3.history["loss"]
val_loss = history3.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
plt.clf()
acc = history3.history["accuracy"]
val_acc = history3.history["val_accuracy"]
plt.plot(epochs, acc, "bo", label="Training accuracy")
plt.plot(epochs, val_acc, "b", label="Validation accuracy")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# # **methods implementation**
def naive_relu(x):
assert len(x.shape) == 2
x = x.copy()
for i in range(x.shape[0]):
for j in range(x.shape[1]):
x[i, j] = max(x[i, j], 0)
return x
x = np.array([[1, -2, 3], [-4, 5, -6]])
y = naive_relu(x)
print("Output array:")
print(y)
def naive_add(x, y):
assert len(x.shape) == 2
assert x.shape == y.shape
x = x.copy()
for i in range(x.shape[0]):
for j in range(x.shape[1]):
x[i, j] += y[i, j]
return x
x = np.array([[1, -2, 3], [-4, 5, -6]])
y = np.array([[1, -2, 3], [-4, 5, -6]])
z = naive_add(x, y)
print("Output array:")
print(z)
def naive_add_matrix_vector(x, y):
assert len(x.shape) == 2
assert len(y.shape) == 1
assert x.shape[1] == y.shape[0]
x = x.copy()
for i in range(x.shape[0]):
for j in range(x.shape[1]):
x[i, j] += y[j]
return x
x = np.array([[1, 2], [3, 4], [5, 6]])
y = np.array([1, 2])
z = naive_add_matrix_vector(x, y)
print("Output array:")
print(z)
def naive_vector_dot(x, y):
assert len(x.shape) == 1
assert len(y.shape) == 1
z = 0.0
for i in range(x.shape[0]):
z += x[i] * y[i]
return z
x = np.array([1, 2, 3])
y = np.array([1, 2, 3])
z = naive_vector_dot(x, y)
print("Output array:")
print(z)
def naive_matrix_vector_dot(x, y):
assert len(x.shape) == 2
assert len(y.shape) == 1
assert x.shape[1] == y.shape[0]
z = np.zeros(x.shape[0])
for i in range(x.shape[0]):
for j in range(x.shape[1]):
z[i] += x[i, j] * y[j]
return z
x = np.array([[1, 2], [3, 4], [5, 6]])
y = np.array([1, 2])
z = naive_matrix_vector_dot(x, y)
print("Output array:")
print(z)
def naive_matrix_vector_dot(x, y):
z = np.zeros(x.shape[0])
for i in range(x.shape[0]):
z[i] = naive_vector_dot(x[i, :], y)
return z
x = np.array([[1, 2], [3, 4], [5, 6]])
y = np.array([1, 2])
z = naive_matrix_vector_dot(x, y)
print("************************************")
print("Output array:")
print(z)
def naive_matrix_dot(x, y):
assert len(x.shape) == 2
assert len(y.shape) == 2
assert x.shape[1] == y.shape[0]
z = np.zeros((x.shape[0], y.shape[1]))
for i in range(x.shape[0]):
for j in range(y.shape[1]):
row_x = x[i, :]
column_y = y[:, j]
z[i, j] = naive_vector_dot(row_x, column_y)
return z
x = np.array([[1, 2], [3, 4], [5, 6]])
y = np.array([[7, 8], [9, 10]])
z = naive_matrix_dot(x, y)
print("Output array:")
print(z)
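# A quick added sanity check that the naive implementations above agree with numpy's
# built-in vectorized operations:
assert np.allclose(naive_matrix_dot(x, y), np.dot(x, y))
assert np.array_equal(
    naive_relu(np.array([[1, -2, 3], [-4, 5, -6]])),
    np.maximum(np.array([[1, -2, 3], [-4, 5, -6]]), 0),
)
print("naive implementations match numpy")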
|
import requests
import pandas as pd
import csv
import time
# Binance API URL "https://api.binance.com/api/v3/klines"
url = "https://api.binance.com/api/v3/klines"
# We take Bitcoin (BTC) against USDT as the cryptocurrency
symbol = "BTCUSDT"
# The interval is 1 minute
interval = "1m"
# Number of minutes in 99 days
minutes_in_m = 99 * 24 * 60
# Create an empty list to which we will append the data
data = []
for i in range(minutes_in_m):
    # Compute the start and end time of the interval
start_time = int(time.time() - i * 60)
end_time = start_time + 60
    # Print information about the current iteration
print(
f"Iteration {i + 1} / {minutes_in_m}, collecting data for {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))}..."
)
    # Build the request parameters
params = {
"symbol": symbol,
"interval": interval,
"startTime": start_time * 1000,
"endTime": end_time * 1000,
}
    # Send a GET request to the API using the Requests library
response = requests.get(url, params=params)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
if "code" in response.json():
print(f"Error occurred: {response.json()['msg']}")
else:
raise err
    # If the request succeeded, append the data to the data list
if response.status_code == 200:
json_data = response.json()
if len(json_data) > 0:
quote = json_data[0]
timestamp = time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime(quote[0] / 1000)
)
data.append([timestamp, quote[1], quote[2], quote[3], quote[4]])
else:
print("No data in the response")
else:
print(f"Request failed with status code {response.status_code}")
# Create a DataFrame from the data list
df = pd.DataFrame(data, columns=["Timestamp", "Open", "High", "Low", "Close"])
# Save the DataFrame to a csv file
df.to_csv("general_variant.FIASCO.csv", index=False)
# Print a message that the script has finished
print(
f"Data collection complete. {len(data)} rows of data were collected and saved to file 'general_variant.FIASCO.csv'."
)
import requests
url = "https://api.binance.com/api/v3/depth"
response = requests.get(url)
if response.status_code == 200:
print("API connection successful")
else:
print("API connection failed")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
mnist_data = tf.keras.datasets.mnist.load_data()
(X_train_full, y_train_full), (X_test, y_test) = mnist_data
X_train, y_train = X_train_full[:-5000], y_train_full[:-5000]
X_valid, y_valid = X_train_full[-5000:], y_train_full[-5000:]
print(set(y_train_full))
plt.imshow(X_train[11], cmap="binary")
plt.axis("off")
plt.show()
X_train, X_valid, X_test = X_train / 255.0, X_valid / 255.0, X_test / 255.0
# add a channel axis so the arrays match the Conv2D input shape of (28, 28, 1)
X_train, X_valid, X_test = [a.reshape((-1, 28, 28, 1)) for a in (X_train, X_valid, X_test)]
X_train[0].shape
set(y_train_full)
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3), activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=128, activation="relu"))
classifier.add(Dense(units=10, activation="softmax"))
classifier.summary()
classifier.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
classifier.fit(X_train, y_train, epochs=50, validation_data=(X_valid, y_valid))
test_set = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
test_set.head()
test_set.shape
test_set = test_set.values.astype("float32") / 255.0
test_set = test_set.reshape((28000, 28, 28, 1))
predictions_test = classifier.predict(test_set)
predictions_test[:10]
ImageId = []
Label = []
for i in range(len(predictions_test)):
ImageId.append(i + 1)
Label.append(predictions_test[i].argmax())
submissions = pd.DataFrame({"ImageId": ImageId, "Label": Label})
submissions.to_csv("submission.csv", index=False, header=True)
|