import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Reading the datasets
data_ncov_c = pd.read_csv("../input/novel-coronavirus-2019ncov/coronavirus_conf.csv")
data_ncov_r = pd.read_csv("../input/novel-coronavirus-2019ncov/coronavirus_reco.csv")
data_ncov_d = pd.read_csv("../input/novel-coronavirus-2019ncov/coronavirus_death.csv")
data_ncov_c.head(10)
# Confirmed countries and regions affected by virus
places = data_ncov_c["Country/Region"].unique().tolist()
print(places)
print("\nTotal countries and regions affected by virus: ", len(places))
## Hong Kong, Macau are separated from China
data_ncov_c.groupby(["Country/Region", "Province/State"]).sum()
from shapely.geometry import Point
import geopandas as gpd
from geopandas import GeoDataFrame
geometry_c = [
Point(xy)
for xy in zip(
data_ncov_c["Long"].astype("float"), data_ncov_c["Lat"].astype("float")
)
]
geometry_r = [
Point(xy)
for xy in zip(
data_ncov_r["Long"].astype("float"), data_ncov_r["Lat"].astype("float")
)
]
geometry_d = [
Point(xy)
for xy in zip(
data_ncov_d["Long"].astype("float"), data_ncov_d["Lat"].astype("float")
)
]
gdf_c = GeoDataFrame(data_ncov_c, geometry=geometry_c)
gdf_r = GeoDataFrame(data_ncov_r, geometry=geometry_r)
gdf_d = GeoDataFrame(data_ncov_d, geometry=geometry_d)
# this is a simple map that goes with geopandas
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
gdf_c.plot(ax=world.plot(figsize=(15, 10)), marker="o", color="red", markersize=15)
gdf_r.plot(ax=world.plot(figsize=(15, 10)), marker="o", color="green", markersize=15)
gdf_d.plot(ax=world.plot(figsize=(15, 10)), marker="o", color="black", markersize=15)
# ## Number of cases (confirmed, recovered, deaths) in China
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
fig, ax = plt.subplots(figsize=(15, 10))
sns.set_color_codes("pastel")
# Pass the column name as x so seaborn draws from the filtered `data` frame
# (passing the full Series would not line up with the subset).
sns.barplot(
    x="2/8/2020 10:04 PM",
    y="Province/State",
    data=data_ncov_c[data_ncov_c["Country/Region"] == "Mainland China"][1:],
    label="Number of cases confirmed on Feb 8, 2020",
    color="b",
)
sns.barplot(
    x="2/8/2020 10:04 PM",
    y="Province/State",
    data=data_ncov_r[data_ncov_r["Country/Region"] == "Mainland China"][1:],
    label="Number of cases recovered on Feb 8, 2020",
    color="g",
)
sns.barplot(
    x="2/8/2020 10:04 PM",
    y="Province/State",
    data=data_ncov_d[data_ncov_d["Country/Region"] == "Mainland China"][1:],
    label="Number of deaths on Feb 8, 2020",
    color="black",
)
# Add a legend and informative axis label
ax.legend(ncol=1, loc="upper right", frameon=True)
ax.set(xlim=(0, 1500), ylabel="", xlabel="Number of cases") # xmax up to ~27000
sns.despine(left=True, bottom=True)
data_ncov_c.head(3)
# Note: the reporting time of day varies from column to column.
sum_21 = data_ncov_c.groupby(["Country/Region"])["1/21/2020 10:00 PM"].sum()
sum_22 = data_ncov_c.groupby(["Country/Region"])["1/22/2020 12:00 PM"].sum()
sum_23 = data_ncov_c.groupby(["Country/Region"])["1/23/2020 12:00 PM"].sum()
sum_24 = data_ncov_c.groupby(["Country/Region"])["1/24/2020 12:00 PM"].sum()
sum_25 = data_ncov_c.groupby(["Country/Region"])["1/25/2020 12:00 PM"].sum()
sum_26 = data_ncov_c.groupby(["Country/Region"])["1/26/2020 11:00 PM"].sum()
sum_27 = data_ncov_c.groupby(["Country/Region"])["1/27/2020 8:30 PM"].sum()
sum_28 = data_ncov_c.groupby(["Country/Region"])["1/28/2020 11:00 PM"].sum()
sum_29 = data_ncov_c.groupby(["Country/Region"])["1/29/2020 9:00 PM"].sum()
sum_30 = data_ncov_c.groupby(["Country/Region"])["1/30/2020 11:00 AM"].sum()
sum_31 = data_ncov_c.groupby(["Country/Region"])["1/31/2020 7:00 PM"].sum()
sum_01 = data_ncov_c.groupby(["Country/Region"])["2/1/2020 10:00 AM"].sum()
sum_02 = data_ncov_c.groupby(["Country/Region"])["2/2/2020 9:00 PM"].sum()
sum_03 = data_ncov_c.groupby(["Country/Region"])["2/3/2020 9:00 PM"].sum()
sum_04 = data_ncov_c.groupby(["Country/Region"])["2/4/2020 9:40 AM"].sum()
sum_05 = data_ncov_c.groupby(["Country/Region"])["2/5/2020 11:00 PM"].sum()
sum_06 = data_ncov_c.groupby(["Country/Region"])["2/6/2020 2:20 PM"].sum()
sum_07 = data_ncov_c.groupby(["Country/Region"])["2/7/2020 10:50 PM"].sum()
sum_08 = data_ncov_c.groupby(["Country/Region"])["2/8/20 23:04"].sum()
sum_21
plt.figure(figsize=(20, 6))
ccc = [
"Australia",
"Belgium",
"Cambodia",
"Canada",
"Finland",
"France",
"Germany",
"Hong Kong",
"India",
"Italy",
"Japan",
"Macau",
"Mainland China",
"Malaysia",
"Nepal",
"Others",
"Philippines",
"Russia",
"Singapore",
"South Korea",
"Spain",
"Sri Lanka",
"Sweden",
"Taiwan",
"Thailand",
"UK",
"US",
"United Arab Emirates",
"Vietnam",
]
ax = plt.gca()
ax.set_yscale("log")
ax.set_xticklabels(ccc)
## Jan
sum_21.plot(marker="")
sum_22.plot(marker="")
sum_23.plot(marker="")
sum_24.plot(marker="")
sum_25.plot(marker="")
sum_26.plot(marker="")
sum_27.plot(marker="")
sum_28.plot(marker="")
sum_29.plot(marker="")
sum_30.plot(marker="")
sum_31.plot(marker="")
## Feb
sum_01.plot(marker="")
sum_02.plot(marker="")
sum_03.plot(marker="")
sum_04.plot(marker="")
sum_05.plot(marker="")
sum_06.plot(marker="")
sum_07.plot(marker="")
sum_08.plot(marker="")
# for l in ax.get_lines():
# l.remove()
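# An alternative sketch (not part of the original notebook): gather the per-country sums
# computed above into one DataFrame so the trend can be plotted without calling .plot()
# on each Series; the short date labels below are purely illustrative.
daily_sums = pd.DataFrame(
    {
        "Jan 21": sum_21, "Jan 22": sum_22, "Jan 23": sum_23, "Jan 24": sum_24,
        "Jan 25": sum_25, "Jan 26": sum_26, "Jan 27": sum_27, "Jan 28": sum_28,
        "Jan 29": sum_29, "Jan 30": sum_30, "Jan 31": sum_31, "Feb 1": sum_01,
        "Feb 2": sum_02, "Feb 3": sum_03, "Feb 4": sum_04, "Feb 5": sum_05,
        "Feb 6": sum_06, "Feb 7": sum_07, "Feb 8": sum_08,
    }
)
# One line per country, log scale on the y-axis as in the plot above.
daily_sums.T.plot(figsize=(20, 6), logy=True, legend=False)
plt.show()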
|
# ## Imports
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from autoviz.classify_method import data_cleaning_suggestions
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from pycaret.classification import *
# ## Data
df = pd.read_csv("/kaggle/input/mushroom-attributes/mushroom.csv")
df.head()
df.describe().T
df.info()
# ### renaming the target
df.rename({"class": "poisonous"}, axis=1, inplace=True)
# # Visualization
# ---
# ## Poisonous frequency
class_counts = df["poisonous"].value_counts()
plt.pie(
    class_counts,
    labels=["not poisonous", "poisonous"],
    autopct="%.3f%%",
    explode=[0, 0.03],
)
# ## Comparing "poisonous" to everything else
for g in df.drop("poisonous", axis=1).columns:
sns.countplot(data=df, x="poisonous", hue=g, palette="viridis")
plt.show()
# ## Histogram of all df
lista_colunas = list(df.columns)
plt.figure(figsize=(12, 20))
for i in range(len(lista_colunas)):
plt.subplot(8, 3, i + 1)
plt.title(lista_colunas[i])
plt.hist(df[lista_colunas[i]])
plt.tight_layout()
data_cleaning_suggestions(df)
# ## Data processing
# getting object columns
cat_cols = df.select_dtypes(include=["object"]).columns
# encode values
le = LabelEncoder()
for col in cat_cols:
le.fit(df[col])
df[col] = le.transform(df[col])
# ## Correlation
plt.figure(figsize=(20, 15))
sns.heatmap(df.corr(), annot=True, cmap="viridis")
df.head()
# ## Split train/test
X = df.drop("poisonous", axis=1)
y = df["poisonous"]
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=42)
# ## Using pycaret to compare the models
train_dados = pd.concat([xtrain, ytrain], axis=1)
s = setup(data=train_dados, target="poisonous", session_id=0, normalize=True)
# ## Comparing models
compare_models()
# ## Creating the model
dt = create_model("dt")
# ## Predictions
preds = predict_model(dt)
preds
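# A possible follow-up (a sketch, not in the original notebook): PyCaret's predict_model can
# also score the hold-out split created earlier with train_test_split, which is otherwise unused.
test_dados = pd.concat([xtest, ytest], axis=1)
holdout_preds = predict_model(dt, data=test_dados)
holdout_preds.head()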
# ## Plots
plot_model(dt)
plot_model(dt, plot="error")
plot_model(dt, plot="learning")
plot_model(dt, plot="vc")
|
# ## Loan Default Risk using EDA
# - Conducted exploratory data analysis (EDA) on a loan dataset to identify patterns related to loan default.
# - Used statistical techniques and visualizations to understand the relationship between various customer characteristics and the likelihood of default.
# - Identified which types of customers were most likely to default on their loans, based on factors such as income, credit score, and employment status.
# - Presented findings in a clear and actionable manner, making recommendations for how the company could mitigate loan default risk.
# Import python libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Supress warnings
import warnings
warnings.filterwarnings("ignore")
# It will display all columns
pd.set_option("display.max_columns", None)
# ### Checking the dataset
# Reading and inspection
inp0 = pd.read_csv("/Users/sakshimunde/Downloads/loan.csv")
# ### Data Understanding
inp0
inp0.columns
# ### Data Cleaning
# seeing number of null values in each column in percentage
round(100 * (inp0.isnull().sum() / len(inp0.index)), 3)
# seeing columns that have null values more than 30%
null_columns = inp0.columns[100 * (inp0.isnull().sum() / len(inp0.index)) > 30]
null_columns
# dropping columns that have null values more than 30%
inp0 = inp0.drop(null_columns, axis=1)
inp0.columns
# summarising each row null values
inp0.isnull().sum(axis=1)
# summarising rows having more than 5 null values
len(inp0[inp0.isnull().sum(axis=1) > 5].index)
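# Optional (a sketch, not applied below): the sparse rows counted above could be filtered out
# like this; `inp0_dense` is an illustrative name and is not used in the rest of the analysis.
inp0_dense = inp0[inp0.isnull().sum(axis=1) <= 5]
inp0_dense.shape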
inp0.shape
inp0.head()
# dropping columns that are not useful for further analysis
inp0 = inp0.drop(
[
"delinq_2yrs",
"earliest_cr_line",
"inq_last_6mths",
"open_acc",
"pub_rec",
"revol_bal",
"revol_util",
"total_acc",
"out_prncp",
"out_prncp_inv",
"total_pymnt",
"total_pymnt_inv",
"total_rec_prncp",
"total_rec_int",
"total_rec_late_fee",
"recoveries",
"collection_recovery_fee",
"last_pymnt_d",
"last_pymnt_amnt",
"last_credit_pull_d",
"application_type",
],
axis=1,
)
# determining the number of rows and columns
inp0.shape
inp0.head()
# dropping more columns that are not useful for further analysis
inp0 = inp0.drop(
[
"tax_liens",
"delinq_amnt",
"chargeoff_within_12_mths",
"acc_now_delinq",
"policy_code",
"initial_list_status",
"collections_12_mths_ex_med",
"zip_code",
"addr_state",
"url",
"title",
"pymnt_plan",
"emp_title",
"pub_rec_bankruptcies",
],
axis=1,
)
# Let's see the dimension
inp0.shape
# checking the data types
inp0.info()
# #### Converting data types
# converting interest rate datatype
inp0["int_rate"] = inp0["int_rate"].str.rstrip("%")
inp0["int_rate"] = inp0["int_rate"].astype(float)
inp0["int_rate"].dtype
# Converting data type of term column
inp0["term"] = inp0["term"].str.rstrip()
inp0["term"] = inp0["term"].str.split().str[0]
inp0["term"] = inp0["term"].astype("int")
inp0.term.dtype
# Changing term column name to term months
inp0.rename(columns={"term": "term_months"}, inplace=True)
# extracting month and year from issue_d column
inp0["issue_d"] = pd.to_datetime(inp0["issue_d"], format="%b-%y")
inp0["issue_month"] = inp0["issue_d"].dt.month
inp0["issue_year"] = inp0["issue_d"].dt.year
inp0["issue_month"]
inp0["issue_year"]
# Seeing unique values in emp length
inp0.emp_length.unique()
# Checking null values in emp length
inp0 = inp0[~inp0.emp_length.isnull()]
inp0.emp_length.isnull().sum()
# cleaning emp_length: strip '+', '<' and 'year(s)', map '< 1 year' to 0 and 'n/a' to 10
inp0["emp_length"] = inp0["emp_length"].str.replace("+", "")
inp0["emp_length"] = inp0["emp_length"].str.replace("<1 year", "0")
inp0["emp_length"] = inp0["emp_length"].str.replace("year", "")
inp0["emp_length"] = inp0["emp_length"].str.replace("years", "")
inp0["emp_length"] = inp0["emp_length"].str.replace("n/a", "10")
inp0["emp_length"] = inp0["emp_length"].str.replace("<", "")
inp0["emp_length"] = inp0["emp_length"].str.rstrip()
inp0["emp_length"] = inp0["emp_length"].str.split().str[0]
# Converting emp length to numeric
inp0["emp_length"] = pd.to_numeric(inp0["emp_length"])
# seeing the count of emp length
inp0["emp_length"].value_counts()
# Checking whether there are null values or not
inp0.isnull().sum()
# Summarising the loan status values
inp0["loan_status"].value_counts()
# Converting loan status to category
inp0["loan_status"] = inp0["loan_status"].astype("category")
# 'Current' loans are still running, so they are not useful for default analysis
inp0 = inp0[inp0["loan_status"] != "Current"]
# seeing the count of loan status
inp0["loan_status"].value_counts()
# Map 'Fully Paid' to 0 and 'Charged Off' to 1 to simplify further analysis
inp0["loan_status"] = inp0["loan_status"].apply(lambda x: 0 if x == "Fully Paid" else 1)
inp0.loan_status.value_counts()
# Converting loan status to numeric
inp0["loan_status"] = pd.to_numeric(inp0["loan_status"])
inp0["loan_status"].dtype
# ##### Deriving Insights
# ## Univariate analysis of unordered categorical variables
inp0.head(2)
# summarising loan status statistically
inp0.loan_status.describe()
# - average default rate is 14%
# loan variable analysis
inp0.loan_status.value_counts().plot.pie(autopct="%.2f%%")
plt.title("Distribution of loan by loan status")
plt.show()
# - about 14% of loans defaulted, while around 85% were fully paid
# home ownership variable analysis
inp0.home_ownership.value_counts(normalize=True) * 100
# let's replace the 'NONE' category with 'RENT'
inp0["home_ownership"] = inp0["home_ownership"].str.replace("NONE", "RENT")
inp0["home_ownership"].value_counts()
# plot bar graph to see the distribution of loan by home ownership
inp0.home_ownership.value_counts().plot.bar()
plt.title("Distribution of loan by Home ownership")
plt.xlabel("Home ownership")
plt.ylabel("Number of loans issued")
plt.show()
# - most loan applicants live in rented or mortgaged homes
# verification status analysis
inp0.verification_status.value_counts(normalize=True) * 100
# distribution of loan by verification status
inp0.verification_status.value_counts().plot.bar()
plt.title("Distribution of loan by verification_status")
plt.xlabel("verification_status")
plt.ylabel("Number of loans issued")
plt.show()
# - most applicants' income is not verified
# distribution of loan by purpose
inp0["purpose"].value_counts(normalize=True) * 100
# plot bar graph to see the loan distribution by purpose
inp0["purpose"].value_counts().plot.bar()
plt.title("Distribution of loan by Purpose")
plt.xlabel("purpose")
plt.ylabel("number of loan issued")
plt.show()
# - debt consolidation is the most common loan purpose
# ### Univariate ordered categorical data analysis
# distribution of loan by term months
inp0.term_months.value_counts()
# distribution of loan by grade
plt.figure(figsize=[10, 7])
plt.subplot(3, 2, 1)
inp0.grade.value_counts().plot.bar()
plt.title(
"Distribution of loan by grade", fontdict={"fontsize": "15", "color": "Purple"}
)
plt.xlabel("grade", fontdict={"fontsize": "10", "color": "Green"})
plt.ylabel("Number of loans issued", fontdict={"fontsize": "10", "color": "Green"})
# Distribution of loan by emp_length
plt.subplot(3, 2, 2)
inp0["emp_length"].value_counts().plot.bar()
plt.title(
"Distribution of loan by emp_length", fontdict={"fontsize": "15", "color": "Purple"}
)
plt.xlabel("emp_length", fontdict={"fontsize": "10", "color": "Green"})
plt.ylabel("Number of loans issued", fontdict={"fontsize": "10", "color": "Green"})
# distribution of loan by issue_month
plt.subplot(3, 2, 3)
inp0["issue_month"].value_counts().plot.bar()
plt.title(
"Distribution of loan by issue_month",
fontdict={"fontsize": "15", "color": "Purple"},
)
plt.xlabel("issue_month", fontdict={"fontsize": "10", "color": "Green"})
plt.ylabel("Number of loans issued", fontdict={"fontsize": "10", "color": "Green"})
# analysis of issue_year variable
plt.subplot(3, 2, 4)
sns.countplot(data=inp0, x="issue_year")
plt.title(
"Distribution of loan by issue_year", fontdict={"fontsize": "15", "color": "Purple"}
)
plt.xlabel("issue_year", fontdict={"fontsize": "10", "color": "Green"})
plt.ylabel("Number of loans issued", fontdict={"fontsize": "10", "color": "Green"})
# analysis of term month variable
plt.subplot(3, 2, 5)
sns.countplot(data=inp0, x="term_months")
plt.title(
"Distribution of loan by term month", fontdict={"fontsize": "15", "color": "Purple"}
)
plt.xlabel("term_months", fontdict={"fontsize": "10", "color": "Green"})
plt.ylabel("Number of loans issued", fontdict={"fontsize": "10", "color": "Green"})
plt.tight_layout()
plt.show()
# - 1. B and A grade loans are the most numerous, while F and G grades have the fewest
# - 2. most loans are taken by employees with 10 years of experience
# - 3. December (month 12) has the highest number of loans issued
# - 4. 2011 is the year in which the most loans were issued
# - 5. most loans have a 36-month term
# distribution of loan by sub_grade
plt.figure(figsize=[30, 7])
plt.subplot(2, 3, 5)
inp0.sub_grade.value_counts().plot.bar()
plt.title("Distribution of loan by sub grade")
plt.xlabel("sub grade", fontdict={"fontsize": "10", "color": "Purple"})
plt.ylabel("Number of loans issued", fontdict={"fontsize": "13", "color": "Purple"})
# - the most common sub-grades are A4, B3, A5 and B4; the least common are G1 through G5
# ### Univariate numerical analysis
inp0.head(2)
# summarising loan amount statistically
inp0.loan_amnt.describe()
# the median loan amount is 10,000
# distribution of loan amount (histplot; distplot is deprecated in newer seaborn)
sns.histplot(inp0["loan_amnt"], kde=True)
# the easiest way to analyse this variable is to bin the values
# binning loan amount into low, medium, high, very high
def loan_amount(n):
if n < 5000:
return "low"
elif n >= 5000 and n < 15000:
return "medium"
elif n >= 15000 and n < 25000:
return "high"
else:
return "very high"
# Converting loan amount to bins
inp0["loan_amnt"] = inp0.loan_amnt.apply(lambda x: loan_amount(x))
inp0.loan_amnt.value_counts()
# seeing loan by loan amount using bar plot
inp0.loan_amnt.value_counts().plot.bar()
plt.title("Distribution of loan by loan amount")
plt.xlabel("loan amount")
plt.ylabel("Number of loans issued")
plt.show()
# - the medium bucket (5k-15k) is the largest: around 20,000 loans were issued in this range
# analysing funded amount: converting funded amount to bins
inp0["funded_amnt"] = inp0.funded_amnt.apply(lambda x: loan_amount(x))
# seeing count of funded amount
inp0["funded_amnt"].value_counts()
# bar plot of loan amount by funded amount
inp0.funded_amnt.value_counts().plot.bar()
plt.title("Distribution of loan by funded amount")
plt.xlabel("Funded amount")
plt.ylabel("Number of loans issued")
plt.show()
# - most funded amounts fall in the 5k-15k range
# converting funded_amnt_inv to bins
inp0.funded_amnt_inv
# converting funded amount inv to bins
inp0["funded_amnt_inv"] = inp0["funded_amnt_inv"].apply(lambda x: loan_amount(x))
inp0["funded_amnt_inv"].value_counts()
# plot bar graph of loan data using funded amount_inv
inp0["funded_amnt_inv"].value_counts().plot.bar()
plt.title("Distribution of loan by funded_amnt_inv")
plt.xlabel("funded_amnt_inv")
plt.ylabel("Number of loans issued")
plt.show()
# - most invested amounts also fall in the 5k-15k range
# seeing unique values of interest rate
inp0.int_rate.unique()
# converting interest rate to bins to low,medium,high
def interest_rate(n):
if n <= 10:
return "low"
elif n > 10 and n < 15:
return "medium"
else:
return "high"
inp0["int_rate"] = inp0["int_rate"].apply(lambda x: interest_rate(x))
inp0["int_rate"]
# plotting bar graph to see loan by interest rate
inp0["int_rate"].value_counts().plot.bar()
plt.title("Distribution of loan by interest rate")
plt.xlabel("Interest rate")
plt.ylabel("Number of loans issued")
plt.show()
# - loans with a medium interest rate (10-15%) are issued most often
# converting debt to income variable value's to bins to low,medium,high
inp0.dti
# defining function to bin dti to low ,medium and high
def dti_1(n):
if n <= 10:
return "low"
elif n > 10 and n <= 20:
return "medium"
else:
return "high"
# applying defined function to dti to convert into bins
inp0["dti"] = inp0["dti"].apply(lambda x: dti_1(x))
# seeing the count of dti
inp0["dti"].value_counts()
# bar plot to see the distribution of loan by dti
inp0["dti"].value_counts().plot.bar()
plt.title("Distribution of loan by dti")
plt.xlabel("debt to income")
plt.ylabel("Number of loans issued")
plt.show()
# converting installment column to bins to low,medium,high,very high
inp0.installment
def installment_1(n):
if n < 200:
return "low"
elif n >= 200 and n < 400:
return "medium"
elif n >= 400 and n < 600:
return "high"
else:
return "very high"
# applying defined function to installment to convert into bins
inp0["installment"] = inp0["installment"].apply(lambda x: installment_1(x))
inp0["installment"].value_counts()
inp0["installment"].value_counts().plot.bar()
plt.title("Distribution of loan by installment")
plt.xlabel("installment")
plt.ylabel("Number of loans issued")
plt.show()
# converting annual_inc to bins to low ,medium,high,very high
inp0.annual_inc.describe()
inp0.annual_inc.describe().apply(lambda x: format(x, "f"))
# converting annual income to bins
def annual_income(n):
if n < 50000:
return "low"
elif n >= 50000 and n < 100000:
return "medium"
elif n >= 100000 and n < 150000:
return "high"
else:
return "very high"
inp0["annual_inc"] = inp0["annual_inc"].apply(lambda x: annual_income(x))
inp0["annual_inc"].value_counts()
inp0.annual_inc.value_counts().plot.bar()
plt.title("Distribution of loan by annual income")
plt.xlabel("annual income")
plt.ylabel("Number of loans issued")
plt.show()
# ## Bivariate and Multivariate analysis
inp0.head(2)
# Number of loans by home ownership
inp0.groupby("home_ownership")["loan_status"].count()
# bar plot of loan counts by home ownership
inp0.groupby("home_ownership")["loan_status"].count().plot.bar()
plt.title("Number of loans by home ownership")
plt.ylabel("number of loans")
plt.show()
# - most loans (and hence most defaulters in absolute terms) come from the Rent and Mortgage
#   categories, so lenders should be careful when lending to borrowers who rent or have a mortgage
# default rate by purpose
inp0.groupby("purpose")["loan_status"].mean()
# bar plot of default rates by purpose
plt.figure(figsize=[10, 5])
inp0.groupby("purpose")["loan_status"].mean().plot.bar()
plt.title("Default rates by purpose")
plt.xticks(rotation=90)
plt.ylabel("loan_status")
plt.show()
# - small business loans default most often (followed by renewable energy), so lenders should be careful when lending to small business borrowers
# default rates across grade
inp0.groupby("grade")["loan_status"].mean().plot.bar()
plt.ylabel("loan_status")
plt.show()
# - default rates are highest for E, F and G grades and lowest for A grade
# bar plot of default rates by sub grade
inp0.groupby("sub_grade")["loan_status"].mean().plot.bar()
plt.ylabel("loan_status")
plt.show()
# - As with grade, default rates increase from the A sub-grades to the G sub-grades.
# - F5 shows the highest default rate.
# default rate across term month
sns.barplot(data=inp0, x="term_months", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - 60-month loans have a higher default rate than 36-month loans, so lenders should be more careful when issuing 60-month loans
# default rate across verification status
sns.barplot(data=inp0, x="verification_status", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - verified loans default more often than not-verified loans
# default rate across year
sns.barplot(data=inp0, x="issue_year", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - the default rate was high in 2007, declined in the following years, and then rose again in 2011
# default rates across months
sns.barplot(data=inp0, x="issue_month", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - not much variation across months
# loan status vs loan amount
sns.barplot(data=inp0, x="loan_amnt", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - the higher the loan amount, the higher the default rate; it is highest in the 'very high' bucket (above 25,000)
# default rate across funded amnt
sns.barplot(data=inp0, x="funded_amnt", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - the higher the funded amount, the higher the default rate
# default rate across funded amnt inv
sns.barplot(data=inp0, x="funded_amnt_inv", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# default rates across int rate
sns.barplot(data=inp0, x="int_rate", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - the higher the interest rate, the higher the default rate; lenders should be aware that high-interest loans are more likely to default
# default rates across dti
sns.barplot(data=inp0, x="dti", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - the higher the debt-to-income ratio, the higher the default rate
# default rate across installment
sns.barplot(data=inp0, x="installment", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - larger installments lead to more defaults; lenders should be cautious about allowing large installments
# loan status vs annual income
sns.barplot(data=inp0, x="annual_inc", y="loan_status")
plt.ylabel("loan_status")
plt.show()
# - applicants with lower annual income default more often, so lenders should check that borrowers have sufficient income before approving a loan
# loan status vs experience
sns.barplot(data=inp0, x="emp_length", y="loan_status")
plt.show()
# - not much difference
# purpose
plt.figure(figsize=(16, 6))
sns.countplot(x="purpose", data=inp0)
plt.xticks(rotation=70)
plt.show()
# - most loans (and hence most defaulters in absolute terms) are for debt consolidation, credit card, home improvement and major purchase
# purpose vs home ownership: which combinations default more
plt.figure(figsize=[20, 7])
sns.barplot(data=inp0, x="home_ownership", hue="purpose", y="loan_status")
plt.show()
# - In general debt consolidation loans have the highest default rates.
# purpose vs interest rate
plt.figure(figsize=[20, 7])
sns.barplot(data=inp0, x="int_rate", hue="purpose", y="loan_status")
plt.show()
# dti vs purpose
plt.figure(figsize=[20, 7])
sns.barplot(data=inp0, x="dti", hue="purpose", y="loan_status")
plt.show()
# purpose vs grade
plt.figure(figsize=[20, 7])
sns.barplot(data=inp0, x="grade", hue="purpose", y="loan_status")
plt.show()
# - Default rates are higher in the E, F and G grades; within grade F, renewable energy loans default most.
# - Within grade G, defaults are highest for the moving and car purposes.
# ### Default rates across different categories
# #### Difference between highest and lowest default rate across the categories
def diff_rate(cat_var):
    # default rates per category, sorted from highest to lowest
    default_rates = (
        inp0.groupby(cat_var).loan_status.mean().sort_values(ascending=False)
    )
    # return the rounded rates and the spread (highest minus lowest)
    return (round(default_rates, 2), round(default_rates.iloc[0] - default_rates.iloc[-1], 2))
default_rates, diff = diff_rate("annual_inc")
print(default_rates)
print(diff)
# the spread is 0.06, i.e. the default rate is about 6 percentage points higher for low annual income than for high annual income
default_rates, diff = diff_rate("loan_amnt")
print(default_rates)
print(diff)
# 8% increase in the default rate when you go from high to low loan amount
default_rates, diff = diff_rate("funded_amnt_inv")
print(default_rates)
print(diff)
# 6% increase in the default rate as you go from high to low funded amount invested
default_rates, diff = diff_rate("verification_status")
print(default_rates)
print(diff)
# 4% increase in default rate
default_rates, diff = diff_rate("dti")
print(default_rates)
print(diff)
# 4% increase in default rate as you go from high to low
default_rates, diff = diff_rate("home_ownership")
print(default_rates)
print(diff)
# 5% increase in default rate
default_rates, diff = diff_rate("purpose")
print(default_rates)
print(diff)
# a 17 percentage point spread in default rate across loan purposes
default_rates, diff = diff_rate("sub_grade")
print(default_rates)
print(diff)
# a 46 percentage point spread across sub-grades, the largest spread of all the variables
default_rates, diff = diff_rate("grade")
print(default_rates)
print(diff)
# a 28 percentage point spread in default rate across grades
default_rates, diff = diff_rate("funded_amnt")
print(default_rates)
print(diff)
# 7% increase in default rate
default_rates, diff = diff_rate("installment")
print(default_rates)
print(diff)
# 3% increase in default rate
default_rates, diff = diff_rate("int_rate")
print(default_rates)
print(diff)
# 2% increase in default rate
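# A compact wrap-up (a sketch reusing diff_rate from above): rank the analysed categorical
# variables by their default-rate spread to see which ones separate defaulters best.
spread_by_variable = {
    var: diff_rate(var)[1]
    for var in [
        "annual_inc", "loan_amnt", "funded_amnt_inv", "verification_status", "dti",
        "home_ownership", "purpose", "sub_grade", "grade", "funded_amnt",
        "installment", "int_rate",
    ]
}
pd.Series(spread_by_variable).sort_values(ascending=False)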
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
os.path.join(dirname, filename)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Importing Libraries & Modules
import torch
from torch import Tensor, nn
from torchvision import utils as U
from torchvision.transforms import transforms as T
from torch.utils.data import DataLoader
from PIL import Image
import torch.optim
import torch.nn.functional as F
from pathlib import Path
import matplotlib.pyplot as plt
import keras
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ## Pre-Processing Images
# Define function to map labels for test and validation datasets.
dictionary = {0: "NORMAL", 1: "PNEUMONIA"}
def map_img(img):
label = img.parent.name
img = Image.open(img).convert("L")
img = img.resize((150, 150))
y = (
lambda x: torch.tensor(0, device=device)
if x == dictionary[0]
else torch.tensor(1, device=device)
)
process = T.Compose([T.ConvertImageDtype(dtype=torch.float32)])
return img, process(y(label))
# Defining function to load images into stacks of tensors.
def load_tensors(path):
return [map_img(img) for folder in path.iterdir() for img in list(folder.iterdir())]
# Loading path of test, training and validation images.
test = Path("/kaggle/input/chest-xray-pneumonia/chest_xray/test")
val = Path("/kaggle/input/chest-xray-pneumonia/chest_xray/val")
# Loading tensors.
test_ts = load_tensors(test)
val_ts = load_tensors(val)
# Randomly sample from both classes of the training dataset, due to unequal class proportions.
normal_train = "/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL"
pneumonia_train = "/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA"
train_normal = [os.path.join(normal_train, file) for file in os.listdir(normal_train)]
train_pneumonia = [
os.path.join(pneumonia_train, file) for file in os.listdir(pneumonia_train)
]
np.random.seed(30)
train_normal_sample = np.random.choice(train_normal, size=1000)
train_normal = list(zip(train_normal_sample, np.zeros(1000)))
train_pneumonia_sample = np.random.choice(train_pneumonia, size=1000)
train_pneumonia = list(zip(train_pneumonia_sample, np.ones(1000)))
train_dataset = train_normal + train_pneumonia
def map_train(item):
image, label = item
image = Image.open(image).convert("L")
image = image.resize((150, 150))
label = torch.tensor(label, device=device)
process = T.Compose([T.ConvertImageDtype(dtype=torch.float32)])
return image, process(label)
train_ts = [map_train(item) for item in train_dataset]
# Define transformations for each image in the dataset.
transform = T.Compose(
[
T.PILToTensor(),
T.ConvertImageDtype(dtype=torch.float32),
T.RandomRotation(20),
T.RandomAffine(10),
]
)
# Concatenate tensors into required form for dataloader.
x_train = torch.cat([transform(t[0]).unsqueeze(1) for t in train_ts])
y_train = torch.cat([t[1].unsqueeze(0).unsqueeze(1) for t in train_ts])
x_test = torch.cat([transform(t[0]).unsqueeze(1) for t in test_ts])
y_test = torch.cat([t[1].unsqueeze(0).unsqueeze(1) for t in test_ts])
x_valid = torch.cat([transform(t[0]).unsqueeze(1) for t in val_ts])
y_valid = torch.cat([t[1].unsqueeze(0).unsqueeze(1) for t in val_ts])
# Create train, test and valid datasets.
train = tuple(zip(x_train, y_train))
test = tuple(zip(x_test, y_test))
valid = tuple(zip(x_valid, y_valid))
# ## Loading the DataLoaders
# Loading data-loaders with tensors.
train_dl = DataLoader(train, batch_size=100, shuffle=True)
test_dl = DataLoader(test, batch_size=100, shuffle=True)
val_dl = DataLoader(valid, batch_size=100, shuffle=True)
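# Quick sanity check (a sketch, not in the original notebook): visualise one training batch
# with torchvision's make_grid (imported above as U); assumes tensors of shape (N, 1, 150, 150)
# as built earlier.
imgs, lbls = next(iter(train_dl))
grid = U.make_grid(imgs[:16].cpu(), nrow=4, normalize=True)
plt.figure(figsize=(8, 8))
plt.imshow(grid.permute(1, 2, 0))
plt.title("Labels: " + ", ".join(str(int(v.item())) for v in lbls[:16].flatten()))
plt.axis("off")
plt.show()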
# ## Defining the Convolutional Neural Net
# Create Convolutional Neural Network
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.conv1 = self.conv(1, 32)
self.conv2 = self.conv(32, 64, dropout=True)
self.conv3 = self.conv(64, 64)
self.conv4 = self.conv(64, 128, dropout=True)
self.conv5 = self.conv(128, 256, dropout=True)
self.relu = nn.ReLU()
self.flt = nn.Flatten(start_dim=1)
self.dropout = nn.Dropout(0.2)
self.sigmoid = nn.Sigmoid()
self.fc1 = nn.Linear(4096, 1024)
self.fc2 = nn.Linear(1024, 1)
# Define convolutional block.
def conv(self, in_channels, out_channels, stride=1, dropout=False):
layers = [
nn.Conv2d(in_channels, out_channels, (3, 3), stride=stride, padding=(1, 1)),
nn.ReLU(),
]
        if dropout:
layers.append(nn.Dropout(0.1))
layers.append(nn.BatchNorm2d(out_channels))
layers.append(nn.MaxPool2d((2, 2), stride=2))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
# Flatten the input into single vector to pass into fully-connected layers.
x = self.flt(x)
x = self.relu(self.fc1(x))
# Randomly deactivate unit to decrease chances of overfitting.
x = self.dropout(x)
x = self.fc2(x)
x = self.sigmoid(x)
return x
# Initialise the model.
model = NeuralNetwork()
model.to(device)
# Use the Adam optimiser, which adapts per-parameter learning rates to damp oscillations
# and move more quickly through flat regions and past local minima.
optimise = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=0.01)
# Binary cross-entropy loss.
l = nn.BCELoss()
# Train & evaluate model performance.
def run(epochs):
training_loss = []
training_accuracy = []
test_loss = []
test_accuracy = []
# train
for i in range(epochs):
model.train()
epoch_loss = 0
epoch_accuracy = 0
for batch in tqdm(train_dl):
img, labels = batch
img = img.to(device)
labels = labels.to(device)
optimise.zero_grad()
preds = model(img)
loss = l(preds, labels)
# Take the partial derivative of the loss function.
loss.backward()
# Apply step-wise gradient descent.
optimise.step()
epoch_loss += loss.item()
accuracy = (preds > 0.5).float() == labels.float()
accuracy = accuracy.float().mean()
epoch_accuracy += accuracy.item()
        # evaluate on one batch from the test loader (no gradients needed)
        model.eval()
        with torch.no_grad():
            img, labels = next(iter(test_dl))
            img = img.to(device)
            labels = labels.to(device)
            preds = model(img)
            eval_loss = l(preds, labels)
            eval_accuracy = (preds > 0.5).float() == labels.float()
            eval_accuracy = eval_accuracy.float().mean().item()
        training_loss.append(epoch_loss)
        training_accuracy.append(epoch_accuracy)
        test_loss.append(eval_loss.item())
        test_accuracy.append(eval_accuracy)
        print(
            f"Epoch: {i+1}\t Training Loss: {epoch_loss/len(train_dl)}\t Training Accuracy: {epoch_accuracy/len(train_dl)}\nTest Loss: {eval_loss.item()}, \tTest Accuracy: {eval_accuracy}"
        )
run(10)
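# Follow-up sketch (not in the original notebook): score the trained model on the validation
# DataLoader, which is built above but otherwise unused.
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for img, labels in val_dl:
        img, labels = img.to(device), labels.to(device)
        preds = model(img)
        correct += ((preds > 0.5).float() == labels.float()).sum().item()
        total += labels.numel()
print(f"Validation accuracy: {correct / total:.3f}")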
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#
# # Table of content
#
# * [1. Introduction](#1)
# - [Problem statement](#1.1)
# - [Data description](#1.2)
#
# * [2. Import libraries](#2)
#
# * [3. Basic Exploration](#3)
# - [Read dataset](#3.1)
# - [Some information](#3.2)
# - [Data transformation](#3.3)
# - [Data visualization](#3.4)
# * [4. Machine Learning model](#4)
#
# * [5 Conclusion](#5)
# * [6 Author Message](#6)
# # Introduction
# Problem statement
# Data description
# # Import Libraries
import pandas as pd
import numpy as np
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
print("Setup Complete")
#
# # Basic Exploration
# Read dataset
def read_dataset(file_path):
data = pd.read_csv(file_path)
return data
file_path = "/kaggle/input/student-performance-in-mathematics/exams.csv"
data = read_dataset(file_path)
#
# Some information
data.head()
data.shape
data.info()
data.nunique()
data.duplicated().any()
#
# Data transformation
# >
# Missing Data Treatment
total_null = data.isnull().sum().sort_values(ascending=False)
percent = ((data.isnull().sum() / data.isnull().count()) * 100).sort_values(
ascending=False
)
print("Total records = ", data.shape[0])
missing_data = pd.concat(
[total_null, percent.round(2)], axis=1, keys=["Total Missing", "In Percent"]
)
missing_data
# >
# Duplicated Data Treatment
duplicated_data = pd.DataFrame(data.loc[data.duplicated()].count())
duplicated_data.columns = ["Total Duplicate"]
duplicated_data
# >
# Clean Data
print("gender: ", data["gender"].unique().tolist())
print("race / ethnicity: ", data["race/ethnicity"].unique().tolist())
print(
"parental level of education: ",
data["parental level of education"].unique().tolist(),
)
print("lunch: ", data["lunch"].unique().tolist())
print("test preparation course: ", data["test preparation course"].unique().tolist())
data["race/ethnicity"] = data["race/ethnicity"].str.replace("group ", "")
data.describe().round(2)
data.head()
#
# Data visualization
print("Statistic of gender:")
df_gender = pd.DataFrame(data["gender"].value_counts().reset_index())
df_gender.columns = ["gender", "total"]
df_gender["percent"] = (df_gender["total"] / df_gender["total"].sum() * 100).round(2)
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.precision", 3
):
print(df_gender)
print("\n---------------------------------------------------\n")
print("Statistic of lunch:")
df_lunch = pd.DataFrame(data["lunch"].value_counts().reset_index())
df_lunch.columns = ["lunch", "total"]
df_lunch["percent"] = (df_lunch["total"] / df_lunch["total"].sum() * 100).round(2)
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.precision", 3
):
print(df_lunch)
print("\n---------------------------------------------------\n")
print("Statistic of test preparation course:")
df_test = pd.DataFrame(data["test preparation course"].value_counts().reset_index())
df_test.columns = ["test preparation course", "total"]
df_test["percent"] = (df_test["total"] / df_test["total"].sum() * 100).round(2)
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.precision", 3
):
print(df_test)
# Define a sample color palette
colors = ["#66b3ff", "#99ff99", "#ffcc99"]
# Create figure and subplots
fig, axs = plt.subplots(1, 3, figsize=(16, 5))
# Create first subplot for pie chart 1
axs[0].pie(
df_gender["percent"],
labels=df_gender["gender"],
autopct="%1.1f%%",
startangle=90,
shadow=True,
colors=colors,
wedgeprops=dict(width=0.5),
)
axs[0].axis("equal")
axs[0].set_title("Distribution of Gender", pad=20, fontsize=15)
# Create second subplot for pie chart 2
axs[1].pie(
df_lunch["percent"],
labels=df_lunch["lunch"],
autopct="%1.1f%%",
startangle=90,
shadow=True,
colors=colors,
wedgeprops=dict(width=0.5),
)
axs[1].axis("equal")
axs[1].set_title("Distribution of Lunch", pad=20, fontsize=15)
# Create third subplot for pie chart 3
axs[2].pie(
df_test["percent"],
labels=df_test["test preparation course"],
autopct="%1.1f%%",
startangle=90,
shadow=True,
colors=colors,
wedgeprops=dict(width=0.5),
)
axs[2].axis("equal")
axs[2].set_title("Distribution of Test Preparation Course", pad=20, fontsize=15)
plt.tight_layout()
# Display the plot
plt.show()
# Set the color palette
palette = sns.color_palette("pastel")
# Create a figure with two subplots
fig, axs = plt.subplots(1, 2, figsize=(15, 6))
# Create the first subplot for the countplot of parental level of education
sns.countplot(y="parental level of education", data=data, ax=axs[0], palette=palette)
axs[0].set_title("Distribution of Parental Education Level")
# Create the second subplot for the countplot of race/ethnicity
sns.countplot(y="race/ethnicity", data=data, ax=axs[1], palette=palette)
axs[1].set_title("Distribution of Race / Ethnicity")
# Set the y-labels for both subplots
axs[0].set_ylabel("")
axs[1].set_ylabel("")
# Set the x-labels for both subplots
axs[0].set_xlabel("Number of Students")
axs[1].set_xlabel("Number of Students")
# Increase the distance between subplots
fig.subplots_adjust(wspace=5.0)
# Set the tight layout
plt.tight_layout()
# Display the plot
plt.show()
# Set the color palette
palette = sns.color_palette("pastel")
# Create a figure with three subplots
fig, axs = plt.subplots(1, 3, figsize=(15, 6), sharey=True)
# Create the first subplot for the distribution of math scores
sns.histplot(x="math score", data=data, ax=axs[0], color=palette[0])
axs[0].set_title("Distribution of Math Scores")
# Create the second subplot for the distribution of reading scores
sns.histplot(x="reading score", data=data, ax=axs[1], color=palette[1])
axs[1].set_title("Distribution of Reading Scores")
# Create the third subplot for the distribution of writing scores
sns.histplot(x="writing score", data=data, ax=axs[2], color=palette[2])
axs[2].set_title("Distribution of Writing Scores")
# Set the y-label for all subplots
fig.text(-0.04, 0.5, "Number of Students", va="center", rotation="vertical")
# Set the x-label for all subplots
fig.text(0.5, -0.04, "Score", ha="center")
# Increase the distance between subplots
fig.subplots_adjust(wspace=0.3)
# Set the tight layout
plt.tight_layout()
# Display the plot
plt.show()
mean_math = data["math score"].mean()
print("Mean of math scores: ", mean_math)
print("\nStatistic of math scores: ")
over = data["math score"].loc[data["math score"] > mean_math].count()
lower = data["math score"].loc[data["math score"] < mean_math].count()
df_math = pd.DataFrame({"over": [over], "lower": [lower]}, index=["total"]).T
df_math["percent"] = (df_math["total"] / df_math["total"].sum() * 100).round(2)
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.precision", 3
):
print(df_math)
print("\n---------------------------------------------------\n")
mean_reading = data["reading score"].mean()
print("Mean of reading scores: ", mean_reading)
print("\nStatistic of reading scores: ")
over = data["reading score"].loc[data["reading score"] > mean_reading].count()
lower = data["reading score"].loc[data["reading score"] < mean_reading].count()
df_reading = pd.DataFrame({"over": [over], "lower": [lower]}, index=["total"]).T
df_reading["percent"] = (df_reading["total"] / df_reading["total"].sum() * 100).round(2)
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.precision", 3
):
print(df_reading)
print("\n---------------------------------------------------\n")
mean_writing = data["writing score"].mean()
print("Mean of writing scores: ", mean_writing)
print("\nStatistic of writing scores: ")
over = data["writing score"].loc[data["writing score"] > mean_writing].count()
lower = data["writing score"].loc[data["writing score"] < mean_writing].count()
df_writing = pd.DataFrame({"over": [over], "lower": [lower]}, index=["total"]).T
df_writing["percent"] = (df_writing["total"] / df_writing["total"].sum() * 100).round(2)
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.precision", 3
):
print(df_writing)
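# The three blocks above repeat the same over/under-mean computation; a small reusable helper
# (a sketch, the name is illustrative) would produce the same summary frames.
def over_under_mean(df, col):
    mean_val = df[col].mean()
    summary = pd.DataFrame(
        {"over": [(df[col] > mean_val).sum()], "lower": [(df[col] < mean_val).sum()]},
        index=["total"],
    ).T
    summary["percent"] = (summary["total"] / summary["total"].sum() * 100).round(2)
    return summary

# e.g. over_under_mean(data, "math score") reproduces df_math above.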
# Define a sample color palette
colors = ["#66b3ff", "#99ff99", "#ffcc99"]
# Create figure and subplots
fig, axs = plt.subplots(1, 3, figsize=(16, 5))
# Create first subplot for pie chart 1
axs[0].pie(
df_math["percent"],
labels=df_math.index.tolist(),
autopct="%1.1f%%",
startangle=90,
shadow=True,
colors=colors,
wedgeprops=dict(width=0.5),
)
axs[0].axis("equal")
axs[0].set_title("Distribution of Math Score", pad=20, fontsize=15)
# Create second subplot for pie chart 2
axs[1].pie(
df_reading["percent"],
labels=df_reading.index.tolist(),
autopct="%1.1f%%",
startangle=90,
shadow=True,
colors=colors,
wedgeprops=dict(width=0.5),
)
axs[1].axis("equal")
axs[1].set_title("Distribution of Reading Score", pad=20, fontsize=15)
# Create third subplot for pie chart 3
axs[2].pie(
df_writing["percent"],
labels=df_writing.index.tolist(),
autopct="%1.1f%%",
startangle=90,
shadow=True,
colors=colors,
wedgeprops=dict(width=0.5),
)
axs[2].axis("equal")
axs[2].set_title("Distribution of Writing Score", pad=20, fontsize=15)
plt.tight_layout()
# Display the plot
plt.show()
data.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import re
import string
import numpy as np
import pandas as pd
import random
import missingno
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.base import TransformerMixin
from sklearn.metrics import accuracy_score, recall_score, plot_confusion_matrix
from wordcloud import WordCloud
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv(
"/kaggle/input/real-or-fake-fake-jobposting-prediction/fake_job_postings.csv"
)
data.head()
# checking missing data in our dataframe.
missingno.matrix(data)
# * As we can see, there are a lot of null values in the dataset, so we will need to deal with them later.
print(data.columns)
data.describe()
# * From describing the data we can see there are 4 numerical columns: job_id, telecommuting, has_company_logo and has_questions. They are not useful for a text classification problem, so we can drop them.
# * The remaining numerical feature, 'fraudulent', is the target column our model will be trained to predict.
# Now lets see how many jobs posted are fraud and real.
sns.countplot(x=data.fraudulent)
data.groupby("fraudulent").count()["title"].reset_index().sort_values(
by="title", ascending=False
)
# * From the plot we can see there are very few fraudulent job postings.
# * The data is highly imbalanced, so building a good classifier is hard, but we will try our best :-)
# ### **Now let's fill the nan values and get rid of the columns which are of no use to make things simpler.**
columns = [
"job_id",
"telecommuting",
"has_company_logo",
"has_questions",
"salary_range",
"employment_type",
]
for col in columns:
del data[col]
data.fillna(" ", inplace=True)
data.head()
# **Let's check which country posts most number of jobs.**
def split(location):
l = location.split(",")
return l[0]
data["country"] = data.location.apply(split)
country = dict(data.country.value_counts()[:11])
del country[" "]
plt.figure(figsize=(8, 6))
plt.title("No. of job postings country wise", size=20)
plt.bar(country.keys(), country.values())
plt.ylabel("No. of jobs", size=10)
plt.xlabel("Countries", size=10)
# * Most jobs are posted in the US.
# Let's check which level of experience is required most often.
experience = dict(data.required_experience.value_counts())
del experience[" "]
plt.bar(experience.keys(), experience.values())
plt.xlabel("Experience", size=10)
plt.ylabel("no. of jobs", size=10)
plt.xticks(rotation=35)
plt.show()
# most frequent job titles
print(data.title.value_counts()[:10])
# **Now we should combine our text in a single column to start cleaning our data.**
data["text"] = (
data["title"]
+ " "
+ data["location"]
+ " "
+ data["company_profile"]
+ " "
+ data["description"]
+ " "
+ data["requirements"]
+ " "
+ data["benefits"]
)
del data["title"]
del data["location"]
del data["department"]
del data["company_profile"]
del data["description"]
del data["requirements"]
del data["benefits"]
del data["required_experience"]
del data["required_education"]
del data["industry"]
del data["function"]
del data["country"]
data.head()
# **Now lets see what type of words are frequent in fraud and actual jobs using wordclouds**
fraudjobs_text = data[data.fraudulent == 1].text
actualjobs_text = data[data.fraudulent == 0].text
STOPWORDS = spacy.lang.en.stop_words.STOP_WORDS
plt.figure(figsize=(16, 14))
wc = WordCloud(
min_font_size=3, max_words=3000, width=1600, height=800, stopwords=STOPWORDS
).generate(str(" ".join(fraudjobs_text)))
plt.imshow(wc, interpolation="bilinear")
plt.figure(figsize=(16, 14))
wc = WordCloud(
min_font_size=3, max_words=3000, width=1600, height=800, stopwords=STOPWORDS
).generate(str(" ".join(actualjobs_text)))
plt.imshow(wc, interpolation="bilinear")
# # Cleaning Data
# * Creating a function that accepts a sentence as input and processes it into tokens, performing lemmatization, lowercasing, and stop-word removal.
# * The helper functions below are adapted from https://www.dataquest.io/blog/tutorial-text-classification-in-python-using-spacy/.
# Create our list of punctuation marks
punctuations = string.punctuation
# Create our list of stopwords
nlp = spacy.load("en_core_web_sm")  # the bare "en" shortcut was removed in spaCy 3
stop_words = spacy.lang.en.stop_words.STOP_WORDS
# Load English tokenizer, tagger, parser, NER and word vectors
parser = English()
# Creating our tokenizer function
def spacy_tokenizer(sentence):
# Creating our token object, which is used to create documents with linguistic annotations.
mytokens = parser(sentence)
# Lemmatizing each token and converting each token into lowercase
mytokens = [
word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_
for word in mytokens
]
# Removing stop words
mytokens = [
word for word in mytokens if word not in stop_words and word not in punctuations
]
# return preprocessed list of tokens
return mytokens
# Custom transformer using spaCy
class predictors(TransformerMixin):
def transform(self, X, **transform_params):
# Cleaning Text
return [clean_text(text) for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
return {}
# Basic function to clean the text
def clean_text(text):
# Removing spaces and converting text into lowercase
return text.strip().lower()
# creating our bag of words
bow_vector = CountVectorizer(tokenizer=spacy_tokenizer, ngram_range=(1, 3))
# * BoW converts text into the matrix of occurrence of words within a given document. It focuses on whether given words occurred or not in the document, and it generates a matrix that we might see referred to as a BoW matrix or a document term matrix.
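# Tiny illustration (a sketch, not part of the pipeline): what a bag-of-words matrix looks
# like for two toy sentences, using a plain CountVectorizer for brevity. Note that
# get_feature_names_out requires scikit-learn >= 1.0 (older versions use get_feature_names).
toy_sentences = ["senior data engineer wanted", "work from home and earn money fast"]
toy_vectorizer = CountVectorizer(ngram_range=(1, 1))
toy_matrix = toy_vectorizer.fit_transform(toy_sentences)
print(toy_vectorizer.get_feature_names_out())
print(toy_matrix.toarray())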
# splitting our data in train and test
X_train, X_test, y_train, y_test = train_test_split(
data.text, data.fraudulent, test_size=0.3
)
# # Creating Model
# * We are creating a pipeline with three components: a cleaner, a vectorizer, and a classifier. The cleaner uses our predictors class object to clean and preprocess the text. The vectorizer uses the CountVectorizer object to create the bag-of-words matrix for our text. The classifier performs logistic regression (or another chosen model) to classify the postings as real or fraudulent.
def models(x):
    # instantiate the requested classifier class (compare against the imported class objects)
    if x == LogisticRegression:
        clf = LogisticRegression()
    elif x == RandomForestClassifier:
        clf = RandomForestClassifier()
    elif x == SVC:
        clf = SVC()
    elif x == XGBClassifier:
        clf = XGBClassifier()
    else:
        print("wrong input")
        return
# Create pipeline using Bag of Words
pipe = Pipeline(
[("cleaner", predictors()), ("vectorizer", bow_vector), ("classifier", clf)]
)
# Fitting model
pipe.fit(X_train, y_train)
# Predicting with a test dataset
predicted = pipe.predict(X_test)
# Model Accuracy
print(x, "Accuracy: ", accuracy_score(y_test, predicted))
print(x, "Recall: ", recall_score(y_test, predicted))
# plot confusion matrix
plot_confusion_matrix(pipe, X_test, y_test, cmap="Blues", values_format=" ")
models(LogisticRegression)
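# The same pipeline can be reused for the other classifiers handled by models() above
# (a usage sketch; each call re-fits the whole pipeline, so it can take a while):
# models(RandomForestClassifier)
# models(SVC)
# models(XGBClassifier)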
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import librosa # Audio
import matplotlib.pyplot as plt
from pathlib import Path
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Conv2D,
BatchNormalization,
ReLU,
GlobalAveragePooling2D,
Dense,
Softmax,
)
from kapre import STFT, Magnitude, MagnitudeToDecibel
from kapre.composed import get_melspectrogram_layer, get_log_frequency_spectrogram_layer
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
path = Path("../input/dat255-project")
train_path = path / "FSDKaggle2018.audio_train/FSDKaggle2018.audio_train"
test_path = path / "FSDKaggle2018.audio_test/FSDKaggle2018.audio_test"
train = pd.read_csv(
"/kaggle/input/dat255-project/FSDKaggle2018.meta/FSDKaggle2018.meta/train_post_competition.csv"
)
test = pd.read_csv(
"/kaggle/input/dat255-project/FSDKaggle2018.meta/FSDKaggle2018.meta/test_post_competition_scoring_clips.csv"
)
# # Prepare the data (TODO)
def load_files(file_name):
audio, sample_rate = librosa.load(file_name)
return audio
features = []
for index, row in train.iterrows():
file_name = os.path.join(os.path.abspath(train_path), str(row["fname"]))
class_label = row["label"]
data = load_files(file_name)
features.append([data, class_label])
featuresdf = pd.DataFrame(features, columns=["feature", "class_label"])
X = np.array(featuresdf.feature)
y = np.array(featuresdf.class_label)
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils, to_categorical
le = LabelEncoder()
yy = to_categorical(le.fit_transform(y))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X, yy, test_size=0.2, random_state=42
)
# y = []
# for index,row in test.iterrows():
# file_name = os.path.join(os.path.abspath(test_path), str(row["fname"]))
# y.append(load_files(file_name))
x_train.dtype
y_train.dtype
sr = 22050
input_shape = (sr, 1)
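# The clips loaded above have varying lengths and sit in an object array, while the STFT layer
# below expects dense input of shape (sr, 1). A sketch of one way to handle this (assumption:
# pad or truncate every clip to exactly one second, i.e. sr samples):
def to_fixed_length(clips, length=sr):
    out = np.zeros((len(clips), length, 1), dtype=np.float32)
    for i, clip in enumerate(clips):
        clip = clip[:length]
        out[i, : len(clip), 0] = clip
    return out

x_train = to_fixed_length(x_train)
x_test = to_fixed_length(x_test)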
model = Sequential()
# STFT layer
model.add(
STFT(
n_fft=2048,
        win_length=2048,
hop_length=1024,
window_name=None,
pad_end=False,
input_data_format="channels_last",
output_data_format="channels_last",
input_shape=input_shape,
)
)
# model.add(get_melspectrogram_layer())
model.add(Magnitude())
model.add(MagnitudeToDecibel())
model.add(Conv2D(32, (3, 3), strides=(2, 2)))
model.add(BatchNormalization())
model.add(ReLU())
model.add(GlobalAveragePooling2D())
model.add(Dense(y_train.shape[1]))  # one output unit per class (FSDKaggle2018 has 41 labels)
model.add(Softmax())
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer="adam")
model.fit(x_train, y_train)
# train.head()
# train.label.unique()
# category_group = train.groupby(['label', 'manually_verified']).count()
# plot = category_group['fname'].unstack().plot(kind='bar', figsize=(16, 10))
# plot.set_xlabel("Category")
# plot.set_ylabel("Number of examples")
# Print out minimum and maximum number of examples per category
# print("Minimum number of examples per category=", min(train.label.value_counts()))
# print("Maximum number of examples per category=", max(train.label.value_counts()))
# train['path'] = '/kaggle/input/dat255-project/FSDKaggle2018.audio_train/FSDKaggle2018.audio_train/' + train['fname']
# print(train.head())
# test['path'] = '/kaggle/input/dat255-project/FSDKaggle2018.audio_test/FSDKaggle2018.audio_test/' + test['fname']
# print(test.head())
# Load the audio file
# y, sr = librosa.load(train.path[0])
# print(y, sr)
# plt.figure(figsize=(12, 4))
# librosa.display.waveshow(y, sr=sr)
# plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Libraries
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# # Data Loading
# training data
df_train = pd.read_csv(
"/kaggle/input/human-activity-recognition-with-smartphones/train.csv"
)
df_train.head()
# testing data
df_test = pd.read_csv(
"/kaggle/input/human-activity-recognition-with-smartphones/test.csv"
)
df_test.head()
len(df_train) # total no. of entries in training dataset.
data = df_train  # copying the training data to another variable.
# # Data Processing
# checking for NULL values in training data
total_null_values = df_train.isnull().sum().sort_values(ascending=False)
percentage = ((df_train.isnull().sum() / df_train.isnull().count()) * 100).sort_values(
ascending=False
)
print("Total values present are", df_train.shape[0])
total_missing_data = pd.concat(
[total_null_values, percentage.round(2)],
axis=1,
keys=["Total Missing", "In precentage"],
)
total_missing_data
# Since no null values are present, we don't need to perform any data cleaning.
# checking for NULL values in testing data
total_null_values_in_test = df_test.isnull().sum().sort_values(ascending=False)
percentage_in_test = (
(df_test.isnull().sum() / df_test.isnull().count()) * 100
).sort_values(ascending=False)
print("Total values present are", df_test.shape[0])
total_missing_data_in_test = pd.concat(
    [total_null_values_in_test, percentage_in_test.round(2)],
    axis=1,
    keys=["Total Missing", "In percentage"],
)
total_missing_data_in_test
# Here also, no null values are present, so no data cleaning is needed.
# checking for any missing entries in the data.
df_train.isna().sum()
df_train.shape
df_test.shape
# ## Finding out the categorical columns present in the dataset.
df_train.select_dtypes(include=["object"]).columns.tolist()
df_test.select_dtypes(include=["object"]).columns.tolist()
# Changing the names of columns by removing all special characters.
columns = df_train.columns
columns = columns.str.replace("[()]", "")
columns = columns.str.replace("[-]", "")
columns = columns.str.replace("[,]", "")
df_train.columns = columns
df_test.columns = columns
print(df_test.columns)
df_train.columns
from sklearn import preprocessing
sns.set_style("whitegrid")
# label encoding of categorical data to numerical values.
label_encoder = preprocessing.LabelEncoder()
df_train["Activity"] = label_encoder.fit_transform(df_train["Activity"])
# checking unique labels in Activity column.
df_train.Activity.unique()
df_train["labels"] = label_encoder.fit_transform(df_train["Activity"].values)
df_test["labels"] = label_encoder.fit_transform(df_test["Activity"].values)
dat = pd.unique(df_test[["Activity", "labels"]].values.ravel())
d = dat.tolist()
print(d)
df_test["Activity"] = label_encoder.fit_transform(df_test["Activity"])
df_test.Activity.unique()
# # Plotting of graphs and finding out the relations between values.
# graph showing how many times each activity was performed by the 30 subjects.
plt.figure(figsize=(16, 8))
plt.title("Data provided by each user", fontsize=20)
sns.countplot(x="subject", hue="Activity", data=df_train)
plt.show()
# ### Describing the data.
df_train.describe()
df_test.describe()
# Graph showing count of entries of each activity.
plt.title("Data points per activity", fontsize=15)
sns.histplot(df_train.Activity, element="poly", discrete=True, fill=False)
plt.xticks(rotation=0)
plt.show()
# Graph depicting the relation between the mean of the acceleration magnitude and the activities.
plt.figure(figsize=(7, 7))
sns.boxplot(
x="Activity", y="tBodyAccMagmean", data=df_train, showfliers=False, saturation=1
)
plt.ylabel("Acceleration Magnitude mean")
plt.axhline(y=-0.9, xmin=0.1, xmax=0.9, dashes=(5, 5), c="g")
plt.axhline(y=0.02, xmin=0.4, dashes=(5, 5), c="m")
plt.show()
# We can infer the following from the above graph:
# * If tBodyAccMagmean is < -0.9 then the Activities are either Standing or Sitting or Laying.
# * If tBodyAccMagmean > -0.2 and < 0.02 then the Activities are either Walking or WalkingUpstairs.
# * If tBodyAccMagmean > 0.02 then the Activity is WalkingDownstairs.
# #### We can classify most of the Activity labels this way, with some errors (a rough rule-based sketch follows below).
#
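# A rough rule-based sketch of the thresholds read off the plot above (the helper name is ours, and
# values between -0.9 and -0.2 that the bullets leave ambiguous are simply lumped with the walking group):
def rough_activity_group(acc_mag_mean):
    # coarse grouping based only on tBodyAccMagmean
    if acc_mag_mean < -0.9:
        return "static (Standing / Sitting / Laying)"
    elif acc_mag_mean < 0.02:
        return "Walking / WalkingUpstairs"
    else:
        return "WalkingDownstairs"
# df_train["rough_group"] = df_train["tBodyAccMagmean"].apply(rough_activity_group)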
sns.boxplot(x="Activity", y="angleXgravityMean", data=df_train)
plt.axhline(y=0.08, xmin=0.1, xmax=0.9, c="m", dashes=(5, 3))
plt.title("Angle between X-axis and Gravity-mean", fontsize=15)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
loans = pd.read_csv(
"/kaggle/input/data-science-for-good-kiva-crowdfunding/kiva_loans.csv"
)
variables = [
"loan_amount",
"sector",
"country_code",
"term_in_months",
"borrower_genders",
"repayment_interval",
]
loans.head()
## create a "funded" column
loans["funded"] = (
(loans["loan_amount"] - loans["funded_amount"])
/ (loans["loan_amount"] - loans["funded_amount"])
- 1
) * -1
loans["funded"] = loans["funded"].astype("bool")
loans[loans["funded_amount"] < loans["loan_amount"]].head()
# ## how many loans get funded?
n_funded = loans[loans["funded"] == True].shape[0]
n_tot = loans.shape[0]
print(
"{} out of {} loans got funded ({} %)".format(
n_funded, n_tot, round(n_funded / n_tot * 100)
)
)
# ## transform gender variable to has_female, has_male
## get male/female from the comma-separated "borrower_genders" string
loans["borrower_genders"] = loans["borrower_genders"].astype("str")
loans["has_female"] = loans["borrower_genders"].apply(lambda x: "female" in x)
# "female" contains "male" as a substring, so look for a standalone "male" token instead
loans["has_male"] = loans["borrower_genders"].apply(
    lambda x: "male" in [g.strip() for g in x.split(",")]
)
# ## what is the gender distribution for funded/not funded?
# ## We want to investigate:
# - Can we predict the probability of a loan getting funded?
# - Which variables are more important for getting funded?
# # 1. Variable analysis
# we want to answer:
# - which variables do we have, and are they numerical or categorical?
# - what are the distributions?
# ## Variables
# We have 6 variables:
# - "loan_amount" (numerical)
# - "sector" (categorical)
# - "country_code" (categorical)
# - "term_in_months" (numerical)
# - "borrower_genders" (categorical)
# - "repayment_interval" (categorical)
#
variables = [
"loan_amount",
"sector",
"term_in_months",
"has_male",
"has_female",
"repayment_interval",
"funded",
]
loans_explore = loans[variables]
loans_explore.head()
loans_explore["loan_amount"].describe()
loans_explore["sector"].describe()
# loans_explore["country_code"].hist()
loans_explore["repayment_interval"].describe()
loans_explore["term_in_months"].describe()
loans_explore["loan_amount"].describe()
# # Data preparation
# ## Variables
# We have 6 variables:
# - "loan_amount" (numerical)
# - "sector" (categorical) -> dummy
# - "country_code" (categorical) --> dummy
# - "term_in_months" (numerical)
# - "borrower_genders" (categorical) --> dummy variables: "has_female", "has_male"
# - "repayment_interval" (categorical) --> dummy
l = loans_explore.copy()
## create dummy variables
l = pd.concat(
[l.drop("sector", axis=1), pd.get_dummies(l["sector"], prefix="sector")],
axis=1,
sort=False,
)
# pd.concat([l.drop("country_code", axis = 1), pd.get_dummies(l["country_code"], prefix = "country")], axis=1, sort=False)
l = pd.concat(
[
l.drop("repayment_interval", axis=1),
pd.get_dummies(l["repayment_interval"], prefix="repayment_interval"),
],
axis=1,
sort=False,
)
# convert bools to 1/0
l["has_male"] = pd.to_numeric(l["has_male"])
l["has_female"] = pd.to_numeric(l["has_female"])
l["funded"] = pd.to_numeric(l["funded"])
# normalize loan amount, term in month
l["loan_amount_norm"] = l["loan_amount"] / np.max(l["loan_amount"]) # max is 100k
l["term_in_months_norm"] = l["term_in_months"] / np.max(
l["term_in_months"]
) # max is 158
l = l.drop("loan_amount", axis=1)
l = l.drop("term_in_months", axis=1)
l.head()
# ## inflate data labelled as false
l_false = l[l["funded"] == False]
n_false_labels = l_false.shape[0]
n_true_labels = l.shape[0] - n_false_labels
factor = round(n_true_labels / n_false_labels) - 1
print("inflate all false labels with a factor of {}".format(factor))
false_inflated = pd.concat([l_false] * factor)
l = pd.concat([l, false_inflated])
print("now we have {} data points".format(l.shape[0]))
# # Check correlations among variables
#
l_fem = l[l["has_female"] == True]
fem_funded = l_fem[l_fem["funded"] == True].shape[0]
fem_tot = l_fem.shape[0]
print(
"{}/{} females funded ({} %)".format(
fem_funded, fem_tot, round(100 * fem_funded / fem_tot)
)
)
l_m = l[l["has_male"] == True]
m_funded = l_m[l_m["funded"] == True].shape[0]
m_tot = l_m.shape[0]
print(
"{}/{} males funded ({} %)".format(m_funded, m_tot, round(100 * m_funded / m_tot))
)
cm = l.corr()
alpha = ["ABC", "DEF", "GHI", "JKL"]
alpha = l.columns
fig_cm = plt.figure(figsize=(15, 15))
ax = fig_cm.add_subplot(111)
cax = ax.matshow(cm, interpolation="nearest", cmap="Blues")
fig_cm.colorbar(cax)
plt.xticks(np.arange(0, len(l.columns)), rotation="vertical")
plt.yticks(np.arange(0, len(l.columns)))
ax.set_xticklabels([""] + alpha)
ax.set_yticklabels([""] + alpha)
plt.show()
# ### learnings from the above plot (see the quick numeric check below):
# - high correlation between having a female borrower and getting funded
# - negative correlation between having a male borrower and getting funded
# - high correlation between irregular payments and funded (although there is also a high correlation between female borrowers and irregular payments, which might explain it)
# - loan term and loan amount both correlate negatively with funded ==> shorter and smaller loans get funded more easily (it's called microfinance for a reason, I guess)
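# A quick numeric check of the observations above, using the correlation matrix `cm` computed earlier:
print(cm["funded"].drop("funded").sort_values(ascending=False))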
# # Training models
## let's import scikit learn
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
# ## Split the data into train and test
# use only some variables
# variables_used = ["term_in_months_norm", "loan_amount_norm", "has_male", "has_female", "repayment_interval_bullet", "repayment_interval_irregular", "repayment_interval_monthly", "repayment_interval_weekly"]
# use all variables
variables_used = l.drop("funded", axis=1).columns
x = l.drop("funded", axis=1)[variables_used]
y = l["funded"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
x_train.head()
y_train.head()
# # 0. check current true/false ratio
n_true = y[y == True].shape[0]
n_false = y[y == False].shape[0]
n = y.shape[0]
print("{} ({} %) true, {} false".format(n_true, round(100 * n_true / n), n_false))
# ## 1. Train a Random Forest classifier
# ### 1.1 train on 80% of the data
rf_model = RandomForestClassifier(n_estimators=100, criterion="entropy", random_state=0)
rf_model.fit(x_train, y_train)
# ### 1.2. Validate by getting the accuracy and confusion matrix
train_score = rf_model.score(x_train, y_train)
test_score = rf_model.score(x_test, y_test)
print("train_score: {}".format(train_score))
print("test_score: {}".format(test_score))
y_pred = rf_model.predict(x_test)
cm = confusion_matrix(y_test, y_pred)
cm
plot_confusion_matrix(rf_model, x_test, y_test, normalize="true")
# ## 2. Train a logistic regression
lr_model = LogisticRegression(max_iter=1000)
lr_model.fit(x_train, y_train)
train_score = lr_model.score(x_train, y_train)
test_score = lr_model.score(x_test, y_test)
print("train_score: {}".format(train_score))
print("test_score: {}".format(test_score))
plot_confusion_matrix(lr_model, x_test, y_test, normalize="true")
# # 3. Analyze the results
# ## 3.1 Get most important variables
#
import matplotlib
import matplotlib.pyplot as plt
importance = rf_model.feature_importances_
feature = x.columns
sort_order = np.argsort(importance)
print(importance)
print(feature)
importance = list(np.array(importance)[sort_order])
feature = list(np.array(feature)[sort_order])
fig = plt.figure(figsize=(12, 12))
plt.barh(feature, importance, figure=fig)
plt.show()
# ## 3.2 See distributions
# So, given that some features are more important than others, let's see if we can understand which values are more common for getting funded than others.
#
## split l into funded and not funded
lexp_funded = loans_explore[loans_explore["funded"] == True]
lexp_nfunded = loans_explore[loans_explore["funded"] == False]
# ### 3.2.1 loan amount of funded
lexp_funded["loan_amount"].hist(bins=100, figsize=(12, 12), range=[0, 10000])
lexp_funded["loan_amount"].describe()
# ### 3.2.2 loan amount of not funded
lexp_nfunded["loan_amount"].hist(bins=100, figsize=(12, 12), range=[0, 10000])
lexp_nfunded["loan_amount"].describe()
# ### 3.2.3 term in months of funded
lexp_funded["term_in_months"].hist(bins=48, figsize=(12, 12), range=[0, 48])
lexp_funded["term_in_months"].describe()
# ### 3.2.4 loan term in months, not funded
lexp_nfunded["term_in_months"].hist(bins=48, figsize=(12, 12), range=[0, 48])
lexp_nfunded["term_in_months"].describe()
|
# This kernel is a copy of a [kernel](https://www.kaggle.com/kaushal2896/bengali-graphemes-starter-eda-multi-output-cnn). But with a lot of comments. It is used only for educational purposes.
# **Loading libraries and initial analysis of the input data**
## Block of commonly used libraries across all data-analysis topics
# linear algebra https://pythonworld.ru/numpy/1.html https://habr.com/ru/post/352678/ https://python-scripts.com/numpy
import numpy as np
# file reading and data processing https://proglib.io/p/pandas-tricks https://khashtamov.com/ru/pandas-introduction/ https://habr.com/ru/company/ods/blog/322626/
import pandas as pd
# sklearn - preprocessing, model metrics, model validation, feature selection, model selection,
# machine-learning models: classification, regression, clustering, dimensionality reduction...
# This is the main library for classical machine learning https://scikit-learn.org/stable/ https://habr.com/ru/company/mlclass/blog/247751/
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# Visualization https://python-scripts.com/matplotlib https://pythonworld.ru/novosti-mira-python/scientific-graphics-in-python.html
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
# https://habr.com/ru/company/ods/blog/323210/ https://nagornyy.me/courses/data-science/intro-to-seaborn/
import seaborn as sns  # higher-level and easier to use
## end of the commonly used libraries block
## Block of libraries related to computer-vision tasks
# Image-processing algorithms https://tproger.ru/translations/opencv-python-guide/ https://arboook.com/kompyuternoe-zrenie/osnovnye-operatsii-s-izobrazheniyami-v-opencv-3-python/
import cv2
# https://habr.com/ru/post/451074/ https://pythonru.com/biblioteki/osnovnye-vozmozhnosti-biblioteki-python-imaging-library-pillow-pil
import PIL.Image as Image, PIL.ImageDraw as ImageDraw, PIL.ImageFont as ImageFont
## end of the image-processing libraries block
## Neural-network block
# keras - a high-level library for working with neural networks. Works as a layer on top of tensorflow or theano (your choice)
# video lectures by A.V. Sozykin https://www.youtube.com/watch?v=GX7qxV5nh5o&list=PLtPJ9lKvJ4oiz9aaL_xcZd-x0qd8G0VN_
# https://www.youtube.com/watch?v=52U4BG0ENiM&list=PLtPJ9lKvJ4oi5ATzKmmp6FznCHmnhVoey
# tutorial https://riptutorial.com/ru/keras/topic/8695/%D0%BD%D0%B0%D1%87%D0%B0%D0%BB%D0%BE-%D1%80%D0%B0%D0%B1%D0%BE%D1%82%D1%8B-%D1%81-%D0%BA%D0%B5%D1%80%D0%B0%D1%81%D0%BE%D0%BC
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# a great, short tutorial on habr https://habr.com/ru/company/ods/blog/325432/
from tensorflow.keras.models import Model
from tensorflow.keras.models import clone_model
from tensorflow.keras.layers import (
Dense,
Conv2D,
Flatten,
MaxPool2D,
Dropout,
BatchNormalization,
Input,
GlobalAveragePooling2D,
)
# Optimizers https://habr.com/ru/post/318970/
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau
# pre-trained neural networks
# Architectures in Keras https://www.pyimagesearch.com/2017/03/20/imagenet-vggnet-resnet-inception-xception-keras/
# Architectures from scratch https://towardsdatascience.com/cnn-architectures-a-deep-dive-a99441d18049
# tensorflow docs https://www.tensorflow.org/api_docs/python/tf/keras/applications
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
# example of a network with multiple outputs https://www.pyimagesearch.com/2018/06/04/keras-multiple-outputs-and-multiple-losses/
## end of the neural-network block
## Block of auxiliary libraries
# progress visualization https://stackoverflow.com/questions/42212810/tqdm-in-jupyter-notebook-prints-new-progress-bars-repeatedly
from tqdm.auto import tqdm
# lets you select a list of files matching a path pattern https://pythonworld.ru/moduli/modul-glob.html
from glob import glob
# time and the garbage collector https://all-python.ru/osnovy/modul-time.html https://asvetlov.blogspot.com/2013/05/gc.html http://www.ilnurgi1.ru/docs/python/modules/gc.html
import time, gc
## end of the auxiliary libraries block
# Inspect the input data
from pathlib import Path
featherdir = Path(
"/kaggle/input/bengaliaicv19feather"
)  # folder with the images in feather format
import os
# list all files in the "input/" directory
for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Compare how fast "feather" and "parquet" files can be read
train_image_df0 = pd.read_feather(featherdir / "train_image_data_0.feather")
train_image_df1 = pd.read_feather(featherdir / "train_image_data_1.feather")
train_image_df2 = pd.read_feather(featherdir / "train_image_data_2.feather")
train_image_df3 = pd.read_feather(featherdir / "train_image_data_3.feather")
train_image_df3.head()
d = pd.read_parquet(f"/kaggle/input/bengaliai-cv19/train_image_data_1.parquet")
d.head()
del train_image_df0
del train_image_df1
del train_image_df2
del train_image_df3
del d
gc.collect()
# training data
train_df_ = pd.read_csv("/kaggle/input/bengaliai-cv19/train.csv")
# data for testing the algorithm
test_df_ = pd.read_csv("/kaggle/input/bengaliai-cv19/test.csv")
# data for looking up the component parts of a grapheme (the grapheme root, vowel and consonant diacritics)
class_map_df = pd.read_csv("/kaggle/input/bengaliai-cv19/class_map.csv")
# sample submission file format
sample_sub_df = pd.read_csv("/kaggle/input/bengaliai-cv19/sample_submission.csv")
train_df_.head()  # look at what the training table contains
# check the number of unique values in each column
train_df_.grapheme_root.nunique(), train_df_.vowel_diacritic.nunique(), train_df_.consonant_diacritic.nunique(), train_df_.grapheme.nunique()
test_df_.head()  # look at what the test table contains
sample_sub_df.head()  # look at the sample submission format
# table that maps a full grapheme G to its components:
# the grapheme root, vowel diacritics and consonant diacritics
G = 5
class_map_df[class_map_df.label == G]
class_map_df.head()
class_map_df.component_type.nunique(), class_map_df.label.nunique(), class_map_df.component.nunique()
# check the dataset sizes
print(f"Size of training data: {train_df_.shape}")
print(f"Size of test data: {test_df_.shape}")
print(f"Size of class map: {class_map_df.shape}")
# ## Exploratory Data Analysis
# Exploratory data analysis (EDA) is an approach to analyzing datasets to summarize their main characteristics, often with visual methods.
HEIGHT = 236  # image height
WIDTH = 236  # image width
# if you want to understand more deeply what is going on, uncomment the print statements
def get_n(df, field, n, top=True):
"""функция для выбора топ N графем желаемого типа (field)
df - data frame с графемами (в нашем случае test_df_)
field - часть графемы, которую хотим проанализировать. Может принимать 3 значения (grapheme_root, vowel_diacritic, consonant_diacritic)
n - число знаков, которые мы хотим увидеть
top - (True/False) признак сортировки (по убывание/ по возрастанию)
"""
# группируем датасет по выбранной части графемы и считаем количество появлений каждого типа графемы. Сортируем, отсекам топ
top_graphemes = (
df.groupby([field])
.size()
.reset_index(name="counts")["counts"]
.sort_values(ascending=not top)[:n]
)
# print(top_graphemes)
top_grapheme_roots = (
top_graphemes.index
    )  # separately extract the indices (the ids of the grapheme parts)
# print(top_grapheme_roots)
top_grapheme_counts = (
top_graphemes.values
    )  # separately extract the values (counts) of how often this grapheme part occurs in the dataset
# print(top_grapheme_counts)
    # find the selected grapheme parts in the table that holds the glyphs of these parts
top_graphemes = (
class_map_df[class_map_df["component_type"] == field]
.reset_index()
.iloc[top_grapheme_roots]
)
# print(top_graphemes)
top_graphemes.drop(
["component_type", "label"], axis=1, inplace=True
    )  # drop the columns we don't need
# print(top_graphemes)
top_graphemes.loc[
:, "count"
    ] = top_grapheme_counts  # attach the occurrence counts to the ids and glyphs of the grapheme parts
# print(top_graphemes)
return top_graphemes
def image_from_char(char):
"""функция отображения символов из таблицы с данными в рисунки желаемого размера
char - часть графемы, которую хотим отрисовать в увеличенном масштабе
"""
    image = Image.new(
        "RGB", (WIDTH, HEIGHT)
    )  # create a three-channel image with the given dimensions
    draw = ImageDraw.Draw(image)  # create a drawing object
    myfont = ImageFont.truetype(
        "/kaggle/input/kalpurush-fonts/kalpurush-2.ttf", 120
    )  # choose the font type and its size
    w, h = draw.textsize(
        char, font=myfont
    )  # measure the grapheme as rendered with the chosen font and size
    draw.text(
        ((WIDTH - w) / 2, (HEIGHT - h) / 3), char, font=myfont
    )  # draw the character, centered on the image canvas
return image
# * ### Number of unique values for each grapheme component that must be predicted
print(f'Number of unique grapheme roots: {train_df_["grapheme_root"].nunique()}')
print(f'Number of unique vowel diacritic: {train_df_["vowel_diacritic"].nunique()}')
print(
f'Number of unique consonant diacritic: {train_df_["consonant_diacritic"].nunique()}'
)
# ### Top 10 most frequently used grapheme roots
top_10_roots = get_n(train_df_, "grapheme_root", 10)
top_10_roots
# create a 2 x 5 grid for a more compact display of the characters and set the figure size
f, ax = plt.subplots(2, 5, figsize=(16, 8))
ax = ax.flatten()
# draw the top N grapheme-part images found above in a loop
for i in range(10):
ax[i].imshow(image_from_char(top_10_roots["component"].iloc[i]), cmap="Greys")
# * ### Top 10 least frequently used grapheme roots in the dataset
bottom_10_roots = get_n(train_df_, "grapheme_root", 10, False)
bottom_10_roots
f, ax = plt.subplots(2, 5, figsize=(16, 8))
ax = ax.flatten()
for i in range(10):
ax[i].imshow(image_from_char(bottom_10_roots["component"].iloc[i]), cmap="Greys")
# * ### Top 5 vowel diacritics in the training dataset
top_5_vowels = get_n(train_df_, "vowel_diacritic", 5)
top_5_vowels
f, ax = plt.subplots(1, 5, figsize=(16, 8))
ax = ax.flatten()
for i in range(5):
ax[i].imshow(image_from_char(top_5_vowels["component"].iloc[i]), cmap="Greys")
# * ### Top 5 consonant diacritics in the training dataset
top_5_consonants = get_n(train_df_, "consonant_diacritic", 5)
top_5_consonants
f, ax = plt.subplots(1, 5, figsize=(16, 8))
ax = ax.flatten()
for i in range(5):
ax[i].imshow(image_from_char(top_5_consonants["component"].iloc[i]), cmap="Greys")
train_df_ = train_df_.drop(
["grapheme"], axis=1, inplace=False
)  # drop the rendered graphemes from the training dataset
# convert the data to a type that takes up less memory
train_df_[["grapheme_root", "vowel_diacritic", "consonant_diacritic"]] = train_df_[
["grapheme_root", "vowel_diacritic", "consonant_diacritic"]
].astype("uint8")
IMG_SIZE = 64  # input image size for the neural network
N_CHANNELS = 1  # number of color channels for the neural network
# Apply some transformations to the images (credits: [this kernel](https://www.kaggle.com/shawon10/bangla-graphemes-image-processing-deep-cnn)). Resize them and center the graphemes.
def resize(df, size=64, need_progress_bar=True):
"""функция преобразовывает изображения к квадратной форме необходимого размера
df - набор данных с изображениями исходного размера
size - ширина и высота изображения после преобразования
need_progress_bar - вывод строки состояния
return - дата фрейм содержащий изображения нового размера
"""
resized = {}
resize_size = size
    # if a progress bar is needed, wrap the loop in tqdm, otherwise don't
if need_progress_bar:
for i in tqdm(range(df.shape[0])):
            # extract one image row and reshape the vector into a matrix of the original image size
image = df.loc[df.index[i]].values.reshape(137, 236)
            # binarize the image
_, thresh = cv2.threshold(
image, 30, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
)
            # find all likely contours in the image
contours, _ = cv2.findContours(
thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)[-2:]
ls_xmin = []
ls_ymin = []
ls_xmax = []
ls_ymax = []
            # for every contour found
for cnt in contours:
                # get the coordinates of the bounding rectangle around the found contour
x, y, w, h = cv2.boundingRect(cnt)
                # store the rectangle corner coordinates, each in its own list
ls_xmin.append(x)
ls_ymin.append(y)
ls_xmax.append(x + w)
ls_ymax.append(y + h)
            # after checking all contours, take the outermost rectangle coordinates so the whole glyph is captured for sure
xmin = min(ls_xmin)
ymin = min(ls_ymin)
xmax = max(ls_xmax)
ymax = max(ls_ymax)
            # crop the image to those coordinates (the grapheme now fills as much of the image as possible)
roi = image[ymin:ymax, xmin:xmax]
            # resize the image to the size we need
resized_roi = cv2.resize(
roi, (resize_size, resize_size), interpolation=cv2.INTER_AREA
)
            # add the new image to the dictionary
resized[df.index[i]] = resized_roi.reshape(-1)
else:
for i in range(df.shape[0]):
image = df.loc[df.index[i]].values.reshape(137, 236)
_, thresh = cv2.threshold(
image, 30, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
)
contours, _ = cv2.findContours(
thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)[-2:]
ls_xmin = []
ls_ymin = []
ls_xmax = []
ls_ymax = []
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
ls_xmin.append(x)
ls_ymin.append(y)
ls_xmax.append(x + w)
ls_ymax.append(y + h)
xmin = min(ls_xmin)
ymin = min(ls_ymin)
xmax = max(ls_xmax)
ymax = max(ls_ymax)
roi = image[ymin:ymax, xmin:xmax]
resized_roi = cv2.resize(
roi, (resize_size, resize_size), interpolation=cv2.INTER_AREA
)
resized[df.index[i]] = resized_roi.reshape(-1)
    # build a data frame from the dictionary of resized images and return it
resized = pd.DataFrame(resized).T
return resized
def get_dummies(df):
cols = []
for col in df:
cols.append(pd.get_dummies(df[col].astype(str)))
return pd.concat(cols, axis=1)
d = pd.DataFrame(
{
1: [
8,
1,
3,
4,
5,
6,
],
2: [
0,
15,
54,
0,
4,
8,
],
3: [
10,
11,
45,
0,
7,
9,
],
4: [
0,
1,
3,
4,
5,
6,
],
}
).T
print(d)
cols = []
for col in d:
# print(col)
# print(pd.get_dummies(d[col].astype(str)))
cols.append(pd.get_dummies(d[col].astype(str)))
pd.concat(cols, axis=1)
# ## Baseline model
# Build a basic convolution block using Keras
def make_conv_block(input_tensor, num_filters):
"""функция создает блок из сверточных слоев, слоя пакетной нормализации (BatchNormalization), слоя MaxPool2D и Dropout
df - набор данных с изображениями исходного размера
size - ширина и высота изображения после преобразования
need_progress_bar - вывод строки состояния
return - дата фрейм содержащий изображения нового размера"""
    # a series of convolutional layers with the same kernel_size
model = Conv2D(
filters=num_filters, kernel_size=(3, 3), padding="SAME", activation="relu"
)(input_tensor)
model = Conv2D(
filters=num_filters, kernel_size=(3, 3), padding="SAME", activation="relu"
)(model)
model = Conv2D(
filters=num_filters, kernel_size=(3, 3), padding="SAME", activation="relu"
)(model)
model = Conv2D(
filters=num_filters, kernel_size=(3, 3), padding="SAME", activation="relu"
)(model)
    model = BatchNormalization(momentum=0.15)(model)  # mini-batch normalization layer
    model = MaxPool2D(pool_size=(2, 2))(model)  # downsampling layer (halves the spatial size)
    model = Conv2D(
        filters=num_filters, kernel_size=(5, 5), padding="SAME", activation="relu"
    )(
        model
    )  # one more convolution with a larger (5, 5) kernel
    model = Dropout(rate=0.3)(
        model
    )  # regularization layer (randomly drops part of the layer's activations to avoid overfitting)
return model
# create a series of blocks of the same type with a different number of filters
inputs = Input(shape=(IMG_SIZE, IMG_SIZE, N_CHANNELS))  # input layer
model = inputs  # a small trick: we will still need inputs later, so it must not be redefined or transformed
# sequentially apply the convolution block with an ever-increasing number of filters
for num_filters in [32, 64, 128, 256]:
conv_block = make_conv_block(model, num_filters)
model = conv_block
# flatten the output of the last convolution block into a vector
model = Flatten()(model)
model = Dense(1024, activation="relu")(
    model
)  # add a fully connected layer
model = Dropout(rate=0.3)(model)  # regularization layer
dense = Dense(512, activation="relu")(model)  # one more fully connected layer
head_root = Dense(168, activation="softmax")(
    dense
)  # network output responsible for classifying grapheme roots
head_vowel = Dense(11, activation="softmax")(
    dense
)  # network output responsible for classifying vowel diacritics
head_consonant = Dense(7, activation="softmax")(
    dense
)  # network output responsible for classifying consonant diacritics
# build the model
model = Model(inputs=inputs, outputs=[head_root, head_vowel, head_consonant])
## if you want to use a pre-trained neural network
## at the moment you would also need to convert the images to three channels for this model to work
# vgg19_net = VGG19(weights='imagenet', include_top=False, input_shape=(IMG_SIZE, IMG_SIZE, 3))
# vgg19_net.trainable = False
# model = vgg19_net.output
# model = Flatten()(model)
# model = Dense(1024, activation = "relu")(model) # add a fully connected layer
# model = Dropout(rate=0.3)(model) # regularization layer
# dense = Dense(512, activation = "relu")(model) # one more fully connected layer
# head_root = Dense(168, activation = 'softmax')(dense) # output head for classifying grapheme roots
# head_vowel = Dense(11, activation = 'softmax')(dense) # output head for classifying vowel diacritics
# head_consonant = Dense(7, activation = 'softmax')(dense) # output head for classifying consonant diacritics
# build the model
# model = Model(inputs=vgg19_net.input, outputs=[head_root, head_vowel, head_consonant])
model.summary()  # print the model summary
# Visualize the CNN with 3 outputs
from tensorflow.keras.utils import plot_model
plot_model(model, to_file="model.png")
model.compile(
    optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)  # compile the model
# Set the learning-rate schedule for each output of the network.
# Also set the rule: if accuracy does not improve for 3 epochs in a row (patience=3), halve the learning rate (factor=0.5)
learning_rate_reduction_root = ReduceLROnPlateau(
monitor="dense_2_accuracy", patience=3, verbose=1, factor=0.5, min_lr=0.00001
)
learning_rate_reduction_vowel = ReduceLROnPlateau(
monitor="dense_3_accuracy", patience=3, verbose=1, factor=0.5, min_lr=0.00001
)
learning_rate_reduction_consonant = ReduceLROnPlateau(
monitor="dense_4_accuracy", patience=3, verbose=1, factor=0.5, min_lr=0.00001
)
batch_size = 256
epochs = 30
class MultiOutputDataGenerator(keras.preprocessing.image.ImageDataGenerator):
"""класс наследующий класс ImageDataGenerator Keras"""
def flow(
self,
x,
y=None,
batch_size=32,
shuffle=True,
sample_weight=None,
seed=None,
save_to_dir=None,
save_prefix="",
save_format="png",
subset=None,
):
"""переопределяет функцию flow родительского класса"""
targets = None
target_lengths = {}
ordered_outputs = []
for output, target in y.items():
if targets is None:
targets = target
else:
targets = np.concatenate((targets, target), axis=1)
target_lengths[output] = target.shape[1]
ordered_outputs.append(output)
for flowx, flowy in super().flow(
x, targets, batch_size=batch_size, shuffle=shuffle
):
target_dict = {}
i = 0
for output in ordered_outputs:
target_length = target_lengths[output]
target_dict[output] = flowy[:, i : i + target_length]
i += target_length
yield flowx, target_dict
HEIGHT = 137
WIDTH = 236
# ### Training loop
histories = []
# read the training image sets one by one and join train_df_ onto them to attach the labels
for i in range(4):
train_df = pd.merge(
pd.read_feather(featherdir / f"train_image_data_{i}.feather"),
train_df_,
on="image_id",
).drop(["image_id"], axis=1)
    # Visualize a few samples from the current training set
fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(16, 8))
count = 0
for row in ax:
for col in row:
col.imshow(
resize(
train_df.drop(
["grapheme_root", "vowel_diacritic", "consonant_diacritic"],
axis=1,
).iloc[[count]],
size=IMG_SIZE,
need_progress_bar=False,
)
.values.reshape(-1)
.reshape(IMG_SIZE, IMG_SIZE)
.astype(np.float64)
)
count += 1
plt.show()
    # drop the class labels from the training data
X_train = train_df.drop(
["grapheme_root", "vowel_diacritic", "consonant_diacritic"], axis=1
)
X_train = (
resize(X_train, size=IMG_SIZE) / 255
    )  # resize the images to the input size the network expects and normalize the pixel values
    # The CNN takes images of shape `(batch_size, h, w, channel)`, so reshape the images accordingly
    X_train = X_train.values.reshape(-1, IMG_SIZE, IMG_SIZE, N_CHANNELS)
    # convert the class labels from integers to one-hot vectors.
    # For example, consonant diacritics: the column vector Y_train_consonant holds values from 0 to 6; it now becomes a matrix with 7 columns of the same length
    # And if the i-th value was 5, it now becomes the row vector [0, 0, 0, 0, 0, 1, 0]
Y_train_root = pd.get_dummies(train_df["grapheme_root"]).values
Y_train_vowel = pd.get_dummies(train_df["vowel_diacritic"]).values
Y_train_consonant = pd.get_dummies(train_df["consonant_diacritic"]).values
print(f"Training images: {X_train.shape}")
print(f"Training labels root: {Y_train_root.shape}")
print(f"Training labels vowel: {Y_train_vowel.shape}")
print(f"Training labels consonants: {Y_train_consonant.shape}")
    # Split the data into training and validation sets
(
x_train,
x_test,
y_train_root,
y_test_root,
y_train_vowel,
y_test_vowel,
y_train_consonant,
y_test_consonant,
) = train_test_split(
X_train,
Y_train_root,
Y_train_vowel,
Y_train_consonant,
test_size=0.08,
random_state=666,
)
del train_df
del X_train
del Y_train_root, Y_train_vowel, Y_train_consonant
    # Data augmentation to create more training data
    datagen = MultiOutputDataGenerator(
        featurewise_center=False,  # set the input mean to 0 over the whole dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # do not normalize over the whole dataset
        samplewise_std_normalization=False,  # do not normalize each sample
        zca_whitening=False,
        rotation_range=15,  # randomly rotate images in this range (degrees, 0 to 180) (8)
        zoom_range=0.25,  # randomly zoom images (0.15)
        width_shift_range=0.15,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.15,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images horizontally
        vertical_flip=False,
    )  # randomly flip images vertically
    # Compute the statistics needed for the data augmentation. No augmentation is performed yet
    datagen.fit(x_train)
    # Train the model
history = model.fit_generator(
datagen.flow(
x_train,
{
"dense_2": y_train_root,
"dense_3": y_train_vowel,
"dense_4": y_train_consonant,
},
batch_size=batch_size,
),
epochs=epochs,
validation_data=(x_test, [y_test_root, y_test_vowel, y_test_consonant]),
steps_per_epoch=x_train.shape[0] // batch_size,
callbacks=[
learning_rate_reduction_root,
learning_rate_reduction_vowel,
learning_rate_reduction_consonant,
],
)
histories.append(history)
# Delete to reduce memory usage
del x_train
del x_test
del y_train_root
del y_test_root
del y_train_vowel
del y_test_vowel
del y_train_consonant
del y_test_consonant
gc.collect()
# if needed, save the model for later use
name_model = "own_model_1.h5"
model.save(name_model)
# model = load_model(name_model) # load a saved model for further use
# **Plot the loss and accuracy curves**
def plot_loss(his, epoch, title):
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epoch), his.history["loss"], label="train_loss")
plt.plot(np.arange(0, epoch), his.history["dense_3_loss"], label="train_root_loss")
plt.plot(np.arange(0, epoch), his.history["dense_4_loss"], label="train_vowel_loss")
plt.plot(
np.arange(0, epoch), his.history["dense_5_loss"], label="train_consonant_loss"
)
plt.plot(
np.arange(0, epoch),
his.history["val_dense_3_loss"],
label="val_train_root_loss",
)
plt.plot(
np.arange(0, epoch),
his.history["val_dense_4_loss"],
label="val_train_vowel_loss",
)
plt.plot(
np.arange(0, epoch),
his.history["val_dense_5_loss"],
label="val_train_consonant_loss",
)
plt.title(title)
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend(loc="upper right")
plt.show()
def plot_acc(his, epoch, title):
plt.style.use("ggplot")
plt.figure()
plt.plot(
np.arange(0, epoch), his.history["dense_3_accuracy"], label="train_root_acc"
)
plt.plot(
np.arange(0, epoch),
his.history["dense_4_accuracy"],
label="train_vowel_accuracy",
)
plt.plot(
np.arange(0, epoch),
his.history["dense_5_accuracy"],
label="train_consonant_accuracy",
)
plt.plot(
np.arange(0, epoch), his.history["val_dense_3_accuracy"], label="val_root_acc"
)
plt.plot(
np.arange(0, epoch),
his.history["val_dense_4_accuracy"],
label="val_vowel_accuracy",
)
plt.plot(
np.arange(0, epoch),
his.history["val_dense_5_accuracy"],
label="val_consonant_accuracy",
)
plt.title(title)
plt.xlabel("Epoch #")
plt.ylabel("Accuracy")
plt.legend(loc="upper right")
plt.show()
for dataset in range(4):
plot_loss(histories[dataset], epochs, f"Training Dataset: {dataset}")
plot_acc(histories[dataset], epochs, f"Training Dataset: {dataset}")
del histories
gc.collect()
preds_dict = {"grapheme_root": [], "vowel_diacritic": [], "consonant_diacritic": []}
components = ["consonant_diacritic", "grapheme_root", "vowel_diacritic"]
target = []  # list of model predictions
row_id = []  # list of prediction row ids
for i in range(4):
    # df_test_img = pd.read_parquet(f'/kaggle/input/bengaliai-cv19/test_image_data_{i}.parquet') # read the test set
    df_test_img = pd.read_feather(
        featherdir / f"test_image_data_{i}.feather"
    )  # read the test set
    df_test_img.set_index(
        "image_id", inplace=True
    )  # use the image id column as the index
    X_test = (
        resize(df_test_img, size=IMG_SIZE, need_progress_bar=False) / 255
    )  # resize the images and normalize them
    X_test = X_test.values.reshape(
        -1, IMG_SIZE, IMG_SIZE, N_CHANNELS
    )  # reshape the image matrix to include the mini-batch dimension
    preds = model.predict(X_test)  # run the prediction
    # iterate over the three network outputs
for i, p in enumerate(preds_dict):
preds_dict[p] = np.argmax(
preds[i], axis=1
        )  # take the most likely prediction for the current classifier head (e.g. for grapheme roots)
    # record the predictions in the format required for the submission
for k, id in enumerate(df_test_img.index.values):
for i, comp in enumerate(components):
id_sample = id + "_" + comp
row_id.append(id_sample)
target.append(preds_dict[comp][k])
del df_test_img
del X_test
gc.collect()
# convert the collected row ids and predictions into a dataframe
df_sample = pd.DataFrame(
    {"row_id": row_id, "target": target}, columns=["row_id", "target"]
)
# write the submission file
df_sample.to_csv("submission.csv", index=False)
df_sample.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## The following analysis is performed on a dataset of the top 1000 YouTube channels by subscriber count; it contains columns such as Youtube Channel, Subscribers, Video Views, Video Count, Category and the year each channel started
youtube = pd.read_csv("/kaggle/input/youtube-channels/topSubscribed.csv")
youtube
youtube.info()
# #### From the above data summary, it is evident that there are no null values in the dataset
# #### However, it can be noticed that columns like Subscribers, Video Views and Video Count are of object type instead of integer type, so the first data-cleaning activity is to change their datatypes to the appropriate ones
youtube["Subscribers"] = pd.to_numeric(youtube["Subscribers"])
youtube["Video Views"] = pd.to_numeric(youtube["Video Views"])
youtube["Video Count"] = pd.to_numeric(youtube["Video Count"])
youtube.info()
# #### Here an error is generated due to the presence of commas (,) in the values of the respective columns. Hence we first remove the commas and then convert the datatypes to integer values
youtube["Subscribers"] = youtube["Subscribers"].str.replace(",", "").astype(int)
youtube["Video Views"] = youtube["Video Views"].str.replace(",", "").astype(int)
youtube["Video Count"] = youtube["Video Count"].str.replace(",", "").astype(int)
youtube.info()
# #### Converting the 'Started' column from integer type to date type
# #### But before converting it to a date type, we need to convert it into a string type, or else converting directly from integer to date will result in abnormal values
youtube["Started"] = youtube["Started"].astype(str)
youtube.info()
youtube
youtube["Year"] = pd.to_datetime(youtube["Started"], format="%Y").dt.date
youtube
youtube.drop("Started", axis=1, inplace=True)
youtube
# #### Extracting all the unique values from the 'Category' column to see what categories are in the dataset
youtube["Category"].unique()
# #### It can be seen that there is a link as a value in the 'Category' column. The second data-cleaning activity is to replace it with appropriate values
youtube[
youtube["Category"]
== "https://us.youtubers.me/global/all/top-1000-most_subscribed-youtube-channels"
]
# #### From the above dataset, apart from rows 5, 9, 17, 91, 154, 219 and 357, all the remaining rows belong to the 'Howto & Style' category
youtube.loc[youtube["Youtube Channel"] == "Music", "Category"] = "Music"
youtube.loc[youtube["Youtube Channel"] == "Gaming", "Category"] = "Gaming"
youtube.loc[youtube["Youtube Channel"] == "Sports", "Category"] = "Sports"
youtube.loc[youtube["Youtube Channel"] == "News", "Category"] = "News & Politics"
youtube.loc[youtube["Youtube Channel"] == "Popular on YouTube", "Category"] = "Shows"
youtube.loc[youtube["Youtube Channel"] == "Minecraft - Topic", "Category"] = "Gaming"
youtube.loc[
youtube["Category"]
== "https://us.youtubers.me/global/all/top-1000-most_subscribed-youtube-channels",
"Category",
] = "Howto & Style"
youtube[
youtube["Category"]
== "https://us.youtubers.me/global/all/top-1000-most_subscribed-youtube-channels"
]
# #### All the irrelevant data is now removed from the 'Category' column
youtube["Category"].unique()
youtube[["Video Views", "Video Count"]].replace(0, np.nan, inplace=True)
youtube
# #### Creating a new column to show how many views per video for a Youtube channel
youtube["Views per video"] = round(youtube["Video Views"] / youtube["Video Count"], 2)
pd.options.display.float_format = "{:.2f}".format
youtube
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
df.head()
df["Date"] = df["Date"].apply(pd.to_datetime)
df["Last Update"] = df["Last Update"].apply(pd.to_datetime)
df = df.rename(columns={"Last Update": "Last_Update"})
df.drop(columns="Sno", axis=1, inplace=True)
df.head()
df["ev_year"] = [df.Date[i].year for i in range(df.shape[0])]
df["ev_month"] = [df.Date[i].month for i in range(df.shape[0])]
df["ev_day"] = [df.Date[i].day for i in range(df.shape[0])]
df["ev_hour"] = [df.Date[i].hour for i in range(df.shape[0])]
df.drop(columns="Date", axis=1, inplace=True)
df["ls_year"] = [df.Last_Update[i].year for i in range(df.shape[0])]
df["ls_month"] = [df.Last_Update[i].month for i in range(df.shape[0])]
df["ls_day"] = [df.Last_Update[i].day for i in range(df.shape[0])]
df["ls_hour"] = [df.Last_Update[i].hour for i in range(df.shape[0])]
df.drop(columns="Last_Update", axis=1, inplace=True)
df.head()
df.describe(include="all")
df.loc[df.Country == "Mainland China", "Country"] = "China"
df.groupby("Country").sum()[["Confirmed", "Deaths", "Recovered"]]
df_wc = df[df.Country != "China"]
g = df_wc.groupby("Country").sum()[["Confirmed", "Deaths", "Recovered"]]
fig = make_subplots(rows=3, cols=1, subplot_titles=("Confirmed", "Deaths", "Recovered"))
fig.add_trace(go.Bar(x=g.index, y=g.Confirmed), row=1, col=1)
fig.add_trace(go.Bar(x=g.index, y=g.Deaths), row=2, col=1)
fig.add_trace(go.Bar(x=g.index, y=g.Recovered), row=3, col=1)
fig.update_layout(
height=700, width=1000, title_text="Corona Virus Report (Except China)"
)
fig.show()
g = (
df[df.Country == "China"]
.groupby("Province/State")
.sum()[["Confirmed", "Deaths", "Recovered"]]
)
fig = make_subplots(rows=3, cols=1, subplot_titles=("Confirmed", "Deaths", "Recovered"))
fig.add_trace(go.Bar(x=g.index, y=g.Confirmed), row=1, col=1)
fig.add_trace(go.Bar(x=g.index, y=g.Deaths), row=2, col=1)
fig.add_trace(go.Bar(x=g.index, y=g.Recovered), row=3, col=1)
fig.update_layout(
height=800, width=1000, title_text="Corona Virus Report (In States of China)"
)
fig.show()
g = g[g.Confirmed < max(g.Confirmed)]
fig = make_subplots(rows=3, cols=1, subplot_titles=("Confirmed", "Deaths", "Recovered"))
fig.add_trace(go.Bar(x=g.index, y=g.Confirmed), row=1, col=1)
fig.add_trace(go.Bar(x=g.index, y=g.Deaths), row=2, col=1)
fig.add_trace(go.Bar(x=g.index, y=g.Recovered), row=3, col=1)
fig.update_layout(
height=700, width=1000, title_text="Corona Virus Report (In States of China)"
)
fig.show()
# Australia
# Hong Kong
# Japan
# Macau
# Malaysia
# Singapore
# South Korea
# Taiwan
# Thailand
# US
print("Granular view for following nations were available\n")
g4 = (
df[df.Country == "Australia"]
.groupby("Province/State")
.sum()[["Confirmed", "Deaths", "Recovered"]]
)
print("\nStats for Australia\n", "_" * 50, "\n", g4)
g4 = (
df[df.Country == "US"]
.groupby("Province/State")
.sum()[["Confirmed", "Deaths", "Recovered"]]
)
print("\nStats for United States of America\n", "_" * 50, "\n", g4)
dft = df[df.Country == "China"]
g1 = pd.DataFrame(
dft[["Country", "ev_day", "ev_month", "Confirmed"]]
.groupby(["ev_month", "ev_day"])
.sum()["Confirmed"]
)
a = [i for i in range(g1.shape[0])]
fig = px.bar(x=a, y=g1.Confirmed)
fig.update_layout(height=300, width=800, title_text="Corona Virus (In China)")
fig.update_layout(
xaxis=dict(
tickmode="array",
tickvals=[i for i in range(g1.shape[0] + 1)],
ticktext=g1.index,
)
)
fig.show()
dft = df[df.Country != "China"]
g2 = pd.DataFrame(
dft[["Country", "ev_day", "ev_month", "Confirmed"]]
.groupby(["ev_month", "ev_day"])
.sum()["Confirmed"]
)
a = [i for i in range(g2.shape[0])]
fig = px.bar(x=a, y=g2.Confirmed)
fig.update_layout(height=300, width=800, title_text="Corona Virus (Rest of the World)")
fig.update_layout(
xaxis=dict(
tickmode="array",
        tickvals=[i for i in range(g2.shape[0] + 1)],
        ticktext=g2.index,
)
)
fig.show()
|
# # Introduction
# In this notebook, we explore data on the global surface temperature of the Earth. This is one of my first few toy projects, which I use as a platform to practise and consolidate the knowledge I learned from the Kaggle courses.
# We begin by importing the relevant packages. We also run a code provided by Kaggle to list all of the given datasets:
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# From the output above we see that there are a total of five datasets, four of which are arranged by some geographical classification (e.g. by state, country, etc.). Below we will attempt to explore all five. Nevertheless, we will start with the simplest one: `GlobalTemperatures.csv`.
# # Load the data
# We load the data using the `.read_csv` command in Pandas. It seems natural to label each row by its date (recorded in the column `'dt'`), so when loading the data, we may set `index_col='dt'` and `parse_dates=True`:
# Path of the files
temp_filepath = (
"/kaggle/input/climate-change-earth-surface-temperature-data/GlobalTemperatures.csv"
)
temp_country_filepath = "/kaggle/input/climate-change-earth-surface-temperature-data/GlobalLandTemperaturesByCountry.csv"
temp_state_filepath = "/kaggle/input/climate-change-earth-surface-temperature-data/GlobalLandTemperaturesByState.csv"
temp_city_filepath = "/kaggle/input/climate-change-earth-surface-temperature-data/GlobalLandTemperaturesByCity.csv"
temp_majorcity_filepath = "/kaggle/input/climate-change-earth-surface-temperature-data/GlobalLandTemperaturesByMajorCity.csv"
# Read the files
temp_data = pd.read_csv(temp_filepath, index_col="dt", parse_dates=True)
temp_country_data = pd.read_csv(temp_country_filepath, index_col="dt", parse_dates=True)
temp_state_data = pd.read_csv(temp_state_filepath, index_col="dt", parse_dates=True)
temp_city_data = pd.read_csv(temp_city_filepath, index_col="dt", parse_dates=True)
temp_majorcity_data = pd.read_csv(
temp_majorcity_filepath, index_col="dt", parse_dates=True
)
# # Examine the data
# We begin by exploring `temp_data`. Let us take a brief look at the first few rows of the table. We do this by using the `.head()` function:
# Print the first 5 rows of temp_data
temp_data.head()
# We can already note several things from the table above:
# 1. The table is relatively simple, in the sense that it has only few columns.
# 2. From the column `'dt'`, which holds the date, it seems that the data was recorded once every month. This potentially raises several issues of interpretation. We will discuss this further in the following.
# 3. There are already a lot of missing values in the table. This is actually understandable as the data dates back to as early as year 1750, which is almost three centuries ago.
# We may also take a brief look at the *last* few rows of the table, by using the `.tail()` function instead:
# Print the last 5 rows of temp_data
temp_data.tail()
# We find that the data was updated until year 2015.
# Next, we may look into more details about the dataset. For example, we may use the `.shape` attribute to check the size of the DataFrame:
temp_data.shape
# There are only eight columns (the ninth, the date column, has been taken to label the rows) but more than 3,000 rows in the DataFrame. To see the data type (abbrv. dtype) of each column and the number of non-null entries it has, we apply the `.info()` function:
temp_data.info()
# Two observations are clear from this output:
# 1. The dtype of all columns (apart from the date column) are `float64`.
# 2. Comparing the `non-null count` with the total number of rows shown in `temp_data.shape`, we see that *every* column has some null values.
# We shall handle the null values in the data-preprocessing section below.
# Next, we can also take a brief look at the descriptive statistics of these columns. This is done by using the `.describe()` function:
# Print the statistics
temp_data.describe()
# From this table one may already make a lot of observations. We won't state everything here but only give a few examples:
# 1. There was a period of time when the Earth was far colder than now. This can be inferred, for example, by looking at the `min` of `LandAverageTemperature`, which is equal to -2.08 degrees (in Celsius); or by looking at the `min` of `LandMaxTemperature`, which is equal to 5.9 degrees.
# 2. Overall, the Earth is not too hot throughout the whole period. If we look at the `max` of `LandMaxTemperature`, it is only 21.32 degrees, a pretty comfortable temperature for living. \
# (Of course, this does not mean that it won't be hotter in the future. To get an idea of the trend for the change of temperature, one may need to plot a graph. We will do this in the data-visualization section below.)
# 3. When the ocean is also included, the fluctuation of the temperature becomes smaller. We infer this, for example, by comparing the standard deviation `std` of `LandAverageTemperatureUncertainty`, which is 1.09644, with that of `LandAndOceanAverageTemperatureUncertainty`, which is 0.073587. The latter is clearly smaller than the former. \
# We also infer this by comparing the *five-number summary* of the two columns above. The 1st and 3rd quartiles, the median, and the maximum of the land-and-ocean column are all lower than those of the land-alone column. The minimum, although higher, is not too far different either.
# (For item 3, one caveat that needs to be kept in mind is that the land-alone column records data as early as 1750, while the land-and-ocean column starts only from 1850. The difference in measurement precision between these two periods of time can possibly result in different uncertainties. We cannot completely exclude the effect of this on our inference.)
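# (Aside, not part of the original walkthrough: to partially address the caveat above, one may restrict the comparison to the common period from 1850 onwards. A minimal sketch, using the original column names since the renaming only happens in the next section:)
common_period = temp_data.loc[temp_data.index.year >= 1850]
common_period[
    ["LandAverageTemperatureUncertainty", "LandAndOceanAverageTemperatureUncertainty"]
].describe()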
# # Data-preprocessing
# For convenience of typing, we will first rename the columns as follows:
# From the description of data, uncertainty = 95% confidence interval ...
# ... which corresponds to two standard deviations, ...
# ... and standard deviation is commonly denoted by the Greek letter sigma.
temp_data.rename(
columns={
"LandAverageTemperature": "land_avg_temp",
"LandAverageTemperatureUncertainty": "land_avg_temp_2sigmas",
"LandMaxTemperature": "land_max_temp",
"LandMaxTemperatureUncertainty": "land_max_temp_2sigmas",
"LandMinTemperature": "land_min_temp",
"LandMinTemperatureUncertainty": "land_min_temp_2sigmas",
"LandAndOceanAverageTemperature": "land_ocean_avg_temp",
"LandAndOceanAverageTemperatureUncertainty": "land_ocean_avg_temp_2sigmas",
},
inplace=True,
)
# Uncomment to see the result
# temp_data.head()
# As we have seen above, every column has some missing values. One can print the number of missing values using the code below:
temp_data.isnull().sum()
# While the columns for the land average temperature and its uncertainty have only 12 null values, the other columns each have more than 1,000 missing values, which is more than one-third of the total number of rows (3192/3 = 1064). We shall deal with these two cases separately.
# First, we consider the rows where the land average temperature has missing values:
# Print the rows where the column 'land_avg_temp' has NaN
# Show only the columns 'land_avg_temp' and 'land_avg_temp_2sigmas'
temp_data[["land_avg_temp", "land_avg_temp_2sigmas"]].loc[
temp_data.land_avg_temp.isnull()
]
# Incidentally, we find that the rows where `land_avg_temp` has null values are also the rows where `land_avg_temp_2sigmas` has missing values.
# The missing values all fall within the first few years after data collection started: one is in 1750, five are in 1751, and the remaining six are in 1752. Some of them are in consecutive months.
# To fill in these missing values, we shall make the following *assumptions*: the temperature varies continuously, and in an approximately linear fashion. These assumptions are not unreasonable, and they allow us to interpolate in a simple way. For example, for the missing temperature in Nov 1750 we may take the *average* of the temperatures in Oct 1750 and Dec 1750. The same can be done for the other isolated (non-consecutive) missing months.
# We fill in the missing values by using the `.at` accessor. \
# (Note: Since we have set `parse_dates=True` when we loaded the data, the indices of the DataFrame (which are the dates) are no longer of object dtype; i.e. they are no longer strings. We cannot simply pass `'1750-11-01'` as the first argument of `.at[]`. Instead, we also need to apply the function conveniently provided by pandas, `pd.to_datetime()`, to transform the string into the desired dtype.)
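# (Aside, not part of the original approach: pandas can perform the same linear interpolation in a single call, resting on the same linearity assumption. A minimal sketch -- uncomment to use it instead of the manual filling below.)
# temp_data["land_avg_temp"] = temp_data["land_avg_temp"].interpolate(method="linear")
# temp_data["land_avg_temp_2sigmas"] = temp_data["land_avg_temp_2sigmas"].interpolate(
#     method="linear"
# )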
temp_data.at[pd.to_datetime("1750-11-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1750-10-01"), "land_avg_temp"]
+ temp_data.at[pd.to_datetime("1750-12-01"), "land_avg_temp"]
) / 2
temp_data.at[pd.to_datetime("1751-05-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1751-04-01"), "land_avg_temp"]
+ temp_data.at[pd.to_datetime("1751-06-01"), "land_avg_temp"]
) / 2
temp_data.at[pd.to_datetime("1751-07-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1751-06-01"), "land_avg_temp"]
+ temp_data.at[pd.to_datetime("1751-08-01"), "land_avg_temp"]
) / 2
temp_data.at[pd.to_datetime("1752-02-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1752-01-01"), "land_avg_temp"]
+ temp_data.at[pd.to_datetime("1752-03-01"), "land_avg_temp"]
) / 2
# Uncomment to display the result
"""
temp_data[['land_avg_temp','land_avg_temp_2sigmas']].loc[[
pd.to_datetime('1750-11-01'),
pd.to_datetime('1751-05-01'),
pd.to_datetime('1751-07-01'),
pd.to_datetime('1752-02-01')
]]
"""
# For the missing values in consecutive months, we fill in the values so that the result displays a linear change; in terms of a graph, it should look as if a straight line segment had been drawn to interpolate the data. The precise meaning should be clear from the following code:
# For Oct, Nov, Dec 1751
# d1 = 'difference 1'
d1 = (
temp_data.at[pd.to_datetime("1752-01-01"), "land_avg_temp"]
- temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp"]
) / 4
temp_data.at[pd.to_datetime("1751-10-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp"] + 1 * d1
)
temp_data.at[pd.to_datetime("1751-11-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp"] + 2 * d1
)
temp_data.at[pd.to_datetime("1751-12-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp"] + 3 * d1
)
# For May, Jun, Jul, Aug, Sep 1752
# d2 = 'difference 2'
d2 = (
temp_data.at[pd.to_datetime("1752-10-01"), "land_avg_temp"]
- temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp"]
) / 6
temp_data.at[pd.to_datetime("1752-05-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp"] + 1 * d2
)
temp_data.at[pd.to_datetime("1752-06-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp"] + 2 * d2
)
temp_data.at[pd.to_datetime("1752-07-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp"] + 3 * d2
)
temp_data.at[pd.to_datetime("1752-08-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp"] + 4 * d2
)
temp_data.at[pd.to_datetime("1752-09-01"), "land_avg_temp"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp"] + 6 * d2
)
# Uncomment to display the result
"""
temp_data[['land_avg_temp','land_avg_temp_2sigmas']].loc[[
pd.to_datetime('1751-10-01'),
pd.to_datetime('1751-11-01'),
pd.to_datetime('1751-12-01'),
pd.to_datetime('1752-05-01'),
pd.to_datetime('1752-06-01'),
pd.to_datetime('1752-07-01'),
pd.to_datetime('1752-08-01'),
pd.to_datetime('1752-09-01')
]]
"""
# Lastly, we do the same for the missing values in the column `'land_avg_temp_2sigmas'`:
# For non-consecutive months
temp_data.at[pd.to_datetime("1750-11-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1750-10-01"), "land_avg_temp_2sigmas"]
+ temp_data.at[pd.to_datetime("1750-12-01"), "land_avg_temp_2sigmas"]
) / 2
temp_data.at[pd.to_datetime("1751-05-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1751-04-01"), "land_avg_temp_2sigmas"]
+ temp_data.at[pd.to_datetime("1751-06-01"), "land_avg_temp_2sigmas"]
) / 2
temp_data.at[pd.to_datetime("1751-07-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1751-06-01"), "land_avg_temp_2sigmas"]
+ temp_data.at[pd.to_datetime("1751-08-01"), "land_avg_temp_2sigmas"]
) / 2
temp_data.at[pd.to_datetime("1752-02-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1752-01-01"), "land_avg_temp_2sigmas"]
+ temp_data.at[pd.to_datetime("1752-03-01"), "land_avg_temp_2sigmas"]
) / 2
# For consecutive months
# For Oct, Nov, Dec 1751
delta_1 = (
temp_data.at[pd.to_datetime("1752-01-01"), "land_avg_temp_2sigmas"]
- temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp_2sigmas"]
) / 4
temp_data.at[pd.to_datetime("1751-10-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp_2sigmas"] + 1 * delta_1
)
temp_data.at[pd.to_datetime("1751-11-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp_2sigmas"] + 2 * delta_1
)
temp_data.at[pd.to_datetime("1751-12-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1751-09-01"), "land_avg_temp_2sigmas"] + 3 * delta_1
)
# For May, Jun, Jul, Aug, Sep 1752
delta_2 = (
temp_data.at[pd.to_datetime("1752-10-01"), "land_avg_temp_2sigmas"]
- temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp_2sigmas"]
) / 6
temp_data.at[pd.to_datetime("1752-05-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp_2sigmas"] + 1 * delta_2
)
temp_data.at[pd.to_datetime("1752-06-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp_2sigmas"] + 2 * delta_2
)
temp_data.at[pd.to_datetime("1752-07-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp_2sigmas"] + 3 * delta_2
)
temp_data.at[pd.to_datetime("1752-08-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp_2sigmas"] + 4 * delta_2
)
temp_data.at[pd.to_datetime("1752-09-01"), "land_avg_temp_2sigmas"] = (
temp_data.at[pd.to_datetime("1752-04-01"), "land_avg_temp_2sigmas"] + 6 * delta_2
)
# Display the final result
temp_data[["land_avg_temp", "land_avg_temp_2sigmas"]].loc[
[
pd.to_datetime("1750-11-01"),
pd.to_datetime("1751-05-01"),
pd.to_datetime("1751-07-01"),
pd.to_datetime("1751-10-01"),
pd.to_datetime("1751-11-01"),
pd.to_datetime("1751-12-01"),
pd.to_datetime("1752-02-01"),
pd.to_datetime("1752-05-01"),
pd.to_datetime("1752-06-01"),
pd.to_datetime("1752-07-01"),
pd.to_datetime("1752-08-01"),
pd.to_datetime("1752-09-01"),
]
]
# With this, the first case is settled.
# We now deal with the second case, in which the columns have 1,200 missing values. We first note, from the dataset description, that these columns record only the data from year 1850 onwards. This would mean that the 1,200 missing values are actually those from 1750 to 1849. Let us verify this:
# Print the rows where the column 'land_max_temp' has NaN
# Show all columns except 'land_avg_temp' and 'land_avg_temp_2sigmas'
temp_data[
[
"land_max_temp",
"land_max_temp_2sigmas",
"land_min_temp",
"land_min_temp_2sigmas",
"land_ocean_avg_temp",
"land_ocean_avg_temp_2sigmas",
]
].loc[temp_data.land_max_temp.isnull()]
# Again, it seems that the 1,200 rows where the column `'land_max_temp'` has missing values are also exactly the 1,200 rows where the other columns `'land_max_temp_2sigmas'`, `'land_min_temp'`, `'land_min_temp_2sigmas'`, `'land_ocean_avg_temp'` and `'land_ocean_avg_temp_2sigmas'` have missing values. Of course, we are not going to check every one of these 1,200 rows one by one to verify this claim. Instead, we can verify it with a small piece of code:
# E.g. max_same_max2s is a Boolean-valued variable which is True ...
# ... if the list of indices of the rows where land_max_temp has null values ...
# ... is the same as the list where land_max_temp_2sigmas has null values.
max_same_max2s = list(temp_data.loc[temp_data.land_max_temp.isnull()].index) == list(
temp_data.loc[temp_data.land_max_temp_2sigmas.isnull()].index
)
# Same for the other columns
max_same_min = list(temp_data.loc[temp_data.land_max_temp.isnull()].index) == list(
temp_data.loc[temp_data.land_min_temp.isnull()].index
)
max_same_min2s = list(temp_data.loc[temp_data.land_max_temp.isnull()].index) == list(
temp_data.loc[temp_data.land_min_temp_2sigmas.isnull()].index
)
max_same_loa = list(temp_data.loc[temp_data.land_max_temp.isnull()].index) == list(
temp_data.loc[temp_data.land_ocean_avg_temp.isnull()].index
)
max_same_loa2s = list(temp_data.loc[temp_data.land_max_temp.isnull()].index) == list(
temp_data.loc[temp_data.land_ocean_avg_temp_2sigmas.isnull()].index
)
# Finally, consider their conjunction
print(
max_same_max2s
and max_same_min
and max_same_min2s
and max_same_loa
and max_same_loa2s
)
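# (Aside: a more concise way to express the same check is to compare the Boolean null masks directly. A minimal sketch, equivalent to the comparisons above:)
cols_from_1850 = [
    "land_max_temp_2sigmas",
    "land_min_temp",
    "land_min_temp_2sigmas",
    "land_ocean_avg_temp",
    "land_ocean_avg_temp_2sigmas",
]
print(
    all(
        temp_data[c].isnull().equals(temp_data["land_max_temp"].isnull())
        for c in cols_from_1850
    )
)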
# For these columns, we decide not to fill in the missing values at all. Instead, we shall treat the data as a complete dataset over a *shorter* period of time; that is, from 1850 onwards.
# # Data Visualization
# Next, we explore the data more in depth by creating different charts and plots.
# We begin with a simple one -- a line chart that shows how the `land_avg_temp` changes over time:
plt.figure(figsize=(24, 6))
sns.lineplot(data=temp_data["land_avg_temp"])
plt.xlabel("Date")
plt.ylabel("Global average land temperature (°C)")
# We see that the graph fluctuates drastically. Visually, this yields a long, thick shaded area which does not help much with data analysis, even though we have already set `figsize=(24,6)` so that the width of the graph is four times its height.
# Domain knowledge of Earth science suggests that the fluctuation is actually normal -- it displays the periodic seasonal variation that occurs all the time. Indeed, we can observe this from a small fraction of the line chart, say, within a five-year period:
plt.figure(figsize=(12, 5))
sns.lineplot(
data=temp_data["land_avg_temp"].loc[
(temp_data.index.year >= 1901) & (temp_data.index.year <= 1905)
]
)
plt.title("Global Average Land Temperature Within Year 1900 - 1905")
plt.xlabel("Date")
plt.ylabel("Global average land temperature (°C)")
# Indeed, in every year the temperature first rises to a peak around mid-year, then descends back, by the end of that year, to a value similar to that at the beginning of the year. The lower temperatures at the beginning and end of the year represent the winter season, while the higher temperatures near the peak represent the summer season. This is the seasonal variation displayed in the data.
# To observe the trend of the temperature over the years, we shall first make a *seasonal adjustment* to eliminate the seasonal variation above. For every year, we will take the average of the temperatures of its twelve months, and use all of these *annual* averages as our new data (represented as a Series) to plot a new line chart.
# We first gather the years into a list:
years = list(temp_data.index.year.unique())
# Then, we compute the annual average described above:
# (Note: It seems that iterating through pandas objects is in general discouraged; see e.g. this [post](https://stackoverflow.com/a/55557758). Thus, while iterating over the rows of our dataset `temp_data` is perhaps the most natural idea when writing the code to compute the annual average, here we shall follow the advice and avoid doing so. This is not a problem, since an easy alternative exists: for each year, we first extract its corresponding twelve rows into a new sub-DataFrame, then we directly apply the `.mean()` function on this sub-DataFrame to compute the desired average, as illustrated by the code below.)
ann_avg = []
for year in years:
# Extract the twelve rows whose year is the current year
current_year_data = temp_data.loc[temp_data.index.year == year]
# Use .mean() function to compute their average
    # Then append it to ann_avg
ann_avg.append(current_year_data.land_avg_temp.mean())
# Create a new series
land_ann_avg_temp = pd.Series(ann_avg, index=years)
# Uncomment to display the series
# land_ann_avg_temp
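# (Aside: the same annual averages can also be obtained without an explicit Python loop, e.g. by grouping the column on the year of its index. A minimal sketch that should reproduce land_ann_avg_temp:)
land_ann_avg_temp_alt = temp_data["land_avg_temp"].groupby(temp_data.index.year).mean()
# Uncomment to check that the two results agree
# print((land_ann_avg_temp_alt - land_ann_avg_temp).abs().max())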
# Finally, we plot the line charts using this new Series:
plt.figure(figsize=(10, 6))
sns.lineplot(data=land_ann_avg_temp)
plt.xlabel("Year")
plt.ylabel("Global annual average land temperature (°C)")
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
import re
import string
import tqdm
import nltk
nltk.download("stopwords")
nltk.download("wordnet")
from nltk.corpus import stopwords, wordnet
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from spacy.lang.en import English
from spacy.lang.en.stop_words import STOP_WORDS
lemma = WordNetLemmatizer()
df_train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
df_train.head()
df_test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
df_test.head()
df_train.isnull().sum()
df_train.info()
df_train.describe().T
# # Data cleaning and preprocessing
#
# Remove URL
def remove_url(text):
url = re.compile(r"https?://\S+|www\.\S+")
return url.sub(r"", text)
# Remove Emoji
def remove_emoji(string):
emoji_pattern = re.compile(
"["
"\U0001F600-\U0001F64F"
"\U0001F300-\U0001F5FF"
"\U0001F680-\U0001F6FF"
"\U0001F1E0-\U0001F1FF"
"\U00002500-\U00002BEF"
"\U00002702-\U000027B0"
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"\U0001f926-\U0001f937"
"\U00010000-\U0010ffff"
"\u2640-\u2642"
"\u2600-\u2B55"
"\u200d"
"\u23cf"
"\u23e9"
"\u231a"
"\ufe0f"
"\u3030"
"]+",
flags=re.UNICODE,
)
return emoji_pattern.sub(r"", string)
# Remove HTML
def remove_html(text):
html = re.compile(r"<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});")
return re.sub(html, "", text)
# Remove Punct
def remove_punctuation(text):
table = str.maketrans("", "", string.punctuation)
return text.translate(table)
# Remove Number
def remove_number(text):
num = re.compile(r"[-+]?[.\d]*[\d]+[:,.\d]*")
return num.sub(r"", text)
df_train["clean_text"] = df_train["text"].apply(lambda x: remove_url(x))
df_train["clean_text"] = df_train["clean_text"].apply(lambda x: remove_emoji(x))
df_train["clean_text"] = df_train["clean_text"].apply(lambda x: remove_html(x))
df_train["clean_text"] = df_train["clean_text"].apply(lambda x: remove_punctuation(x))
df_train["clean_text"] = df_train["clean_text"].apply(lambda x: remove_number(x))
# # Tokenizing the cleaned texts
df_train["tokenized"] = df_train["clean_text"].apply(word_tokenize)
df_train.head()
# df_train['lower'] = df_train['clean_text'].apply(lambda x: [word.lower() for word in x])
# df_train['no_stopwords'] = df_train['lower'].apply(lambda x: [word for word in x if word not in set(nltk.corpus.stopwords.words('english'))])
# df_train['no_stopwords'] = [' '.join(map(str, l)) for l in df_train['no_stopwords']]
# df_train.head()
# # Exploratory Data Analysis
#
plt.figure(figsize=(15, 7), dpi=100)
plt.pie(
df_train["target"].value_counts(),
labels=["Not Disaster", "Disaster"],
autopct="%1.2f%%",
startangle=60,
)
# ## Average word length in a tweet
#
fig, ax1 = plt.subplots(figsize=(10, 5))
word = (
df_train[df_train["target"] == 1]["text"]
.str.split()
.apply(lambda x: [len(i) for i in x])
)
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax1)
ax1.set_title("disaster tweets")
fig, ax2 = plt.subplots(figsize=(10, 5))
word = (
df_train[df_train["target"] == 0]["text"]
.str.split()
.apply(lambda x: [len(i) for i in x])
)
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax2)
ax2.set_title("Non disaster tweets")
# # Load pretrained GloVe embedding
# GloVe word embeddings are generated from a huge text corpus such as Wikipedia and provide a meaningful vector representation for each word in the tweet data. This allows us to use transfer learning and train further on our data. In this project I have used the 100-dimensional vectors (`glove.6B.100d`). When used with a BiLSTM, the results seem to be better than Bag-of-Words and Tf-Idf vectorization methods.
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, precision_recall_curve
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import metrics
import tensorflow as tf
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras import optimizers, layers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import (
Dense,
Bidirectional,
LSTM,
Dropout,
BatchNormalization,
SpatialDropout1D,
)
from keras.layers.embeddings import Embedding
from keras.initializers import Constant
embedding_dim = 100
max_sequence_length = 100
max_nb_words = 64
embeddings_index = {}
f = open("/kaggle/input/glove6b100dtxt/glove.6B.100d.txt", encoding="utf-8")
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
print("Indexing word vectors.")
print("Found %s word vectors." % len(embeddings_index))
print("d_model: %s", embeddings_index["hi"].shape)
from tqdm import tqdm
stop = set(stopwords.words("english"))
def create_corpus(train):
data = []
for tweet in tqdm(df_train["text"]):
words = [
word.lower()
for word in word_tokenize(tweet)
if ((word.isalpha() == 1) & (word not in stop))
]
data.append(words)
return data
data = create_corpus(df_train)
tokenizer_obj = Tokenizer()
tokenizer_obj.fit_on_texts(data)
sequences = tokenizer_obj.texts_to_sequences(data)
word_index = tokenizer_obj.word_index
tweet_pad = pad_sequences(
sequences, maxlen=max_sequence_length, truncating="post", padding="post"
)
word_index = tokenizer_obj.word_index
print("Found %s unique tokens." % len(word_index))
print(tweet_pad.shape)
print(tweet_pad)
num_words = len(word_index) + 1
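# (Aside: the GloVe vectors loaded above are never actually wired into the models below -- the Embedding layers are trained from scratch. A minimal sketch of how the pretrained vectors could be used instead, via the already-imported Constant initializer; this is an illustration, not part of the original pipeline.)
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, idx in word_index.items():
    vec = embeddings_index.get(word)
    if vec is not None:
        embedding_matrix[idx] = vec
# The matrix could then seed an Embedding layer, e.g.:
# Embedding(num_words, embedding_dim,
#           embeddings_initializer=Constant(embedding_matrix),
#           input_length=max_sequence_length, trainable=False)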
df_train["text_encoded"] = tokenizer_obj.texts_to_sequences(df_train["clean_text"])
df_train["len_review"] = df_train["text_encoded"].apply(lambda x: len(x))
df_train.head()
# # LSTM model
X = sequences
X = pad_sequences(X, maxlen=max_sequence_length)
print(X[0])
tokenizer_obj.sequences_to_texts([[594, 4160, 737, 191, 72, 1455, 4161, 30]])
y = df_train["target"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=41
)
embed_dim = embedding_dim
lstm_out = 64
model_1 = Sequential()
model_1.add(Embedding(num_words, embed_dim, input_length=X.shape[1]))
model_1.add(Dropout(0.2))
model_1.add(layers.Conv1D(filters=32, kernel_size=3, padding="same", activation="relu"))
model_1.add(layers.MaxPooling1D(pool_size=2))
model_1.add(Bidirectional(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.4)))
model_1.add(Dense(1, activation="sigmoid"))
adam = optimizers.Adam(learning_rate=0.003)
es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=4)
mc = ModelCheckpoint(
    "best_model.h5", monitor="val_accuracy", mode="max", verbose=1, save_best_only=True
)
model_1.compile(loss="binary_crossentropy", optimizer=adam, metrics=["accuracy"])
print(model_1.summary())
history_model_1 = model_1.fit(
X_train,
y_train,
epochs=10,
callbacks=[es, mc],
batch_size=32,
validation_data=(X_test, y_test),
)
plt.figure(figsize=(8, 4))
plt.plot(history_model_1.history["loss"], label="Train Loss")
plt.plot(history_model_1.history["val_loss"], label="Test Loss")
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epochs")
plt.legend(loc="upper right")
plt.show()
y_predict = model_1.predict(X_test).round()
y_predict.shape
# # Evaluation
train_accuracy = round(
metrics.accuracy_score(y_train, model_1.predict(X_train).round()) * 100
)
train_accuracy
print("Accuracy : ", (metrics.accuracy_score(y_test, y_predict)))
print("Recall :", (metrics.recall_score(y_test, y_predict)))
print("Precision : ", (metrics.precision_score(y_test, y_predict)))
print(classification_report(y_test, y_predict))
conm = confusion_matrix(y_test, y_predict)
plt.figure(figsize=(7, 5))
sns.heatmap(conm, annot=True, fmt="d")
plt.show()
# # Test Set
df_test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
df_test.head()
df_test["clean_text"] = df_test["text"].apply(lambda x: remove_url(x))
df_test["clean_text"] = df_test["clean_text"].apply(lambda x: remove_emoji(x))
df_test["clean_text"] = df_test["clean_text"].apply(lambda x: remove_html(x))
df_test["clean_text"] = df_test["clean_text"].apply(lambda x: remove_punctuation(x))
df_test["clean_text"].head()
def create_corpus(test):
data = []
for tweet in tqdm(df_test["clean_text"]):
words = [
word.lower()
for word in word_tokenize(tweet)
if ((word.isalpha() == 1) & (word not in stop))
]
data.append(words)
return data
data_test = create_corpus(df_test)
tokenizer_obj = Tokenizer()
tokenizer_obj.fit_on_texts(data_test)
sequences = tokenizer_obj.texts_to_sequences(data_test)
word_index = tokenizer_obj.word_index
tweet_pad = pad_sequences(
sequences, maxlen=max_sequence_length, truncating="post", padding="post"
)
word_index = tokenizer_obj.word_index
print("Found %s unique tokens." % len(word_index))
print(tweet_pad.shape)
print(tweet_pad)
num_words = len(word_index) + 1
print(num_words)
X = sequences
X = pad_sequences(X, maxlen=max_sequence_length)
print(X[0])
tokenizer_obj.sequences_to_texts([[114, 522, 885, 69, 36]])
model_1 = Sequential()
model_1.add(Embedding(num_words, embed_dim, input_length=X.shape[1]))
model_1.add(Dropout(0.2))
model_1.add(layers.Conv1D(filters=32, kernel_size=3, padding="same", activation="relu"))
model_1.add(layers.MaxPooling1D(pool_size=2))
model_1.add(Bidirectional(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.4)))
model_1.add(Dense(1, activation="sigmoid"))
adam = optimizers.Adam(learning_rate=0.003)
es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=4)
mc = ModelCheckpoint(
    "best_model.h5", monitor="val_accuracy", mode="max", verbose=1, save_best_only=True
)
model_1.compile(loss="binary_crossentropy", optimizer=adam, metrics=["accuracy"])
print(model_1.summary())
from tensorflow import keras
callbacks_es = keras.callbacks.EarlyStopping(monitor="val_loss", patience=3)
history_model1 = model_1.fit(
X, y, epochs=10, validation_split=0.2, callbacks=[callbacks_es], batch_size=32
)
plt.figure(figsize=(8, 4))
plt.plot(history_model1.history["loss"], label="Train Loss")
plt.plot(history_model1.history["val_loss"], label="Test Loss")
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epochs")
plt.legend(loc="upper right")
plt.show()
predicted_1 = model_1.predict(tweet_pad).round()
submission_1 = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
submission_1["target"] = np.round(predicted_1).astype("int")
submission_1.describe().style
submission_1.to_csv("submission_1.csv", index=False)
submission_1
# # BERT model
from transformers import AutoTokenizer, TFBertModel
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased")
bert = TFBertModel.from_pretrained("bert-large-uncased")
print("max len of tweets", max([len(x.split()) for x in df_train.text]))
X_train = tokenizer(
text=df_train.text.tolist(),
add_special_tokens=True,
max_length=36,
truncation=True,
padding=True,
return_tensors="tf",
return_token_type_ids=False,
return_attention_mask=True,
verbose=True,
)
X_train["input_ids"].shape
X_train["attention_mask"].shape
y_train = df_train.target.values
y_train.shape
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.losses import CategoricalCrossentropy, BinaryCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy, BinaryAccuracy
max_len = 36
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="attention_mask")
embeddings = bert(input_ids, attention_mask=input_mask)[
    1
]  # index 0 is the last hidden state, index 1 is the pooler output
out = tf.keras.layers.Dropout(0.1)(embeddings)
out = Dense(128, activation="relu")(out)
out = tf.keras.layers.Dropout(0.1)(out)
out = Dense(32, activation="relu")(out)
y = Dense(1, activation="sigmoid")(out)
model_2 = tf.keras.Model(inputs=[input_ids, input_mask], outputs=y)
model_2.layers[2].trainable = True
optimizer = Adam(
learning_rate=6e-06, # this learning rate is for bert model , taken from huggingface website
epsilon=1e-08,
decay=0.01,
clipnorm=1.0,
)
# Set loss and metrics
loss = BinaryCrossentropy(from_logits=False)  # the final Dense layer already applies a sigmoid
metric = (BinaryAccuracy("accuracy"),)
model_2.compile(optimizer=optimizer, loss=loss, metrics=metric)
model_2.summary()
history_model_2 = model_2.fit(
x={"input_ids": X_train["input_ids"], "attention_mask": X_train["attention_mask"]},
y=y_train,
epochs=10,
batch_size=10,
)
# # Test Set
X_test = tokenizer(
text=df_test.text.tolist(),
add_special_tokens=True,
max_length=36,
truncation=True,
padding=True,
return_tensors="tf",
return_token_type_ids=False,
return_attention_mask=True,
verbose=True,
)
X_test["input_ids"].shape
X_test["attention_mask"].shape
y_test = df_test.clean_text.values
y_test.shape
predicted = model_2.predict(
{"input_ids": X_test["input_ids"], "attention_mask": X_test["attention_mask"]}
)
y_predicted = np.where(predicted > 0.5, 1, 0)
y_predicted = y_predicted.reshape((1, 3263))[0]
y_predicted.shape
submission_2 = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
submission_2["id"] = df_test.id
submission_2["target"] = y_predicted
submission_2.to_csv("submission_2.csv", index=False)
submission_2
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import fetch_openml
X, y = fetch_openml("mnist_784", version=1, return_X_y=True)
print(X.shape) # 28*28
y.shape
sns.histplot(y)
# To check the distribution of class label
# To plot the image, we need to scale the data and reshape with 28*28
from sklearn.preprocessing import MinMaxScaler
min_max_obj = MinMaxScaler()
new_X = min_max_obj.fit_transform(X)
print("Label : ", y[0])
plt.imshow(new_X[0].reshape(28, 28))
plt.title("Image")
print("Total class labels : ", y.nunique())
print("Class labels : ", y.unique())
# # Train the model - For Binary Classification
X_train, X_test, y_train, y_test = (
X.iloc[0:6000],
X.iloc[6000:],
y.iloc[0:6000],
y.iloc[6000:],
)
from sklearn.preprocessing import MinMaxScaler
min_max = MinMaxScaler()
X_train_transform = min_max.fit_transform(X_train)
plt.imshow(X_train_transform[0].reshape(28, 28))
y_train = y_train.replace(["1", "2", "3", "4", "5", "6", "7", "8", "9"], -1).replace(
["0"], 1
)
y_train.unique()
y_test = y_test.replace(["1", "2", "3", "4", "5", "6", "7", "8", "9"], -1).replace(
["0"], 1
)
y_test.unique()
from sklearn.linear_model import Perceptron
per_obj = Perceptron()
per_obj.fit(X_train, y_train)
per_obj.score(X_test, y_test)
y_pred = per_obj.predict(X_test)
# # Classification Metrics
from sklearn.metrics import (
confusion_matrix,
ConfusionMatrixDisplay,
precision_score,
recall_score,
f1_score,
roc_curve,
)
cm = confusion_matrix(y_test, y_pred)
disp = ConfusionMatrixDisplay(cm)
disp.plot()
plt.show()
print("Precision")
print(precision_score(y_test, y_pred))
print("Recall")
print(recall_score(y_test, y_pred))
print("f1-score")
print(f1_score(y_test, y_pred))
print("ROC-Curve")
# roc_curve returns (fpr, tpr, thresholds); plot it with matplotlib
fpr, tpr, _ = roc_curve(y_test, per_obj.decision_function(X_test))
plt.plot(fpr, tpr)
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.show()
|
# # Wine Quality Prediction Model using a Perceptron
# * **Dataset used:** http://archive.ics.uci.edu/ml/datasets/Wine+Quality _(winequality-red.csv)_
# * **Centro Federal de Educação Tecnológica - CEFET/RJ, UnED Petrópolis**
# * **Intelligent Systems - 2023.1**
# * **Eduardo Paes Silva**
# Import the libraries and functions that will be used
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Perceptron
from sklearn.preprocessing import MinMaxScaler, Binarizer
# # Data Preparation
# ## Steps performed
# - Load the CSV containing the red wine data;
# - Normalize the data using the MinMaxScaler technique;
# - Separate the target data (the _'quality'_ column);
# - Split the data into training and test sets (80% and 20%);
# - Binarize the target values to 0 and 1;
# - Flatten the target vectors.
# Load the data into a DataFrame
df = pd.read_csv("/kaggle/input/cefet-si-winequality-red/winequality-red.csv", sep=";")
# Use MinMaxScaler to normalize the data
scaler = MinMaxScaler()
# Normalize all the data in the DataFrame
df_norm = scaler.fit_transform(df)
# Turn the normalized matrix into a new DataFrame
df_norm = pd.DataFrame(df_norm, columns=df.columns)
# Separate the target data
X = df_norm.drop("quality", axis=1).values
y = df_norm["quality"].values
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=260
)
# Binarize the target values using a 0.5 threshold
binarizer = Binarizer(threshold=0.5)
y_train = binarizer.fit_transform(y_train.reshape(-1, 1))
y_test = binarizer.fit_transform(y_test.reshape(-1, 1))
# Flatten the target arrays from column vectors back to one-dimensional arrays
y_train = y_train.ravel()
y_test = y_test.ravel()
# # Model Training and Results
# Train the Perceptron on the training data set
perceptron = Perceptron(max_iter=1000, random_state=10)
perceptron.fit(X_train, y_train)
# Evaluate the model's performance
score = perceptron.score(X_test, y_test)
print("Resultado: {:.2f}%".format(score * 100))
# Plot of the training data
# plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap='bwr')
# plt.title('Dados de Treino')
# plt.xlabel('Características')
# plt.ylabel('Qualidade')
# plt.show()
# Plot of the test data
# plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap='bwr')
# plt.title('Dados de Teste')
# plt.xlabel('Características')
# plt.ylabel('Qualidade')
# plt.show()
# Plot of the training and test data
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap="coolwarm", label="Treino")
# plt.plot(X_test[:, 0], X_test[:, 1], '.', label='Teste')
# # Drawing the separating line
# w = perceptron.coef_
# b = perceptron.intercept_
# x1 = np.linspace(0, 1, 100)
# x2 = -(w[0]/w[1])*x1 - (b/w[1])
# plt.plot(x1, x2, 'k--', label='Linha de separação')
# # Legends and displaying the plot
# plt.xlabel('Característica 1')
# plt.ylabel('Característica 2')
# plt.title('Perceptron para classificação de vinhos')
# plt.legend()
# plt.show()
|
# Analysis of Delinquent Clients
# I - Objective
# Exploration of the credit data of a financial institution, with the goal of identifying under which circumstances a client becomes delinquent. For this analysis we will use a CSV file containing the clients' information; the objective is to explain the response variable in the second column, called default, which indicates whether a client is in good standing (default = 0) or delinquent (default = 1).
# We will assess delinquency by comparing attributes such as salary, education level and financial activity, thus highlighting which of them may contribute to a client defaulting.
# II - Importing the libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# III - Structuring the data
# a) Dataframe
# In this step the data is structured and the dataframe is created so we can inspect the information.
df = pd.read_csv(
"https://raw.githubusercontent.com/andre-marcos-perez/ebac-course-utils/develop/dataset/credito.csv",
na_values="na",
)
df.head(n=10)
# b) Structure
df.shape
df[df["default"] == 0].shape
df[df["default"] == 1].shape
qtd_total, _ = df.shape
qtd_adimplentes, _ = df[df["default"] == 0].shape
qtd_inadimplentes, _ = df[df["default"] == 1].shape
print(
f"A proporcão clientes adimplentes é de {round(100 * qtd_adimplentes / qtd_total, 2)}%"
)
print(
f"A proporcão clientes inadimplentes é de {round(100 * qtd_inadimplentes / qtd_total, 2)}%"
)
# c) Schema
df.head(n=5)
# * Columns and their data types.
df.dtypes
# * Categorical attributes
df.select_dtypes("object").describe().transpose()
# * Numerical attributes
df.drop("id", axis=1).select_dtypes("number").describe().transpose()
# d) Handling missing data:
# - Empty values ("")
# - Nulls (None)
# - Not available or not applicable (na, NA, etc.)
# - Not a number (nan, NaN, NAN, etc.)
df.head()
# * Identifying the columns with missing data
df.isna().any()
# * Statistics on the columns with missing data
def stats_dados_faltantes(df: pd.DataFrame) -> None:
stats_dados_faltantes = []
for col in df.columns:
if df[col].isna().any():
qtd, _ = df[df[col].isna()].shape
total, _ = df.shape
dict_dados_faltantes = {
col: {"quantidade": qtd, "porcentagem": round(100 * qtd / total, 2)}
}
stats_dados_faltantes.append(dict_dados_faltantes)
for stat in stats_dados_faltantes:
print(stat)
stats_dados_faltantes(df=df)
stats_dados_faltantes(df=df[df["default"] == 0])
stats_dados_faltantes(df=df[df["default"] == 1])
# IV - Data transformation and cleaning
# - Fix the schema
# - Remove the missing data
# a) Fixing the schema
# * In the structuring step we noticed that the columns limite_credito and valor_transacoes_12m were being interpreted as categorical columns (dtype = object); we will convert them to a numeric type (float), making the information more consistent.
df[["limite_credito", "valor_transacoes_12m"]].dtypes
df[["limite_credito", "valor_transacoes_12m"]].head(n=5)
fn = lambda valor: float(valor.replace(".", "").replace(",", "."))
valores_originais = ["12.691,51", "8.256,96", "3.418,56", "3.313,03", "4.716,22"]
valores_limpos = list(map(fn, valores_originais))
print(valores_originais)
print(valores_limpos)
df["valor_transacoes_12m"] = df["valor_transacoes_12m"].apply(fn)
df["limite_credito"] = df["limite_credito"].apply(fn)
# * Verifying the data conversion
df.dtypes
# * Categorical attributes
df.select_dtypes("object").describe().transpose()
# * Numerical attributes
df.drop("id", axis=1).select_dtypes("number").describe().transpose()
# b) Removing the missing data
# * Here we remove the rows containing missing values (None, na, NA, nan, NaN, etc.) to make our analysis more reliable.
df.dropna(inplace=True)
df.shape
df[df["default"] == 0].shape
df[df["default"] == 1].shape
qtd_total_novo, _ = df.shape
qtd_adimplentes_novo, _ = df[df["default"] == 0].shape
qtd_inadimplentes_novo, _ = df[df["default"] == 1].shape
print(
f"A proporcão adimplentes ativos é de {round(100 * qtd_adimplentes / qtd_total, 2)}%"
)
print(
f"A nova proporcão de clientes adimplentes é de {round(100 * qtd_adimplentes_novo / qtd_total_novo, 2)}%"
)
print("")
print(
f"A proporcão clientes inadimplentes é de {round(100 * qtd_inadimplentes / qtd_total, 2)}%"
)
print(
f"A nova proporcão de clientes inadimplentes é de {round(100 * qtd_inadimplentes_novo / qtd_total_novo, 2)}%"
)
# V - Data visualization
# * With the data ready for analysis, we will create several visualizations to correlate and compare the possible causes of delinquency.
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
df_adimplente = df[df["default"] == 0]
df_inadimplente = df[df["default"] == 1]
# a) Categorical visualizations
# * This analysis shows the relationship between the categorical attributes and delinquency (default = 1)
df.select_dtypes("object").head(n=5)
# * Education level (escolaridade)
coluna = "escolaridade"
titulos = [
"Escolaridade dos Clientes",
"Escolaridade dos Clientes Adimplentes",
"Escolaridade dos Clientes Inadimplentes",
]
eixo = 0
max_y = 0
max = df.select_dtypes("object").describe()[coluna]["freq"] * 1.1
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
df_to_plot = dataframe[coluna].value_counts().to_frame()
df_to_plot.rename(columns={coluna: "frequencia_absoluta"}, inplace=True)
df_to_plot[coluna] = df_to_plot.index
    df_to_plot.sort_values(by=[coluna], inplace=True)
f = sns.barplot(
x=df_to_plot[coluna], y=df_to_plot["frequencia_absoluta"], ax=eixos[eixo]
)
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
f.set_xticklabels(labels=f.get_xticklabels(), rotation=90)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# * Annual salary (salario_anual)
coluna = "salario_anual"
titulos = [
"Salário Anual dos Clientes",
"Salário Anual dos Clientes Adimplentes",
"Salário Anual dos Clientes Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
df_to_plot = dataframe[coluna].value_counts().to_frame()
df_to_plot.rename(columns={coluna: "frequencia_absoluta"}, inplace=True)
df_to_plot[coluna] = df_to_plot.index
df_to_plot.reset_index(inplace=True, drop=True)
df_to_plot.sort_values(by=[coluna], inplace=True)
f = sns.barplot(
x=df_to_plot[coluna], y=df_to_plot["frequencia_absoluta"], ax=eixos[eixo]
)
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
f.set_xticklabels(labels=f.get_xticklabels(), rotation=90)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# * Age (idade)
coluna = "idade"
titulos = [
"Idade dos Clientes",
"Idade dos Clientes Adimplentes",
"Idade dos Clientes Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
df_to_plot = dataframe[coluna].value_counts().to_frame()
df_to_plot.rename(columns={coluna: "frequencia_absoluta"}, inplace=True)
df_to_plot[coluna] = df_to_plot.index
df_to_plot.reset_index(inplace=True, drop=True)
df_to_plot.sort_values(by=[coluna], inplace=True)
f = sns.barplot(
x=df_to_plot[coluna], y=df_to_plot["frequencia_absoluta"], ax=eixos[eixo]
)
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
f.set_xticklabels(labels=f.get_xticklabels(), rotation=90)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# * Card type (tipo_cartao)
coluna = "tipo_cartao"
titulos = [
"Tipo de cartao dos Clientes",
"Tipo de cartao dos Clientes Adimplentes",
"Tipo de cartao dos Clientes Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
df_to_plot = dataframe[coluna].value_counts().to_frame()
df_to_plot.rename(columns={coluna: "frequencia_absoluta"}, inplace=True)
df_to_plot[coluna] = df_to_plot.index
df_to_plot.reset_index(inplace=True, drop=True)
df_to_plot.sort_values(by=[coluna], inplace=True)
f = sns.barplot(
x=df_to_plot[coluna], y=df_to_plot["frequencia_absoluta"], ax=eixos[eixo]
)
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
f.set_xticklabels(labels=f.get_xticklabels(), rotation=90)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
# b) Numerical visualizations
# * This analysis shows the relationship between the numerical attributes and delinquency (default = 1)
df.drop(["id", "default"], axis=1).select_dtypes("number").head(n=5)
# * Number of transactions in the last 12 months (qtd_transacoes_12m)
coluna = "qtd_transacoes_12m"
titulos = [
"Qtd. de Transações no Último Ano",
"Qtd. de Transações no Último Ano de Adimplentes",
"Qtd. de Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# * Value of the transactions in the last 12 months (valor_transacoes_12m)
coluna = "valor_transacoes_12m"
titulos = [
"Valor das Transações no Último Ano",
"Valor das Transações no Último Ano de Adimplentes",
"Valor das Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# * Transaction value vs. number of transactions in the last 12 months
f = sns.relplot(
x="valor_transacoes_12m", y="qtd_transacoes_12m", data=df, hue="default"
)
_ = f.set(
title="Relação entre Valor e Quantidade de Transações no Último Ano",
xlabel="Valor das Transações no Último Ano",
ylabel="Quantidade das Transações no Último Ano",
)
|
# # Summary
# This note explores the shortest way to plot the current number of infected people in each country. A graph of the change over time is created for each country, which can be read as a measure of whether the infection is spreading or shrinking.
# ## Comment
# Recent trends indicate that new cases are increasing in Hong Kong, Malaysia, Singapore, Korea, Thailand, Japan and Vietnam. According to the difference plot, the situation in Singapore is accelerating and looks bad. Japan also has a clear upward trend.
# But Canada, Germany, the Philippines, Taiwan, and Australia seem to be controlling the epidemic.
# At the world level, the epidemic has not yet reached a tipping point, as the number of new cases has not declined. The situation remains tense.
# Updated on February 17, 2020
# # Prepare
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
# Drop sno and Last Update
df = df.drop(columns=["Sno", "Last Update"])
# fill NaN
df = df.fillna({"Province/State": "None"})
# Change astype
df["Date"] = pd.to_datetime(df["Date"])
df["Confirmed"] = df["Confirmed"].astype("int")
df["Deaths"] = df["Deaths"].astype("int")
df["Recovered"] = df["Recovered"].astype("int")
df.info()
# # World situation
# The current increase in new cases indicates that we are in the first half of the epidemic.
# Growth appears to be declining, but it is unlikely that the epidemic is over.
group_all = df.groupby(["Date"]).sum()
diff = group_all.diff()
world_situation = pd.concat([group_all, diff], axis=1, keys=["Count", "Diff"])
world_situation.plot(
title="World situation", subplots=True, layout=(3, 3), figsize=(12, 8)
)
group_all = df.groupby(["Date"]).sum()["Confirmed"]
diff = group_all.diff()
world_situation = pd.concat(
[diff, diff.rolling(5).mean()], axis=1, keys=["Diff", "Diff_Average_5"]
)
world_situation.plot(title="New confirmed case daily growth", figsize=(12, 8))
# # Graph of diff per day in each country
# Sort by Date
df = df.sort_values(["Date"]).reset_index(drop=True)
group_country = df.groupby(["Country", "Date"]).sum()
group_country
unique_country = group_country.index.unique(level=0)
for c in unique_country:
count = group_country.loc[c, :]
diff = count.diff()
country_specific = pd.concat([count, diff], axis=1, keys=["Count", "Diff"])
if len(diff) > 1:
country_specific.plot(title=c, subplots=True, layout=(2, 3), figsize=(12, 8))
# # Growth (Logarithmic display)
# You can draw comparison graphs by country for Confirmed, Deaths and Recovered. The y-axis is logarithmic.
for name in ["Confirmed", "Deaths", "Recovered"]:
group_country[name].unstack(level=0).plot(title=name, logy=True, figsize=(25, 15))
|
# # Company Data Analysis
# ## Setup
import pandas as pd
import seaborn as sns
import numpy as np
np.set_printoptions(precision=3)
pd.set_option("display.precision", 3)
company_df = pd.read_csv("app/resources/Client_Master.csv", dtype=str)
company_df.head()
# ## Drop columns with > 95% null values
company_na_count = company_df.isna().sum()
company_na_count = company_na_count * 100 / len(company_df)
company_na_count.sort_values(ascending=False, inplace=True)
company_na_count
sns.set(rc={"figure.figsize": (11.7, 25.27)})
sns.set_style("darkgrid", {"font.sans-serif": ["Hiragino Sans", "Arial"]})
sns.barplot(x=company_na_count, y=company_na_count.index)
from matplotlib import pyplot as plt
num_column_null = sum(company_na_count > 95)
sns.set(rc={"figure.figsize": (5.7, 5.27)})
plt.pie(
[len(company_na_count) - num_column_null, num_column_null],
labels=["Null <= 95%", "Null > 95%"],
autopct="%.0f%%",
)
company_df = company_df.loc[:, company_na_count < 95]
company_df.head()
# ## Number of unique values
number_unique_value = {}
for column in company_df.columns:
number_unique_value[column] = len(company_df[column].unique())
number_unique_value = dict(
sorted(number_unique_value.items(), key=lambda x: x[1], reverse=True)
)
sns.set(rc={"figure.figsize": (11.7, 25.27)})
sns.set_style("darkgrid", {"font.sans-serif": ["Hiragino Sans", "Arial"]})
sns.barplot(
x=list(number_unique_value.values()),
y=list(number_unique_value.keys()),
)
number_unique_value = {}
for column in company_df.columns:
if len(company_df[column].unique()) < 5000:
number_unique_value[column] = len(company_df[column].unique())
number_unique_value = dict(
sorted(number_unique_value.items(), key=lambda x: x[1], reverse=True)
)
sns.set(rc={"figure.figsize": (11.7, 25.27)})
sns.set_style("darkgrid", {"font.sans-serif": ["Hiragino Sans", "Arial"]})
sns.barplot(
x=list(number_unique_value.values()),
y=list(number_unique_value.keys()),
)
# 資本金(千円): Capital (thousand yen)
# 全国ランキング : National ranking
# 創業年 Year of foundation
sns.set(rc={"figure.figsize": (11.7, 10.27)})
sns.set_style("darkgrid", {"font.sans-serif": ["Hiragino Sans", "Arial"]})
sns.scatterplot(
data=company_df,
x="全国ランキング",
y="創業年",
hue="代表者出身県",
palette="deep",
)
|
# # Football Player Team Detection
# https://github.com/ultralytics/ultralytics
from ultralytics import YOLO
import os
import cv2
import shutil
import random
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from IPython.display import HTML, Video, Image, clear_output
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import animation, rc
rc("animation", html="jshtml")
# # Source Images
path0 = "/kaggle/input/football-player-segmentation/images"
# paths=[]
# for dirname, _, filenames in os.walk(path0):
# for filename in filenames:
# if filename[-4:]=='.jpg':
# paths+=[(os.path.join(dirname, filename))]
destination = "frame"
if not os.path.exists(destination):
os.makedirs(destination)
paths = os.listdir(path0)[0:20]
for filename in paths:
src_path = os.path.join(path0, filename)
if filename.endswith(".jpg"):
dst_path = os.path.join(destination, filename)
shutil.copyfile(src_path, dst_path)
print(paths)
# !ls frame
path_frame = "./frame"
path_fig = "./fig"
# # YOLOv8
model = YOLO("yolov8x.pt")
# Run detection on the copied frames; save=True (which recent versions require in order to
# write output images) saves the annotated frames to runs/detect/predict
model.predict(path_frame, save=True, conf=0.7)
# !ls runs/detect/predict
path_run = "./runs/detect/predict"
paths0 = []
for dirname, _, filenames in os.walk(path_run):
for filename in filenames:
if filename[-4:] == ".jpg":
paths0 += [(os.path.join(dirname, filename))]
paths0 = sorted(paths0)
images0 = []
for i in tqdm(range(len(paths0))):
images0 += [cv2.imread(paths0[i])]
def create_animation(ims):
fig = plt.figure(figsize=(12, 8))
im = plt.imshow(cv2.cvtColor(ims[0], cv2.COLOR_BGR2RGB))
text = plt.text(
0.05, 0.05, f"Slide {0}", transform=fig.transFigure, fontsize=14, color="blue"
)
plt.axis("off")
plt.close()
def animate_func(i):
im.set_array(cv2.cvtColor(ims[i], cv2.COLOR_BGR2RGB))
text.set_text(f"Slide {i}")
return [im]
return animation.FuncAnimation(fig, animate_func, frames=len(ims), interval=1000)
create_animation(np.array(images0))
# # Rectangle information
results = model.predict(path_frame, conf=0.7)
print(len(results))
BOX = pd.DataFrame(columns=range(6))
for i in range(len(results)):
arri = pd.DataFrame(results[i].boxes.boxes).astype(float)
arri["i"] = i
BOX = pd.concat([BOX, arri], axis=0)
BOX.columns = ["x", "y", "x2", "y2", "confidence", "class", "i"]
display(BOX)
BOX["class"] = BOX["class"].apply(lambda x: class_map[int(x) + 1])
BOX = BOX.reset_index(drop=True)
display(BOX)
display(BOX["class"].value_counts())
data0 = BOX.query("`class` == 'person' and `confidence` > 0.7")
data0["i"] = data0["i"].apply(lambda x: int(x))
data0["path"] = data0["i"].apply(lambda x: os.path.join("./frame", paths[x]))
data0 = data0.reset_index(drop=True)
data0["j"] = data0.index.tolist()
data0["color"] = np.nan
display(data0)
for i in range(len(data0)):
path = data0.iloc[i, 7]
x = int(data0.iloc[i, 0])
y = int(data0.iloc[i, 1])
x2 = int(data0.iloc[i, 2])
y2 = int(data0.iloc[i, 3])
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img2 = img[y:y2, x:x2, :]
color = tuple(np.mean(img2, axis=(0, 1)).astype(int))
data0.loc[i, "color"] = str(color)
display(data0)
fig, axes = plt.subplots(2, 5, figsize=(10, 10))
for j, ax in enumerate(axes.flat):
i = random.randint(0, len(data0) - 1)
path = data0.iloc[i, 7]
x = int(data0.iloc[i, 0])
y = int(data0.iloc[i, 1])
x2 = int(data0.iloc[i, 2])
y2 = int(data0.iloc[i, 3])
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img2 = img[y:y2, x:x2, :]
color = data0.loc[i, "color"]
ax.set_title(color)
ax.imshow(img2)
ax.axis("off")
plt.suptitle("Player Color", fontsize=15)
plt.show()
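# (Aside, an illustrative sketch only -- not part of the original notebook: the per-player mean colors computed above could be grouped into two candidate teams with a simple k-means clustering; referees and goalkeepers will end up as noise in one of the clusters.)
from sklearn.cluster import KMeans

# Recompute the mean colors as numeric arrays (data0["color"] stores them only as strings)
mean_colors = []
for i in range(len(data0)):
    img = cv2.cvtColor(cv2.imread(data0.loc[i, "path"]), cv2.COLOR_BGR2RGB)
    x, y, x2, y2 = (int(data0.loc[i, c]) for c in ["x", "y", "x2", "y2"])
    mean_colors.append(img[y:y2, x:x2, :].mean(axis=(0, 1)))
mean_colors = np.array(mean_colors)
# Two clusters as a rough proxy for the two teams
kmeans = KMeans(n_clusters=2, n_init=10, random_state=0).fit(mean_colors)
data0["team"] = kmeans.labels_
display(data0["team"].value_counts())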
|
# # Welcome to Disaster Tweets NLP
# In this problem, we are given a dataset containing tweets. Our objective is to classify them as disaster-related or not. There are a couple of extra columns - keyword and location. Their names are self-explanatory. However, as we will see, these columns are not very reliable without adequate processing/transformation.
# We will start with data exploration, do some cleaning and then build a model containing a single LSTM layer. The objective of this kernel is to quickly demonstrate building an RNN model, so we avoid heavy data processing such as stemming, lemmatization, etc.
# ### Library Imports
import pandas as pd
import numpy as np
import os
import re
import tensorflow
from tensorflow.keras.layers import (
Input,
Embedding,
LSTM,
Dropout,
Dense,
TimeDistributed,
concatenate,
RepeatVector,
Reshape,
)
from tensorflow.keras.models import Model
from tensorflow.keras.utils import Sequence
from tensorflow.keras.callbacks import (
TensorBoard,
EarlyStopping,
CSVLogger,
History,
ModelCheckpoint,
)
# ### Data Read
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train.shape, test.shape
# ### Exploratory Data Analysis
# Let's checkout head of the data
train.head()
train.keyword.unique()[:10]
train.keyword.nunique()
test.keyword.unique()[:10]
# We notice keywords repeated between train and test
test.keyword.nunique()
# Let's find out how keywords relate to the target.
train.groupby("keyword")["target"].mean().head()
# While airplane accident and aftershock make it easy to classify, words like ambulance being near 0.5 may not help much. We'll see later that adding keyword embedding to our LSTM model does not help much.
# Let's see the location column
train.location.unique()[:10]
test.location.unique()[:10]
train.location.nunique()
test.location.nunique()
# Without any processing, location is not as clean as keyword. We see mixed upper and lower case, different geographical granularities, numbers and special symbols. There are also misspellings and entries that do not represent a location.
# Let's now checkout the text column
train.text[0:5]
# We see some tweets have location mentioned whereas others don't. Some tweets have words such as wildfires, earthquake, etc. which help identify the event.
test.text[0:5]
# We will have to do some cleaning to extract words without any special symbols. In the following code, we extract only the words themselves and drop everything else.
# ### Data Cleaning
# We do simple data cleaning that extracts only words from the tweets and keywords. Even integers are read as strings for now. All other symbols will be excluded.
def clean(my_string):
if not pd.isna(my_string):
return " ".join(str.lower(re.sub(r"[\W]", " ", my_string)).split())
else:
return "nan"
train["text"] = train["text"].map(clean)
train["keyword"] = train["keyword"].map(clean)
train["location"] = train["location"].map(clean)
test["text"] = test["text"].map(clean)
test["keyword"] = test["keyword"].map(clean)
test["location"] = test["location"].map(clean)
# Let's see the outputs
train["text"].head()
train["location"].unique()
# Let's find out the maximum length needed for out RNN model
train["text_len"] = train["text"].apply(lambda x: len(x.split()))
test["text_len"] = test["text"].apply(lambda x: len(x.split()))
train["text_len"].max(), test["text_len"].max()
# ### Mapping words to integers for encoding
all_words = (
pd.Series((" ".join(train["text"]) + " ".join(test["text"])).split())
.value_counts()
.reset_index()
)
all_words.columns = ["word", "count"]
all_words.head()
all_words.shape
all_words[all_words["count"] > 2].shape
# removing words with a single occurrence
words = all_words[all_words["count"] > 1]["word"].values
len(words)
# Now the mappers!
word_dict = {}
for i in range(len(words)):
word_dict[words[i]] = i + 1
def get_word_index(word):
if word in word_dict.keys():
return word_dict[word]
else:
return len(word_dict)
keywords = pd.concat([train["keyword"], test["keyword"]])
keywords = set(keywords)
keyword_dict = {k: v + 1 for v, k in enumerate(keywords)}
def get_keyword_index(keyword):
if keyword in keyword_dict.keys():
return keyword_dict[keyword]
else:
return len(keyword_dict)
# ### Data Generator
# We build a data generator class below. It takes in the dataframe, converts the text and keyword into sequences of numbers using the mappers, and returns batches of data for model training. We will also use a similar generator for prediction; the difference is that the training generator returns target values too.
class DataGenerator(Sequence):
def __init__(self, input_df, batch_size=64):
self.batch_size = batch_size
self.input_df = input_df
self.ids = input_df.index.unique()
def __len__(self):
return int(np.floor(len(self.input_df) / self.batch_size))
def __getitem__(self, index):
sample_ids = np.random.choice(self.ids, self.batch_size)
text_input, keyword_input, target = self.__data_generation(sample_ids)
return [text_input, keyword_input], [np.reshape(target, target.shape + (1,))]
def __data_generation(self, ids):
max_length = 34
text_input = []
target = []
keyword_input = []
for id in ids:
text = self.input_df["text"][id].split()
text = [get_word_index(word) for word in text]
keyword = [get_keyword_index(self.input_df["keyword"][id])] * len(text)
extend_length = max_length - len(text)
if extend_length > 0:
text = np.append(text, [0] * extend_length)
keyword = np.append(keyword, [0] * extend_length)
text_input.append(text)
keyword_input.append(keyword)
target.append(self.input_df["target"][id])
return text_input, keyword_input, np.array(target, dtype="float64")
# ### Model
tensorflow.keras.backend.clear_session()
tensorflow.compat.v1.disable_v2_behavior()
text_input = Input((None,), dtype="float64", name="text_input")
text = Embedding(len(word_dict) + 1, 30, mask_zero=True, dtype="float64", name="text")(
text_input
)
keyword_input = Input((None,), dtype="float64", name="keyword_input")
keyword = Embedding(len(keyword_dict) + 1, 20, mask_zero=True, dtype="float64")(
keyword_input
)
inputs = concatenate([keyword, text], dtype="float64", name="inputs")
lstm = LSTM(50, dtype="float64", name="lstm")(inputs)
dropout = Dropout(0.3, dtype="float64", name="dropout")(lstm)
output = Dense(1, activation="sigmoid", dtype="float64", name="output")(dropout)
model = Model(inputs=[text_input, keyword_input], outputs=output)
model.compile(loss="binary_crossentropy", optimizer="rmsprop")
# ### Splitting data into training and validation
training_id = pd.Series(train.index.values)
train_ids = training_id.sample(frac=0.9)
val_ids = training_id[~training_id.isin(train_ids)]
train_data = train[train.index.isin(train_ids)]
val_data = train[train.index.isin(val_ids)]
trainingGenerator = DataGenerator(train_data, batch_size=64)
validationGenerator = DataGenerator(val_data, batch_size=64)
train_data.head()
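# A quick sanity check on one generated batch (index 0 is arbitrary since the generator samples randomly): we expect 64 text sequences, 64 keyword sequences and a (64, 1) target array.
batch_inputs, batch_targets = trainingGenerator[0]
len(batch_inputs[0]), len(batch_inputs[1]), batch_targets[0].shape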
# ### Training
# We invoke the fit_generator method, which takes our data generators as input instead of a dataset.
model.fit_generator(
trainingGenerator,
validation_data=validationGenerator,
epochs=10,
use_multiprocessing=False,
callbacks=[EarlyStopping(monitor="val_loss", patience=2, verbose=1)],
)
# ### Inferencing
class PredictDataGenerator(Sequence):
def __init__(self, input_df, batch_size=64):
self.batch_size = batch_size
self.input_df = input_df
self.ids = input_df.index.unique()
def __len__(self):
return int(np.floor(len(self.input_df) / self.batch_size))
def __getitem__(self, index):
sample_ids = self.ids
text_input, keyword_input = self.__data_generation(sample_ids)
return [text_input, keyword_input]
def __data_generation(self, ids):
max_length = 34
text_input = []
keyword_input = []
for id in ids:
text = self.input_df["text"][id].split()
text = [get_word_index(word) for word in text]
keyword = [get_keyword_index(self.input_df["keyword"][id])] * len(text)
extend_length = max_length - len(text)
if extend_length > 0:
text = np.append(text, [0] * extend_length)
keyword = np.append(keyword, [0] * extend_length)
text_input.append(text)
keyword_input.append(keyword)
return text_input, keyword_input
testGenerator = PredictDataGenerator(test, batch_size=len(test))
preds = model.predict_generator(testGenerator)
# ### Saving Output
predictions = []
for i in preds:
val = 1 if i > 0.5 else 0
predictions.append(val)
submit = pd.DataFrame()
submit["id"] = test["id"]
submit["target"] = predictions
submit.to_csv("/kaggle/submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # graphs
import seaborn as sns # pretty graphs
import missingno as msno # missing values
import pprint
# Pandas profiling helps while doing categorical data analysis
import pandas_profiling
from pandas_profiling import ProfileReport
from pandas_profiling.utils.cache import cache_file
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Part 1: Reading the data, general aspects of data ##
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
sub = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
train.head(5)
test.head(5)
sub.head(5)
train.describe().T
test.describe().T
train.info()
# ## Categorical columns analysis ##
# Identifying categorical features
# I have taken this code from: https://towardsdatascience.com/encoding-categorical-features-21a2651a065c
def identify_cat(dataframe):
"""
(pd.DataFrame) -> list
This function identifies and returns a list with the names of all the categorical columns of a DataFrame.
"""
categorical_feature_mask = (
dataframe.dtypes == object
) # Here, t can be the entire dataframe or only the features
categorical_cols = dataframe.columns[categorical_feature_mask].tolist()
return categorical_cols
catego = identify_cat(train)
pprint.pprint(catego)
numeric = set(train.columns) - set(catego)
numeric
def convert_type(dataframe, catego_cols):
"""
(pd.DataFrame, list) -> None
This is an optimization function. It converts the type of categorical columns in a DataFrame from 'object' to 'category',
making operations faster.
See the docs here: https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
"""
for column in catego_cols:
        dataframe[column] = dataframe[column].astype("category")
convert_type(train, catego)
convert_type(test, catego)
# Checking for hidden NaN, as 'unknown', for example
for column in catego:
print(column)
print(train[column].unique())
profile = ProfileReport(
train[catego],
title="Titanic - Categorical Features",
html={"style": {"full_width": True}},
minimal=True,
)
profile.to_notebook_iframe()
|
# ## Importing libraries
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm.notebook import tqdm # progress bars with support for jupyter notebooks
import datetime as dt
import keras as ks
import tensorflow as tf
import sklearn.preprocessing as sklpp
import collections
# import matplotlib.pyplot as plt
# from tensorflow.keras import layers
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
def unique_union(x, y):
"""
takes two lists and returns their union with only unique elements. No ordering.
*x expands x, {*x} makes a set, *{*x} expands the set.
"""
return [*({*x}.union({*y}))]
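# A toy example of unique_union (the inputs here are made up): each element appears once, order not guaranteed.
unique_union([1, 2, 2], [2, 3])  # e.g. [1, 2, 3]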
# ## Import and massage data
# Let's have a look at the data at first.
title_event_codes_map = {
"Welcome to Lost Lagoon!_2000": 0,
"Chest Sorter (Assessment)_2020": 1,
"Crystals Rule_4010": 2,
"Scrub-A-Dub_2050": 3,
"Dino Dive_3120": 4,
"Leaf Leader_3121": 5,
"Flower Waterer (Activity)_3110": 6,
"Sandcastle Builder (Activity)_3110": 7,
"Air Show_2070": 8,
"Air Show_3021": 9,
"Bubble Bath_3021": 10,
"Cart Balancer (Assessment)_4090": 11,
"Dino Drink_4090": 12,
"Watering Hole (Activity)_5010": 13,
"Bubble Bath_4040": 14,
"Bubble Bath_4080": 15,
"Ordering Spheres_2000": 16,
"Cart Balancer (Assessment)_2020": 17,
"Cauldron Filler (Assessment)_4030": 18,
"Bird Measurer (Assessment)_2030": 19,
"Dino Dive_4090": 20,
"Chest Sorter (Assessment)_4090": 21,
"Cart Balancer (Assessment)_2000": 22,
"Bird Measurer (Assessment)_4040": 23,
"Dino Drink_3120": 24,
"Scrub-A-Dub_4080": 25,
"Dino Drink_4031": 26,
"Cart Balancer (Assessment)_3121": 27,
"Pan Balance_3120": 28,
"Happy Camel_2080": 29,
"Leaf Leader_3020": 30,
"Dino Drink_4070": 31,
"Air Show_3120": 32,
"Balancing Act_2000": 33,
"Bubble Bath_4045": 34,
"Watering Hole (Activity)_4021": 35,
"Cart Balancer (Assessment)_4030": 36,
"Dino Drink_3010": 37,
"Mushroom Sorter (Assessment)_3010": 38,
"Cart Balancer (Assessment)_3110": 39,
"Egg Dropper (Activity)_4090": 40,
"Leaf Leader_4010": 41,
"Pan Balance_3121": 42,
"Dino Drink_4030": 43,
"Chow Time_4010": 44,
"Chicken Balancer (Activity)_4035": 45,
"Dino Drink_2060": 46,
"Pan Balance_3020": 47,
"Dino Drink_2070": 48,
"Bubble Bath_2083": 49,
"Magma Peak - Level 1_2000": 50,
"Dino Dive_4080": 51,
"Bubble Bath_2025": 52,
"Bottle Filler (Activity)_2020": 53,
"Mushroom Sorter (Assessment)_2035": 54,
"Happy Camel_4045": 55,
"Mushroom Sorter (Assessment)_2010": 56,
"Happy Camel_3010": 57,
"Pan Balance_4100": 58,
"Happy Camel_2000": 59,
"Leaf Leader_2020": 60,
"Bubble Bath_3020": 61,
"Chow Time_3020": 62,
"Pan Balance_3010": 63,
"Bird Measurer (Assessment)_4035": 64,
"Chest Sorter (Assessment)_2010": 65,
"Leaf Leader_2060": 66,
"Scrub-A-Dub_3121": 67,
"Fireworks (Activity)_4030": 68,
"Chest Sorter (Assessment)_3110": 69,
"Sandcastle Builder (Activity)_4070": 70,
"Chow Time_4080": 71,
"Happy Camel_4020": 72,
"Chow Time_3120": 73,
"Bird Measurer (Assessment)_4090": 74,
"Crystals Rule_3120": 75,
"Cart Balancer (Assessment)_4070": 76,
"Dino Drink_2075": 77,
"Leaf Leader_2070": 78,
"Bug Measurer (Activity)_4030": 79,
"Dino Drink_4020": 80,
"Pan Balance_2020": 81,
"All Star Sorting_4080": 82,
"Happy Camel_4010": 83,
"Cauldron Filler (Assessment)_3020": 84,
"Bubble Bath_4220": 85,
"Cart Balancer (Assessment)_3021": 86,
"Bug Measurer (Activity)_4080": 87,
"Cart Balancer (Assessment)_3010": 88,
"Chest Sorter (Assessment)_4025": 89,
"Pan Balance_4025": 90,
"Bottle Filler (Activity)_2030": 91,
"Dino Dive_3010": 92,
"Scrub-A-Dub_2083": 93,
"Happy Camel_3021": 94,
"Chest Sorter (Assessment)_4030": 95,
"Bubble Bath_3120": 96,
"Bubble Bath_2030": 97,
"Crystals Rule_4090": 98,
"Chicken Balancer (Activity)_2000": 99,
"Sandcastle Builder (Activity)_4030": 100,
"Cauldron Filler (Assessment)_3121": 101,
"Chest Sorter (Assessment)_3020": 102,
"Pan Balance_4080": 103,
"Chest Sorter (Assessment)_3120": 104,
"Air Show_4110": 105,
"Air Show_2030": 106,
"Chow Time_2000": 107,
"Bug Measurer (Activity)_4035": 108,
"Air Show_4010": 109,
"Mushroom Sorter (Assessment)_4025": 110,
"Tree Top City - Level 2_2000": 111,
"Bottle Filler (Activity)_4070": 112,
"Scrub-A-Dub_2000": 113,
"Chicken Balancer (Activity)_4090": 114,
"Bird Measurer (Assessment)_3010": 115,
"Watering Hole (Activity)_2010": 116,
"Leaf Leader_4095": 117,
"Watering Hole (Activity)_3110": 118,
"Sandcastle Builder (Activity)_4090": 119,
"Chicken Balancer (Activity)_4080": 120,
"Flower Waterer (Activity)_2000": 121,
"Mushroom Sorter (Assessment)_3110": 122,
"Chow Time_3110": 123,
"Crystals Rule_2010": 124,
"Happy Camel_2081": 125,
"Air Show_2020": 126,
"Happy Camel_4035": 127,
"Sandcastle Builder (Activity)_4020": 128,
"Dino Drink_4010": 129,
"Scrub-A-Dub_2081": 130,
"Bird Measurer (Assessment)_4110": 131,
"Dino Dive_2070": 132,
"Pan Balance_2030": 133,
"Bird Measurer (Assessment)_3120": 134,
"Watering Hole (Activity)_3010": 135,
"Leaf Leader_4090": 136,
"Crystals Rule_4050": 137,
"Sandcastle Builder (Activity)_4021": 138,
"Dino Dive_4020": 139,
"Happy Camel_4040": 140,
"All Star Sorting_2030": 141,
"Bug Measurer (Activity)_3110": 142,
"Happy Camel_3110": 143,
"Cauldron Filler (Assessment)_4080": 144,
"Cauldron Filler (Assessment)_4025": 145,
"Air Show_4020": 146,
"Chest Sorter (Assessment)_3010": 147,
"Bird Measurer (Assessment)_3021": 148,
"Mushroom Sorter (Assessment)_4080": 149,
"Leaf Leader_4080": 150,
"Egg Dropper (Activity)_2020": 151,
"Air Show_4090": 152,
"Tree Top City - Level 3_2000": 153,
"Fireworks (Activity)_3010": 154,
"Dino Dive_3020": 155,
"Bottle Filler (Activity)_3110": 156,
"Cauldron Filler (Assessment)_4040": 157,
"Cauldron Filler (Assessment)_2000": 158,
"Bottle Filler (Activity)_4090": 159,
"All Star Sorting_2020": 160,
"Tree Top City - Level 1_2000": 161,
"Happy Camel_2030": 162,
"Pan Balance_4020": 163,
"Chest Sorter (Assessment)_4040": 164,
"Chicken Balancer (Activity)_3010": 165,
"Bug Measurer (Activity)_4090": 166,
"Cauldron Filler (Assessment)_3110": 167,
"Dino Dive_4070": 168,
"Mushroom Sorter (Assessment)_4020": 169,
"Bubble Bath_2080": 170,
"Pan Balance_2010": 171,
"Watering Hole (Activity)_4090": 172,
"Cart Balancer (Assessment)_4035": 173,
"Dino Dive_2020": 174,
"Scrub-A-Dub_2020": 175,
"Fireworks (Activity)_4020": 176,
"Air Show_4070": 177,
"Cart Balancer (Assessment)_4080": 178,
"Dino Drink_3121": 179,
"Dino Dive_2060": 180,
"Cauldron Filler (Assessment)_4100": 181,
"Happy Camel_3121": 182,
"Bottle Filler (Activity)_3010": 183,
"Chow Time_4095": 184,
"Dino Drink_3021": 185,
"Crystals Rule_2030": 186,
"Cauldron Filler (Assessment)_3120": 187,
"Chow Time_2030": 188,
"Watering Hole (Activity)_4070": 189,
"Happy Camel_4090": 190,
"Leaf Leader_4070": 191,
"Egg Dropper (Activity)_4080": 192,
"Egg Dropper (Activity)_4025": 193,
"Chow Time_3121": 194,
"Bird Measurer (Assessment)_2010": 195,
"Chow Time_4070": 196,
"Air Show_3110": 197,
"Chicken Balancer (Activity)_4030": 198,
"Leaf Leader_3010": 199,
"Flower Waterer (Activity)_4025": 200,
"Egg Dropper (Activity)_3110": 201,
"Mushroom Sorter (Assessment)_4070": 202,
"Crystals Rule_2020": 203,
"12 Monkeys_2000": 204,
"Heavy, Heavier, Heaviest_2000": 205,
"Magma Peak - Level 2_2000": 206,
"Chest Sorter (Assessment)_2000": 207,
"All Star Sorting_4070": 208,
"Bubble Bath_4230": 209,
"All Star Sorting_3110": 210,
"Crystals Rule_3121": 211,
"Dino Dive_2000": 212,
"Watering Hole (Activity)_5000": 213,
"Chow Time_4020": 214,
"Costume Box_2000": 215,
"Bird Measurer (Assessment)_4020": 216,
"Pan Balance_2000": 217,
"Bird Measurer (Assessment)_4070": 218,
"Chest Sorter (Assessment)_4020": 219,
"Chest Sorter (Assessment)_4080": 220,
"Mushroom Sorter (Assessment)_3120": 221,
"Cart Balancer (Assessment)_2030": 222,
"Cauldron Filler (Assessment)_4070": 223,
"Cauldron Filler (Assessment)_4035": 224,
"Crystals Rule_4020": 225,
"Cauldron Filler (Assessment)_3010": 226,
"Cauldron Filler (Assessment)_2030": 227,
"Scrub-A-Dub_2040": 228,
"Sandcastle Builder (Activity)_2010": 229,
"Bubble Bath_4010": 230,
"All Star Sorting_4010": 231,
"Chest Sorter (Assessment)_3021": 232,
"Bug Measurer (Activity)_3010": 233,
"Watering Hole (Activity)_4020": 234,
"Air Show_2075": 235,
"Lifting Heavy Things_2000": 236,
"Crystals Rule_4070": 237,
"Bird Measurer (Assessment)_2020": 238,
"Dino Drink_3020": 239,
"All Star Sorting_4020": 240,
"Bubble Bath_2020": 241,
"Mushroom Sorter (Assessment)_2000": 242,
"Fireworks (Activity)_4070": 243,
"Cauldron Filler (Assessment)_4020": 244,
"Bird Measurer (Assessment)_4080": 245,
"Pan Balance_3021": 246,
"Air Show_2060": 247,
"Bubble Bath_4020": 248,
"Flower Waterer (Activity)_4030": 249,
"Chow Time_4035": 250,
"Mushroom Sorter (Assessment)_4030": 251,
"Rulers_2000": 252,
"Flower Waterer (Activity)_4022": 253,
"Chest Sorter (Assessment)_3121": 254,
"Mushroom Sorter (Assessment)_2030": 255,
"Chicken Balancer (Activity)_4070": 256,
"Happy Camel_3020": 257,
"Bubble Bath_2000": 258,
"Chest Sorter (Assessment)_4070": 259,
"Scrub-A-Dub_4010": 260,
"Happy Camel_4095": 261,
"Bubble Bath_2035": 262,
"Dino Drink_4080": 263,
"Fireworks (Activity)_4080": 264,
"Bottle Filler (Activity)_4030": 265,
"Mushroom Sorter (Assessment)_2020": 266,
"Pan Balance_4010": 267,
"Cauldron Filler (Assessment)_3021": 268,
"Sandcastle Builder (Activity)_4035": 269,
"Bird Measurer (Assessment)_4025": 270,
"Bird Measurer (Assessment)_4030": 271,
"Dino Dive_3110": 272,
"Dino Dive_3021": 273,
"Crystal Caves - Level 1_2000": 274,
"Honey Cake_2000": 275,
"Scrub-A-Dub_2080": 276,
"Chow Time_3021": 277,
"Chicken Balancer (Activity)_3110": 278,
"Mushroom Sorter (Assessment)_4100": 279,
"Happy Camel_3120": 280,
"Scrub-A-Dub_2030": 281,
"Air Show_2000": 282,
"Scrub-A-Dub_3120": 283,
"Air Show_3010": 284,
"Pan Balance_4030": 285,
"Chow Time_3010": 286,
"Happy Camel_4030": 287,
"Air Show_4080": 288,
"Bug Measurer (Activity)_2000": 289,
"Scrub-A-Dub_3021": 290,
"Bubble Bath_4070": 291,
"Cart Balancer (Assessment)_4040": 292,
"Pan Balance_3110": 293,
"Chow Time_4090": 294,
"Watering Hole (Activity)_4025": 295,
"Chow Time_2020": 296,
"Dino Drink_2020": 297,
"Cauldron Filler (Assessment)_2010": 298,
"Leaf Leader_2075": 299,
"Bubble Bath_4095": 300,
"Chow Time_4030": 301,
"All Star Sorting_3020": 302,
"Bubble Bath_3110": 303,
"All Star Sorting_2025": 304,
"Bubble Bath_3121": 305,
"Air Show_3121": 306,
"Egg Dropper (Activity)_4070": 307,
"Pan Balance_4090": 308,
"Flower Waterer (Activity)_4070": 309,
"Mushroom Sorter (Assessment)_3020": 310,
"Chest Sorter (Assessment)_2030": 311,
"Dino Drink_2030": 312,
"All Star Sorting_3010": 313,
"Air Show_4100": 314,
"Mushroom Sorter (Assessment)_3021": 315,
"Scrub-A-Dub_4020": 316,
"Bird Measurer (Assessment)_3110": 317,
"Air Show_3020": 318,
"Flower Waterer (Activity)_4090": 319,
"Mushroom Sorter (Assessment)_3121": 320,
"Pan Balance_4070": 321,
"Leaf Leader_2030": 322,
"Scrub-A-Dub_3010": 323,
"Mushroom Sorter (Assessment)_4040": 324,
"Chicken Balancer (Activity)_4020": 325,
"Sandcastle Builder (Activity)_3010": 326,
"Scrub-A-Dub_4070": 327,
"Bird Measurer (Assessment)_4100": 328,
"Slop Problem_2000": 329,
"Sandcastle Builder (Activity)_2000": 330,
"Cart Balancer (Assessment)_3120": 331,
"Bottle Filler (Activity)_4080": 332,
"Bottle Filler (Activity)_2010": 333,
"Dino Dive_2030": 334,
"Flower Waterer (Activity)_4020": 335,
"Flower Waterer (Activity)_3010": 336,
"Chest Sorter (Assessment)_4100": 337,
"Leaf Leader_3120": 338,
"Leaf Leader_3021": 339,
"Dino Dive_3121": 340,
"Chest Sorter (Assessment)_4035": 341,
"Bubble Bath_3010": 342,
"Mushroom Sorter (Assessment)_4090": 343,
"Bird Measurer (Assessment)_3121": 344,
"Mushroom Sorter (Assessment)_2025": 345,
"Bird Measurer (Assessment)_2000": 346,
"Crystals Rule_3110": 347,
"Happy Camel_2083": 348,
"Crystals Rule_2000": 349,
"Bug Measurer (Activity)_4070": 350,
"Dino Drink_3110": 351,
"Fireworks (Activity)_3110": 352,
"Bottle Filler (Activity)_4020": 353,
"All Star Sorting_3121": 354,
"Bottle Filler (Activity)_2000": 355,
"Leaf Leader_3110": 356,
"Scrub-A-Dub_4090": 357,
"Chicken Balancer (Activity)_4022": 358,
"All Star Sorting_4095": 359,
"Happy Camel_4080": 360,
"Crystals Rule_3021": 361,
"Sandcastle Builder (Activity)_4080": 362,
"Cart Balancer (Assessment)_3020": 363,
"Dino Drink_2000": 364,
"Egg Dropper (Activity)_3010": 365,
"Dino Dive_4010": 366,
"Bubble Bath_4235": 367,
"Bubble Bath_4090": 368,
"Egg Dropper (Activity)_4020": 369,
"All Star Sorting_4090": 370,
"Crystal Caves - Level 2_2000": 371,
"Pirate's Tale_2000": 372,
"Cauldron Filler (Assessment)_2020": 373,
"Happy Camel_4070": 374,
"Crystal Caves - Level 3_2000": 375,
"Fireworks (Activity)_2000": 376,
"All Star Sorting_2000": 377,
"Scrub-A-Dub_3020": 378,
"Egg Dropper (Activity)_2000": 379,
"All Star Sorting_4030": 380,
"All Star Sorting_3120": 381,
"Crystals Rule_3010": 382,
"Happy Camel_2020": 383,
"Bird Measurer (Assessment)_3020": 384,
"Treasure Map_2000": 385,
"Bottle Filler (Activity)_4035": 386,
"Cart Balancer (Assessment)_4100": 387,
"Fireworks (Activity)_4090": 388,
"Bug Measurer (Activity)_4025": 389,
"Cart Balancer (Assessment)_2010": 390,
"Scrub-A-Dub_3110": 391,
"Watering Hole (Activity)_2000": 392,
"All Star Sorting_4035": 393,
"Leaf Leader_4020": 394,
"Mushroom Sorter (Assessment)_4035": 395,
"Flower Waterer (Activity)_4080": 396,
"Cart Balancer (Assessment)_4020": 397,
"Crystals Rule_3020": 398,
"Leaf Leader_2000": 399,
"Pan Balance_4035": 400,
"Cauldron Filler (Assessment)_4090": 401,
"All Star Sorting_3021": 402,
}
event_ids_map = {
"b7530680": 0,
"30614231": 1,
"e080a381": 2,
"070a5291": 3,
"795e4a37": 4,
"222660ff": 5,
"47f43a44": 6,
"f806dc10": 7,
"ea321fb1": 8,
"25fa8af4": 9,
"fbaf3456": 10,
"d51b1749": 11,
"33505eae": 12,
"dcb55a27": 13,
"d3f1e122": 14,
"16dffff1": 15,
"15f99afc": 16,
"392e14df": 17,
"45d01abe": 18,
"df4940d3": 19,
"2c4e6db0": 20,
"86ba578b": 21,
"d9c005dd": 22,
"2b058fe3": 23,
"7f0836bf": 24,
"cdd22e43": 25,
"2fb91ec1": 26,
"c74f40cd": 27,
"b5053438": 28,
"e5734469": 29,
"93edfe2e": 30,
"9d29771f": 31,
"99ea62f3": 32,
"58a0de5c": 33,
"363c86c9": 34,
"a0faea5d": 35,
"1325467d": 36,
"fcfdffb6": 37,
"55115cbd": 38,
"8f094001": 39,
"6043a2b4": 40,
"3bb91dda": 41,
"71fe8f75": 42,
"5e3ea25a": 43,
"0413e89d": 44,
"9b23e8ee": 45,
"7ec0c298": 46,
"2230fab4": 47,
"46b50ba8": 48,
"65a38bf7": 49,
"56817e2b": 50,
"f50fc6c1": 51,
"38074c54": 52,
"611485c5": 53,
"31973d56": 54,
"83c6c409": 55,
"bfc77bd6": 56,
"56cd3b43": 57,
"47efca07": 58,
"d88e8f25": 59,
"bcceccc6": 60,
"c7128948": 61,
"0086365d": 62,
"2a512369": 63,
"90efca10": 64,
"3bfd1a65": 65,
"a76029ee": 66,
"eb2c19cd": 67,
"a1bbe385": 68,
"6088b756": 69,
"262136f4": 70,
"08fd73f3": 71,
"792530f8": 72,
"6cf7d25c": 73,
"d45ed6a1": 74,
"15a43e5b": 75,
"4901243f": 76,
"beb0a7b9": 77,
"3393b68b": 78,
"e3ff61fb": 79,
"f71c4741": 80,
"8b757ab8": 81,
"4a4c3d21": 82,
"a52b92d5": 83,
"e79f3763": 84,
"bb3e370b": 85,
"5e109ec3": 86,
"27253bdc": 87,
"15ba1109": 88,
"363d3849": 89,
"5859dfb6": 90,
"499edb7c": 91,
"37db1c2f": 92,
"b120f2ac": 93,
"86c924c4": 94,
"77c76bc5": 95,
"9ee1c98c": 96,
"3bb91ced": 97,
"28f975ea": 98,
"84b0e0c8": 99,
"d185d3ea": 100,
"ca11f653": 101,
"36fa3ebe": 102,
"a16a373e": 103,
"e37a2b78": 104,
"fd20ea40": 105,
"5b49460a": 106,
"8ac7cce4": 107,
"91561152": 108,
"6aeafed4": 109,
"5e812b27": 110,
"6bf9e3e1": 111,
"73757a5e": 112,
"f6947f54": 113,
"0a08139c": 114,
"00c73085": 115,
"7040c096": 116,
"13f56524": 117,
"3b2048ee": 118,
"e57dd7af": 119,
"a29c5338": 120,
"16667cc5": 121,
"5348fd84": 122,
"daac11b0": 123,
"e5c9df6f": 124,
"7961e599": 125,
"01ca3a3c": 126,
"7423acbc": 127,
"3d8c61b0": 128,
"acf5c23f": 129,
"a8efe47b": 130,
"4ef8cdd3": 131,
"6f445b57": 132,
"90d848e0": 133,
"bc8f2793": 134,
"4e5fc6f5": 135,
"8af75982": 136,
"7fd1ac25": 137,
"c0415e5c": 138,
"756e5507": 139,
"6f4bd64e": 140,
"89aace00": 141,
"5a848010": 142,
"b80e5e84": 143,
"c54cf6c5": 144,
"76babcde": 145,
"bd701df8": 146,
"65abac75": 147,
"28520915": 148,
"cb1178ad": 149,
"4074bac2": 150,
"1f19558b": 151,
"05ad839b": 152,
"d122731b": 153,
"461eace6": 154,
"731c0cbe": 155,
"7dfe6d8a": 156,
"2dcad279": 157,
"15eb4a7d": 158,
"14de4c5d": 159,
"c51d8688": 160,
"7da34a02": 161,
"abc5811c": 162,
"c2baf0bd": 163,
"3dcdda7f": 164,
"a2df0760": 165,
"3bf1cf26": 166,
"7cf1bc53": 167,
"1340b8d7": 168,
"6f4adc4b": 169,
"dcb1663e": 170,
"d06f75b5": 171,
"763fc34e": 172,
"cfbd47c8": 173,
"4bb2f698": 174,
"e04fb33d": 175,
"b1d5101d": 176,
"df4fe8b6": 177,
"a592d54e": 178,
"e7561dd2": 179,
"0ce40006": 180,
"a5be6304": 181,
"6c930e6e": 182,
"709b1251": 183,
"26a5a3dd": 184,
"a44b10dc": 185,
"5f5b2617": 186,
"a1e4395d": 187,
"17113b36": 188,
"1beb320a": 189,
"d02b7a8e": 190,
"28a4eb9a": 191,
"90ea0bac": 192,
"4c2ec19f": 193,
"598f4598": 194,
"5154fc30": 195,
"69fdac0a": 196,
"c189aaf2": 197,
"ecc36b7f": 198,
"63f13dd7": 199,
"f93fc684": 200,
"c7fe2a55": 201,
"e9c52111": 202,
"28ed704e": 203,
"3babcb9b": 204,
"832735e1": 205,
"804ee27f": 206,
"f54238ee": 207,
"f3cd5473": 208,
"b2e5b0f1": 209,
"5f0eb72c": 210,
"c6971acf": 211,
"8d84fa81": 212,
"49ed92e9": 213,
"9b4001e4": 214,
"ad2fc29c": 215,
"67439901": 216,
"155f62a4": 217,
"f28c589a": 218,
"5de79a6a": 219,
"857f21c0": 220,
"9b01374f": 221,
"85d1b0de": 222,
"4a09ace1": 223,
"e64e2cfd": 224,
"ab4ec3a4": 225,
"3d63345e": 226,
"022b4259": 227,
"1cf54632": 228,
"29bdd9ba": 229,
"a5e9da97": 230,
"f7e47413": 231,
"ad148f58": 232,
"9e6b7fb5": 233,
"8fee50e2": 234,
"87d743c1": 235,
"9e34ea74": 236,
"736f9581": 237,
"44cb4907": 238,
"cf7638f3": 239,
"d2659ab4": 240,
"a8a78786": 241,
"19967db1": 242,
"cf82af56": 243,
"532a2afb": 244,
"1996c610": 245,
"b88f38da": 246,
"e4d32835": 247,
"884228c8": 248,
"c7f7f0e1": 249,
"8d748b58": 250,
"0db6d71d": 251,
"5c3d2b2f": 252,
"1bb5fbdb": 253,
"bd612267": 254,
"47026d5f": 255,
"7372e1a5": 256,
"02a42007": 257,
"04df9b66": 258,
"ea296733": 259,
"77ead60d": 260,
"2dc29e21": 261,
"6f8106d9": 262,
"250513af": 263,
"3dfd4aa4": 264,
"93b353f2": 265,
"3ee399c3": 266,
"1af8be29": 267,
"3a4be871": 268,
"37ee8496": 269,
"a7640a16": 270,
"d3640339": 271,
"71e712d8": 272,
"1c178d24": 273,
"30df3273": 274,
"e694a35b": 275,
"9d4e7b25": 276,
"6d90d394": 277,
"2a444e03": 278,
"26fd2d99": 279,
"e720d930": 280,
"d38c2fd7": 281,
"a1192f43": 282,
"b2dba42b": 283,
"9c5ef70c": 284,
"2b9272f4": 285,
"f5b8c21a": 286,
"dcaede90": 287,
"562cec5f": 288,
"53c6e11a": 289,
"6077cc36": 290,
"06372577": 291,
"56bcd38d": 292,
"bdf49a58": 293,
"5c2f29ca": 294,
"74e5f8a7": 295,
"ecaab346": 296,
"d2278a3b": 297,
"92687c59": 298,
"4d6737eb": 299,
"4b5efe37": 300,
"e7e44842": 301,
"c58186bf": 302,
"cb6010f8": 303,
"1b54d27f": 304,
"d2e9262e": 305,
"1375ccb7": 306,
"85de926c": 307,
"d88ca108": 308,
"c277e121": 309,
"7525289a": 310,
"ac92046e": 311,
"1575e76c": 312,
"17ca3959": 313,
"46cd75b4": 314,
"5be391b5": 315,
"895865f3": 316,
"37937459": 317,
"3d0b9317": 318,
"84538528": 319,
"7d093bf9": 320,
"e4f1efe6": 321,
"b74258a0": 322,
"37c53127": 323,
"ab3136ba": 324,
"bbfe0445": 325,
"7d5c30a2": 326,
"6c517a88": 327,
"5d042115": 328,
"3afde5dd": 329,
"db02c830": 330,
"565a3990": 331,
"9ed8f6da": 332,
"a6d66e51": 333,
"f32856e4": 334,
"ecc6157f": 335,
"51102b85": 336,
"4d911100": 337,
"88d4a5be": 338,
"9554a50b": 339,
"99abe2bb": 340,
"f56e0afc": 341,
"b012cd7f": 342,
"67aa2ada": 343,
"9e4c8c7b": 344,
"08ff79ad": 345,
"2ec694de": 346,
"587b5989": 347,
"3afb49e6": 348,
"de26c3a6": 349,
"9de5e594": 350,
"c1cac9a2": 351,
"3edf6747": 352,
"1cc7cfca": 353,
"160654fd": 354,
"0330ab6a": 355,
"9ce586dd": 356,
"48349b14": 357,
"3323d7e9": 358,
"907a054b": 359,
"77261ab5": 360,
"29a42aea": 361,
"3ddc79c3": 362,
"29f54413": 363,
"0d1da71f": 364,
"3ccd3f02": 365,
"51311d7a": 366,
"b7dc8128": 367,
"cc5087a3": 368,
"a8876db3": 369,
"c952eb01": 370,
"5290eab1": 371,
"7ad3efc6": 372,
"7ab78247": 373,
"119b5b02": 374,
"d3268efa": 375,
"ec138c1c": 376,
"923afab1": 377,
"8d7e386c": 378,
"0d18d96c": 379,
"828e68f9": 380,
}
# number the lists
worlds_map = {
"TREETOPCITY": 0,
"MAGMAPEAK": 1,
"NONE": 2,
"CRYSTALCAVES": 3,
} # {x:i for (x,i) in zip(worlds, np.arange(len(worlds)))}
assessment_titles = [
"Chest Sorter (Assessment)",
"Cart Balancer (Assessment)",
"Cauldron Filler (Assessment)",
"Mushroom Sorter (Assessment)",
"Bird Measurer (Assessment)",
] # unique_union(train[train['type'] == 'Assessment']['title'], test[test['type'] == 'Assessment']['title'])
# {x:i for (x,i) in zip(event_ids, np.arange(len(event_ids)))}
types_map = {"Clip": 0, "Activity": 1, "Game": 2, "Assessment": 3}
types = ["Clip", "Activity", "Game", "Assessment"]
# TODO: reduce runs
def times_to_numbers(data):
# DROP_TIME is the number of seconds between distinct sessions
DROP_TIME = 900
data_sorted = data.sort_values(by=["timestamp"])
data_sorted["next_timestamp"] = data_sorted["timestamp"].shift(periods=-1)
data_sorted["end_of_session"] = (
data_sorted["next_timestamp"] - data_sorted["timestamp"]
).map(lambda x: x.total_seconds() > DROP_TIME)
data_sorted["timestamp"] = (
data_sorted["end_of_session"].cumsum() + 1
) # +1 to support masking
data_sorted = data_sorted.drop(columns=["next_timestamp", "end_of_session"])
    data_sorted = data_sorted.rename(columns={"timestamp": "game_session"})
# data_sorted.iloc[-1]['game_time'] = data_sorted.iloc[-2]['game_time']
return data_sorted
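# A quick sketch of how times_to_numbers segments events into sessions, on a few made-up timestamps:
# consecutive events less than DROP_TIME (900 s) apart stay in the same session, while a larger gap starts a new one.
_demo = pd.DataFrame(
    {
        "timestamp": pd.to_datetime(
            ["2019-01-01 10:00:00", "2019-01-01 10:05:00", "2019-01-01 11:00:00"]
        )
    }
)
times_to_numbers(_demo)  # the 11:00 event ends up in a different session than the 10:00 event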
import pickle
# pattern = r'"correct":true'
pattern = '"correct":true'
def assessment_to_num(a_bool):
if a_bool:
return 1
else:
return -1
# last_assessment_map = {k: get_last_assessment(k) for k in test_ids}
last_assessment_map = {}
columns_to_keep = [
"installation_id",
"world",
"type",
"event_data",
"event_id",
"timestamp",
"game_time",
"title",
]
test_converters = {
"world": lambda x: worlds_map[x] + 1,
"type": lambda x: types_map[x] + 1,
"event_id": lambda x: event_ids_map[x] + 1,
"event_data": lambda x: assessment_to_num(pattern in x),
"game_time": lambda x: int(x) + 1,
"timestamp": pd.to_datetime,
}
# def read_data():
# print('Reading test.csv.')
# test = pd.read_csv('../input/data-science-bowl-2019/test.csv',
# usecols = columns_to_keep,
# converters = test_converters
# )
# print('Read test.csv with {} rows and {} columns.'.format(test.shape[0],test.shape[1]))
# with open('../input/preprocessor-for-data-bowl-2019/activities_map.pkl', 'rb') as a_pickle:
# reverse_activities_map = pickle.load(a_pickle)
# print('Read.')
# return test, reverse_activities_map
# test, reverse_activities_map = read_data()
# test.rename(columns={'event_data': 'correct_assessment'}, inplace=True)
# #test_csv_reader = pd.read_csv('../input/data-science-bowl-2019/test.csv',
# # usecols = columns_to_keep,
# # converters = test_converters,
# # iterator = True,
# # chunksize = 1,
# # memory_map= True)
# def process_an_ID_pd(next_row=None):
# try:
# if next_row is None:
# a_row = next(test_csv_reader)
# else:
# a_row = next_row
# ID = list(a_row['installation_id'])[0]
# more_rows = []
# while True:
# try:
# next_row = next(test_csv_reader)
# except StopIteration:
# raise
# if (list(next_row['installation_id']))[0] == ID:
# more_rows.append(next_row)
# else:
# break
# all_rows = [a_row] + more_rows
# df = pd.concat(all_rows)
# last_title = list(df.tail(n=1)['title'])[0]
# df.drop(columns=['title'], inplace=True)
# df = times_to_numbers(df)
# df.iat[-1,3] = df.iat[-2,3] + 1
# return df, str(ID), next_row, last_title
# except StopIteration:
# raise
#
#
# to support masking
SEQ_LENGTH = 13000
# we have
# ['event_id', 'correct_assessment', 'game_time', 'type', 'world', 'session_number']
# we want
# ['game_time', 'session_number', 'correct_assessment', 'event_id', 'type','world']
# permutation written as [f(0),f(1),f(2),f(3),f(4),f(5)] under the permutation
# permutation = [3,0,4,5,2,1]
# permutation = [3,2,0,4,5,1]
# idx = np.empty_like(permutation)
# idx[permutation] = np.arange(len(permutation))
# and https://discourse.julialang.org/t/reshape-a-1-d-array-into-an-array-of-different-size-arrays/25999
n = [3, 1, 1, 1]
split_points = np.cumsum(n[0:-1])
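# split_points is [3, 4, 5]; np.split then carves each length-6 row into chunks of sizes 3, 1, 1 and 1.
# A toy illustration with made-up values:
np.split(np.array([10, 20, 30, 40, 50, 60]), split_points)
# -> [array([10, 20, 30]), array([40]), array([50]), array([60])]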
import os
# get the models
from tensorflow.keras import layers, Model, losses
assessments = [
"Chest Sorter (Assessment)",
"Cart Balancer (Assessment)",
"Mushroom Sorter (Assessment)",
"Bird Measurer (Assessment)",
"Cauldron Filler (Assessment)",
]
# os.listdir('../input/db2019model/kaggle/')
models = {
assessment: tf.keras.models.load_model(
"../input/fork-of-data-science-bowl-model-1f2596/" + assessment,
custom_objects={
"SEQ_LENGTH": SEQ_LENGTH,
"model_params": {
"LEARNING_RATE": 0.001, # default is 0.001
"LOSS_FN": tf.keras.losses.CategoricalCrossentropy(),
"METRICS": ["categorical_accuracy"],
"CLIP_NORM": 1,
"DENSE_DROPOUT": 0.1,
},
"tf.keras.losses": tf.keras.losses,
"my_optimizer": tf.keras.optimizers.Adam(
learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=True, clipnorm=1
),
},
)
for assessment in assessments
}
def prepare_batch_for_prediction(batch):
def prepare_for_prediction(df):
row = np.array(
[np.array([0, 0, 0]), np.array([0]), np.array([0]), np.array([0])],
dtype=object,
)
X0 = np.empty([SEQ_LENGTH, 3])
X1 = np.empty([SEQ_LENGTH])
X2 = np.empty([SEQ_LENGTH])
X3 = np.empty([SEQ_LENGTH])
Xentry = np.tile(row, (SEQ_LENGTH, 1))
# print(df)
id_array = df.to_numpy().astype(int)[-SEQ_LENGTH:]
# id_array[:] = id_array[:,idx]
# could maybe use something fancy from numpy but let's just loop
for i in np.arange(id_array.shape[0]):
Xentry[i, :] = np.split(id_array[i], split_points, axis=0)
# TODO: make this batch-y instead of silly reshaping
# X0 = np.vstack(Xentry[:,0]).reshape([SEQ_LENGTH,3]).astype(float) # TODO: why astype??
X0 = np.vstack(Xentry[:, 0]).astype(
float
) # .reshape([SEQ_LENGTH,3]).astype(float) # TODO: why astype??
X1 = (
Xentry[:, 1].astype(int).reshape([SEQ_LENGTH])
) # this has event_id -- last one is the
X2 = Xentry[:, 2].astype(int).reshape([SEQ_LENGTH])
X3 = Xentry[:, 3].astype(int).reshape([SEQ_LENGTH])
return [X0, X1, X2, X3]
XX0, XX1, XX2, XX3 = [], [], [], []
for df in batch:
X0, X1, X2, X3 = prepare_for_prediction(df)
XX0.append(X0) # TODO: make this less hacky!
XX1.append(X1)
XX2.append(X2)
XX3.append(X3)
return XX0, XX1, XX2, XX3
# return [prepare_for_prediction(df) for df in batch]
def make_prediction(df, assessment):
row = np.array(
[np.array([0, 0, 0]), np.array([0]), np.array([0]), np.array([0])], dtype=object
)
X0 = np.empty([SEQ_LENGTH, 3])
X1 = np.empty([SEQ_LENGTH])
X2 = np.empty([SEQ_LENGTH])
X3 = np.empty([SEQ_LENGTH])
Xentry = np.tile(row, (SEQ_LENGTH, 1))
# print(df)
id_array = df.to_numpy().astype(int)[-SEQ_LENGTH:]
# id_array[:] = id_array[:,idx]
# could maybe use something fancy from numpy but let's just loop
for i in np.arange(id_array.shape[0]):
Xentry[i, :] = np.split(id_array[i], split_points, axis=0)
# TODO: make this batch-y instead of silly reshaping
X0 = np.vstack(Xentry[:, 0]).reshape([1, SEQ_LENGTH, 3]).astype(float)
X1 = (
Xentry[:, 1].astype(int).reshape([1, SEQ_LENGTH])
) # this has event_id -- last one is the
X2 = Xentry[:, 2].astype(int).reshape([1, SEQ_LENGTH])
X3 = Xentry[:, 3].astype(int).reshape([1, SEQ_LENGTH])
# print(X0)
relevant_model = models[assessment]
prediction = np.argmax(relevant_model.predict([X0, X1, X2, X3], batch_size=1))
return prediction
# import gc
# i=0
# with open('submission.csv', 'w') as submission_file:
# df, ID, next_row, last_title = process_an_ID()
# df.drop(columns='installation_id', inplace=True)
# prediction = make_prediction(df, ID, last_title)
# submission_file.write(ID + ',' + str(prediction) + "\n")
# i+=1
# print(i)
# with open('submission.csv', 'w') as submission_file:
# submission_file.write("installation_id,accuracy_group" + "\n")
# while True:
# try:
# #print('Processing the next ID.')
# df, ID, next_row, last_title = process_an_ID(next_row)
# df.drop(columns='installation_id', inplace=True)
# #print(ID)
# except StopIteration:
# break
# else:
# prediction = make_prediction(df, last_title)
# submission_file.write(ID + ',' + str(prediction) + "\n")
# gc.collect()
# i+=1
# print(i)
#
#
import csv
def process_row(row):
row[0] = test_converters["event_id"](row[0])
row[2] = test_converters["timestamp"](row[2])
row[3] = test_converters["event_data"](row[3])
row[9] = test_converters["type"](row[9])
row[10] = test_converters["world"](row[10])
title = row[8]
indices_to_keep = [4, 7, 2, 3, 0, 9, 10]
row = [row[i] for i in indices_to_keep]
return row, str(title)
# ['event_id', 'game_session', 'timestamp', 'event_data', 'installation_id', 'event_count',
# 'event_code', 'game_time', 'title', 'type', 'world']
# we want
# ['installation_id', 'game_time', 'session_number', 'correct_assessment', 'event_id', 'type','world']
# haven't computed session_number, put it where timestamp is
# ditto for correct_assessment and 'event_data'
# title at end
# i = 0
MAX_BATCH_SIZE = 32
with open("unsorted_submission.csv", "w") as submission_file:
submission_file.write("installation_id,accuracy_group" + "\n")
with open("../input/data-science-bowl-2019/test.csv", "r") as csvfile:
csvreader = csv.reader(csvfile)
next(csvreader) # header row
header = [
"game_time",
"timestamp",
"correct_assessment",
"event_id",
"type",
"world",
]
# note that 'timestamp' is not the eventual column name -- times_to_numbers changes it
# to 'game_session'
a_row, last_title = process_row(next(csvreader))
rows = [a_row]
last_ID = a_row[0]
batch = {assessment: [] for assessment in assessments}
for row in tqdm(csvreader, total=1156415):
the_row, the_title = process_row(row)
ID = the_row[0]
if ID == last_ID:
rows.append(the_row)
last_title = the_title
else:
last_ID = ID
assessment = last_title
predict_ID = rows[0][0]
rows = [row[1:] for row in rows] # drop 'installation_id'
# TODO: couldn't we drop this when we add it? doesn't seem pressing
# print("Found a frame.")
df = pd.DataFrame.from_records(rows, columns=header)
df = times_to_numbers(df)
batch[assessment].append((predict_ID, df))
if len(batch[assessment]) == MAX_BATCH_SIZE:
IDs = [pair[0] for pair in batch[assessment]]
dfs = prepare_batch_for_prediction(
[pair[1] for pair in batch[assessment]]
)
predictions_raw = models[assessment].predict_on_batch(dfs)
predictions = [np.argmax(prediction) for prediction in predictions_raw]
with open("unsorted_submission.csv", "a") as submission_file:
for i, j in zip(IDs, predictions):
submission_file.write(i + "," + str(j) + "\n")
batch[assessment] = []
# i+=1
# print("Predicted on batch " + str(i) + ".")
rows = [the_row]
last_title = the_title
last_ID = ID
# need to pass the last rows to a batch
assessment = last_title
predict_ID = rows[0][0]
rows = [row[1:] for row in rows]
df = pd.DataFrame.from_records(rows, columns=header)
df = times_to_numbers(df)
batch[assessment].append((predict_ID, df))
# predict for those remaining in batch
for assessment in batch:
if batch[assessment]:
IDs = [pair[0] for pair in batch[assessment]]
dfs = prepare_batch_for_prediction([pair[1] for pair in batch[assessment]])
predictions_raw = models[assessment].predict_on_batch(dfs)
predictions = [np.argmax(prediction) for prediction in predictions_raw]
with open("unsorted_submission.csv", "a") as submission_file:
for i, j in zip(IDs, predictions):
submission_file.write(i + "," + str(j) + "\n")
batch[assessment] = [] ## TODO: bad repetition!
with open("unsorted_submission.csv", "r") as unsorted_submission:
with open("submission.csv", "w") as submission_file:
header = next(unsorted_submission)
submission_file.write(header)
for row in sorted(unsorted_submission):
submission_file.write(row)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# > This data can help you during an interview, when a recruiter asks you to draw a data visualization plot for your own dataset.
# **We perform data visualization using different plots on randomly generated data**
# importing necessary libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# **1.Histogram**
# 1. A histogram shows the distribution of data: the frequency of each range of values in the dataset.
# 2. With the help of a histogram we can see whether the data is normally distributed.
# generate random data
data = np.random.randn(1000)
plt.hist(
data, bins=10, color="r"
)  # bins represents the number of intervals into which the range of values of the dataset is divided
plt.title("Histogram of Data")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
# **2.Box Plot**
# 1. Box plots are used to show the distribution of a variable and to display the five-number summary (min, Q1, median, Q3, max).
# 2. Box plots are mostly used to detect outliers.
data = np.random.randn(100, 4)
plt.boxplot(data)
plt.title("Boxplot")
plt.xlabel("varible")
plt.ylabel("value")
plt.show()
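# The five-number summary that the box plot is built from can also be computed directly;
# a quick sketch on the first column of the same random data:
print(np.percentile(data[:, 0], [0, 25, 50, 75, 100]))  # min, Q1, median, Q3, max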
# **Scatter plot**
# 1. A scatter plot is used to show the relationship between two variables.
# For example, height versus weight.
x = np.random.randn(100)
y = np.random.randn(100)
plt.scatter(x, y)
plt.title("Scatter plot of data")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# **Heatmap**
# 1. Heatmaps are used to show the correlation between variables.
data = np.random.randn(10, 10)
plt.imshow(data, cmap="hot", interpolation="nearest")
plt.colorbar()
plt.title("Heatmap")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# The color scale indicates positive and negative correlation between the variables.
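# Since the matrix above is just raw random values, here is a small sketch of a heatmap of an actual
# correlation matrix, computed with np.corrcoef on the columns of the same random data:
corr = np.corrcoef(data, rowvar=False)  # 10x10 correlation matrix of the columns
plt.imshow(corr, cmap="hot", interpolation="nearest")
plt.colorbar()
plt.title("Correlation heatmap")
plt.show()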
# **Pie chart**
#
sizes = [15, 30, 40, 50, 90]
labels = ["A", "B", "C", "D", "E"]
plt.pie(sizes, labels=labels)
plt.title("pie chart of data")
plt.show()
# **Line plot**
# 1. Line plots are used to show the trend of a variable over time; they plot continuous values that change with time.
# For example, a stock price.
x = np.linspace(0, 10, 100)
y = np.sin(x)
plt.plot(x, y)
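# Adding a title, axis labels and show() so this plot matches the earlier examples.
plt.title("Line plot of data")
plt.xlabel("x")
plt.ylabel("y")
plt.show()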
|
# VINCENT CHEN
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
# Vincent Chen
from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
from sklearn.metrics import mean_absolute_error
import pandas as pd
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
y = train_data["Survived"]
# go through name column and change all the names to their respective title
i = 0
for name in train_data["Name"]:
train_data["Name"][i] = train_data["Name"][i].split(",")[1].split(".")[0].strip()
print(train_data["Name"][i] + "|" + str(i) + "|")
if i < 890:
i = i + 1
# Change all titles to their categories
j = 0
for name in train_data["Name"]:
if (
train_data["Name"][j] == "Capt"
or train_data["Name"][j] == "Col"
or train_data["Name"][j] == "Major"
or train_data["Name"][j] == "Dr"
or train_data["Name"][j] == "Rev"
):
train_data["Name"][j] = "Officier"
elif (
train_data["Name"][j] == "Jonkheer"
or train_data["Name"][j] == "Don"
or train_data["Name"][j] == "Dona"
or train_data["Name"][j] == "the Countess"
or train_data["Name"][j] == "Lady"
or train_data["Name"][j] == "Sir"
):
train_data["Name"][j] = "Royalty"
elif (
train_data["Name"][j] == "Mme"
or train_data["Name"][j] == "Ms"
or train_data["Name"][j] == "Mrs"
or train_data["Name"][j] == "Mlle"
):
train_data["Name"][j] = "Mrs"
elif train_data["Name"][j] == "Mr":
train_data["Name"][j] = "Mr"
elif train_data["Name"][j] == "Miss":
train_data["Name"][j] = "Miss"
elif train_data["Name"][j] == "Master":
train_data["Name"][j] = "Master"
if j < 890:
j = j + 1
# Set test_data 'Name'(s) to Titles
r = 0
for name in test_data["Name"]:
test_data["Name"][r] = test_data["Name"][r].split(",")[1].split(".")[0].strip()
if r < 890:
r = r + 1
# Change all titles to their categories
c = 0
for name in test_data["Name"]:
if (
test_data["Name"][c] == "Capt"
or test_data["Name"][c] == "Col"
or test_data["Name"][c] == "Major"
or test_data["Name"][c] == "Dr"
or test_data["Name"][c] == "Rev"
):
test_data["Name"][c] = "Officier"
elif (
test_data["Name"][c] == "Jonkheer"
or test_data["Name"][c] == "Don"
or test_data["Name"][c] == "Dona"
or test_data["Name"][c] == "the Countess"
or test_data["Name"][c] == "Lady"
or test_data["Name"][c] == "Sir"
):
test_data["Name"][c] = "Royalty"
elif (
test_data["Name"][c] == "Mme"
or test_data["Name"][c] == "Ms"
or test_data["Name"][c] == "Mrs"
or test_data["Name"][c] == "Mlle"
):
test_data["Name"][c] = "Mrs"
elif test_data["Name"][c] == "Mr":
test_data["Name"][c] = "Mr"
elif test_data["Name"][c] == "Mr":
test_data["Name"][c] = "Mr"
elif test_data["Name"][c] == "Miss":
test_data["Name"][c] = "Miss"
elif test_data["Name"][c] == "Master":
test_data["Name"][c] = "Master"
if c < 890:
c = c + 1
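# As a side note, the same title extraction and grouping can be done without explicit Python loops.
# Below is only a sketch of an equivalent vectorized approach, applied to a fresh copy of the raw file so it
# does not interfere with the frames modified above; the mapping dictionary simply mirrors the rules coded
# in the loops (including the original "Officier" spelling).
title_groups = {
    "Capt": "Officier", "Col": "Officier", "Major": "Officier", "Dr": "Officier", "Rev": "Officier",
    "Jonkheer": "Royalty", "Don": "Royalty", "Dona": "Royalty", "the Countess": "Royalty",
    "Lady": "Royalty", "Sir": "Royalty",
    "Mme": "Mrs", "Ms": "Mrs", "Mrs": "Mrs", "Mlle": "Mrs",
    "Mr": "Mr", "Miss": "Miss", "Master": "Master",
}
raw_train = pd.read_csv("/kaggle/input/titanic/train.csv")
raw_titles = raw_train["Name"].str.split(",").str[1].str.split(".").str[0].str.strip()
raw_train["Title"] = raw_titles.map(title_groups).fillna(raw_titles)
raw_train["Title"].value_counts()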
features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Fare", "Embarked"]
X = pd.get_dummies(train_data[features])
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.8, test_size=0.2, random_state=0
)
X_test_2 = pd.get_dummies(test_data[features])
model = XGBRegressor(n_estimators=100, learning_rate=0.01)
model.fit(X, y, early_stopping_rounds=40, eval_set=[(X_test, y_test)])
predictions = model.predict(X_test_2)
list_preds_2 = []
for i in range(len(predictions)):
temp = predictions[i]
if temp > 0.4:
temp = 1
else:
temp = 0
list_preds_2.append(temp)
print(list_preds_2)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": list_preds_2})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
import sys
import time
from bs4 import BeautifulSoup
import requests
import pandas as pd
url = "https://www.cricbuzz.com/"
try:
    page = requests.get(url)
except Exception as e:
error_type, error_obj, error_info = sys.exc_info()
print("ERROR FOR LINK", url)
print(error_type, "Line:", error_info.tb_lineno)
time.sleep(2)
soup = BeautifulSoup(page.text, "html.parser")
links = soup.find_all("div", attrs={"class": "cb-nws-intr"})
page
soup
for i in links:
print(i.text)
print("\n")
|
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Above shows the basics, what libraries are used and how data is pulled
# ----------------------------------------------------------------------
# Below shows the first 5 entries of the dataset
# ----------------------------------------------------------------------
data = pd.read_csv("/kaggle/input/world-happiness/2019.csv")
print(data.head())
print(data.shape)
# Below I did 3 things:
# 1. categorized the data into the input feature X and actual outcome y as training data.
# 2. fit the training set with the built-in linear regression model of sklearn
# 3. printed the R-squared score (goodness of fit) for the model
X = data[
[
"GDP per capita",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Generosity",
"Perceptions of corruption",
]
]
y = data["Score"]
print(f"Shape of X is {X.shape}")
print(f"Shape of y is {y.shape}")
linear_model = LinearRegression()
linear_model.fit(X, y)
predict = linear_model.predict(X)
print("Prediction on training set (first 5):", predict[:5])
print("Actual target Value (first 5):", y.values[:5])
goodness = linear_model.score(X, y)
print(goodness)
# After verification, Scikit-learn's LinearRegression uses the Ordinary Least Squares (OLS) method to estimate the model parameters. OLS is a closed-form solution, which means it directly computes the optimal model parameters without requiring an iterative process like gradient descent.
# Therefore, my plan from here is to learn about the meaning of some suggested metrics for sklearn's linear regression.
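# To make the closed-form idea concrete, here is a small sketch of the normal-equation solution
# beta = (X'X)^-1 X'y computed directly with numpy; it should closely match the coefficients sklearn fitted above
# (the variable names below are just for this illustration).
X_design = np.column_stack([np.ones(len(X)), X.values])  # prepend an intercept column
beta = np.linalg.solve(X_design.T @ X_design, X_design.T @ y.values)
print("Normal-equation intercept:", beta[0], "| sklearn intercept:", linear_model.intercept_)
print("Normal-equation coefficients:", beta[1:])
print("sklearn coefficients:        ", linear_model.coef_)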
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
mse = mean_squared_error(y, predict)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y, predict)
r2 = r2_score(y, predict)
print(f"Mean Squrared Error: ", mse)
print(f"Root Mean Squrared Error: ", rmse)
print(f"Mean Absolute Error: ", mae)
print(f"R-squared: ", r2)
|
# 
#
import csv
import sklearn
import scipy
import tensorflow as tf
import matplotlib.pyplot as plt
import statsmodels
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input director)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
f_format = filename.split(".")[1]
fpath = os.path.join(dirname, filename)
if "csv" in f_format:
df_allstar = pd.read_csv(fpath, sep=",")
df_allstar = df_allstar.dropna()
# **INTRODUCTION**
# Given my growing passion for the game of basketball and the tragic passing of Kobe Bryant, I saw a nice opportunity to get my hands on this dataset I came across a few weeks ago, containing some of the data collected for the All-Star games played between 2000 and 2016, both editions included. Although at first glimpse there seems to be no abundance of data, I decided to answer the various questions that arose while having a first look at it. Along these lines, the following sections, or cells, show a chronological breakdown of the questions that came up throughout the process and the corresponding attempts to provide a straight solution in a pandas environment with one-liners (if that expression exists :). For that I try to move away from old Python programming patterns and leverage the possibilities that pandas methods bring, and, given that I have lately been quite involved in programming with MySQL, I look for programming patterns that can be carried over to derive insights in a better, more optimized way with as little logic behind them as possible.
# The goals that stand out are further familiarization with the built-in methods of pandas and tinkering with some visualization tools, placing special emphasis on dynamic color mapping for graphs. With all that, I modestly tried to raise questions openly, keeping in mind that the more open and spontaneous the question is, the more variability there can be in tackling the problem programmatically.
df_allstar.head()
df_allstar.info()
df_allstar.dtypes
#
# A few changes, conversions and transformations follow, namely:
# ***Data Cleaning, Preparation*** -> "HT" and "NBA Draft Status" store their data as "str" (object). Provided that those values are likely to be part of a computation where aggregation is required, we turn them into numeric types. Notice how we make use of regex patterns to simplify the transformation process with .str.extract(). The returned Series objects are stored in three newly created columns ["NBA Draft Year", "NBA Draft Round", "NBA Draft Pick"].
#
#### Transforming Data one column at a time and expanding dimension space.
df_allstar["HT"] = df_allstar["HT"].str.replace("-", ".")
df_allstar["HT"] = df_allstar["HT"].astype(float)
df_allstar["NBA Draft Year"] = df_allstar["NBA Draft Status"].str.extract(r"([0-9]{4})")
df_allstar["NBA Draft Year"] = df_allstar["NBA Draft Year"].astype(int)
df_allstar["NBA Draft Round"] = df_allstar["NBA Draft Status"].str.extract(r" ([0-9]) ")
df_allstar["NBA Draft Round"] = df_allstar["NBA Draft Round"].astype(int)
df_allstar["NBA Draft Pick"] = df_allstar["NBA Draft Status"].str.extract(
r"((?<=Pick )\d)"
)
df_allstar["NBA Draft Pick"] = df_allstar["NBA Draft Pick"].astype(int)
df_allstar["NBA Drafted"] = df_allstar["NBA Draft Status"].apply(
lambda x: 1 if "NBA" in x else 0
)
df_allstar.drop(columns=["NBA Draft Status"])
df_allstar.head()
# ***Data Cleaning, Preparation*** -> we convert those resulting "str" values into "int" or "float".
# ***Data Cleaning, Preparation*** -> Last but not least, we drop the original column from which we generated the three columns : [NBA Draft Year, NBA Round, NBA Pick] and check the results.
#
df_allstar = df_allstar.drop(columns="NBA Draft Status")
df_allstar.head()
df_allstar.hist(figsize=(25, 10))
# *** FAN QUESTIONS ***
# A bunch of questions coming from a pure basketball lover carries plenty of bias :) so don't be surprised if you come across questions that seem a bit outside the scope of what you would imagine when dealing with this type of data. This often happens when you hang around too much with hoopers like you, whose main interest is getting into the meaty details and the gossip around the game of basketball.
# 1 - **Is it Mamba who holds the record of All-Star participations within this period?** I assumed he would be at the top end
# by number of participations, but let's break it down player by player.
#
#
# Notice how .groupby() function in pandas allows to group data on an index basis. When given a list of columns, pandas finds unique instances of unique values for this columns and crunches down the dataframe on which the operation is performed.
#
df_participations = (
df_allstar.groupby(["Player"])
.count()
.sort_values(by="Year", ascending=False)["Year"]
)
## Serializing data for possible future reusability.
participations_dict = df_participations.to_dict()
### Plotting in a pandas fashion.
df_participations.reset_index().rename(columns={"Year": "Nr.Participations"})
# 2. **Which teams bring players with the highest average weights?**
# Note -> We compute average weights per team and per year, round the numbers and sort the values in descending order. Only the first 20 instances (by weight) are printed.
dfAvgWeights = (
df_allstar.groupby(["Year", "Team"])["WT"]
.mean()
.round(2)
.sort_values(ascending=False)
)
print(dfAvgWeights[:20])
# 3 - **How long does it take players from the moment they get drafted until they make it to the All-Star game?**
# * Note 1: **x["Year"].min() - x["NBA Draft Year"].min()** --> This operation subtracts the player's draft year from the earliest year in which the player took part in the All-Star game.
#
# * Note 2: **reset_index()** --> converts the indexed Series object "RoadToAllStar" back into a DataFrame, which is then merged with df_allstar on "Player" to produce df_final
RoadToAllStar = df_allstar.groupby(["Player"]).apply(
lambda x: x["Year"].min() - x["NBA Draft Year"].min()
)
df_final = pd.merge(df_allstar, RoadToAllStar.reset_index(), on="Player", how="inner")
df_final.head()
# * **Note 3:** In our temporary table a new column [0] gets appended along the horizontal axis.
df_allstar["DraftToAllStar"] = df_final.dropna()[0]
df_allstar = df_allstar.dropna()
df_allstar["DraftToAllStar"] = df_allstar["DraftToAllStar"].astype(int)
df_allstar.head()
# 
# * Allen Iverson had huge fan support over the years, so he didn't end up needing to be selected by the coaches that often. Given that the position he played over the years was Guard, I decided to take a subset of the original data that meets the condition Pos == "G" to see how he compared to other players and what other relationships could be inferred from it.
# * 4.1. First off, Allen Iverson ranks high up the list, but **who is next to him?**
#
position = "G"
df_allstar.where(df_allstar["Pos"] == position).groupby(["Player"]).count().sort_values(
by="Year", ascending=False
)["Year"]
# * 4.2. Following up, **for each player, is there any selection type that occurs more often than the others?**
columns = df_allstar.columns.values
clause = df_allstar["Pos"] == position
filtered_df = df_allstar.where(clause).dropna()
GroupingColumns = ["Player", "Selection Type"]
grouped_df = filtered_df.groupby(["Player", "Selection Type"]).count()["Year"]
print(grouped_df)
# * 4.3. Now we reformulate the question: for each selection type, we want to see which players were selected and how many times.
GroupColumns = ["Selection Type", "Player"]
grouped_df = filtered_df.groupby(GroupColumns).count()["Year"]
print(grouped_df)
# * 5. Which positions do coaches tend to vote for more in an All-Star game?
#
coach_selection = df_allstar.where(
df_allstar["Selection Type"].str.contains("Coaches")
).dropna()
coach_position_selection = (
coach_selection.groupby(["Pos"])
.count()
.sort_values(by="Year", ascending=False)["Year"]
)
print(coach_position_selection)
# Having focused primarily on MySQL Language over the last year in my career I tend to look at analysis on tabular data more and more as I do when I work with MySQL. The same way it would in this language pandas allows to concatenate a series of instructions, and compute in one go the desired instruction.
# Needless to say that when this "query" gets very large its not recommendable to put all that syntax in one long line, instead breaking down the instructions into a few lines would be the way to go, for reusability and readibility.
# 6. **Apart from the USA, which other nationalities have had a significant contribution to the AllStar in terms of country representation?**
#
df_allstar["Nationality"].value_counts().reset_index().rename(
columns={"index": "Country"}
).style.set_properties(**{"text-align": "left"})
# ##### DATA VISUALIZATION #####
#
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
# 4.1 - We plot players against their total number of participations in the AllStar.
# Note: seaborn palettes are used to enhance the visualization by grading to darker tones as the number of participations increases.
# df1 receives the computed dataframe resulting from grouping/aggregating/renaming/sorting.
# Additionally, we can customize the number of samples, that is, the number of players shown, by selecting one out of every 3 players (n_subsampling).
title1 = "Number of Participations per Player"
fig1, ax1 = plt.subplots(figsize=(10, 15))
###### Extracting label and scalar data and plotting
df1 = (
df_allstar.groupby(["Player"])["Year"]
.count()
.reset_index()
.rename(columns={"Year": "Total(Years)"})
.sort_values(by="Total(Years)", ascending=False)
)
n_subsampling = 3
blue_palette = sns.cubehelix_palette(
    n_colors=len(df1["Total(Years)"][::n_subsampling]), start=0.2, rot=0.7, reverse=True
)
ax1.tick_params(pad=30)
plt.title(title1)
ax1.barh(
    df1["Player"][::n_subsampling],
    df1["Total(Years)"][::n_subsampling],
    height=0.7,
    color=blue_palette,
)
# 4.2 In a similar vein, we visualize the distribution of participations "Total(Years)" per "Team", which is self-explanatory.
fig2, ax2 = plt.subplots(figsize=(10, 15))
df2 = (
df_allstar.groupby(["Team"])["Year"]
.count()
.reset_index()
.rename(columns={"Year": "Total(Years)"})
.sort_values(by="Total(Years)", ascending=False)
)
x2, y2 = df2["Total(Years)"], df2["Team"]
purple_palette = sns.cubehelix_palette(
n_colors=len(y2), start=2.0, rot=0.1, reverse=True
)
ax2.tick_params(pad=30)
ax2.barh(y2, x2, height=0.7, color=purple_palette)
plt.tight_layout()
plt.show()
|
# # 1. Introduction
# I'll use this dataset with lots of face images of famous people to try to blend them two by two into a single image and imagine what such a mix would look like. An incredibly fun and troublesome exercise.
# We start by importing what is necessary for the project:
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers, optimizers
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib import image as mpimg
import cv2
import os
# # 2. Creating dataframe
# I'm going to need a dataframe to better select and track who is being mixed with whom.
def create_df(data_path):
origin_path = None
data = {}
for dirpath, dirnames, filenames in os.walk(data_path):
for filename in filenames:
if filename.endswith(".jpg"):
if origin_path is None:
origin_path = dirpath.replace(dirpath.split("/")[-1], "")
name = dirpath.split("/")[-1]
if name not in data:
data[name] = []
data[name].append(filename)
df = pd.DataFrame({name: pd.Series(files) for name, files in data.items()})
return origin_path, df
# Set the path for the dataset
database_path = "/kaggle/input"
# Create the dataset
origin_path, df = create_df(database_path)
df
# For this first project I'll use only those celebrities who have at least 25 images in this dataset.
# Maybe we can do a second and third part later with other famous people.
# (It really depends more on the free available GPUs and RAM out there than anything else)
def filter_df(df):
for col in df:
if (
df[col].isnull().sum() > 4
): # Greater the number, the more people are included in the sample
df.drop(col, axis=1, inplace=True)
df.dropna(
inplace=True
) # Used to make everyone have the same number of images to train
return df
# Take a sample from the original images
df = filter_df(df)
df.shape
# Sorting the columns alphabetically to prevent randomness.
df.sort_index(axis=1, inplace=True)
df.columns
# In the future, if we need to drop one or some particular columns, it can be easily done with the following command, for example:
# df.drop(df.columns[[0]], axis=1, inplace=True)
# # 3. Preparing images
# After selecting the images for this project, we are going to resize them to 160 x 160 pixels, and finally save these into a new folder.
# Load image from file
def open_image(path, col, row, df):
source_path = os.path.join(path, col, str(df[col][row]))
img = cv2.imread(source_path)
return img
# Resize to desired size
def resize_image(img):
img = cv2.resize(img, img_size)
# Convert BGR color space to RGB color space
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
# Save the images to disk
def save_image(path, col, row, df, img):
destiny_path = os.path.join(path, col, df[col][row])
os.makedirs(os.path.dirname(destiny_path), exist_ok=True)
# Save the image using matplotlib
mpimg.imsave(destiny_path, img)
def resize_database(df):
for col in df.columns:
for row in df.index:
img = open_image(origin_path, col, row, df)
img = resize_image(img)
save_image(resize_path, col, row, df, img)
# Set the image size
img_size = (160, 160)
# Set the path for the directory to save the images
resize_path = "/kaggle/working/sample"
# Make a proper sample to train
resize_database(df)
# # 4. Making GAN models
# A Generative Adversarial Network consists of a generator model and a discriminator model.
# - The latter trains on real images to learn what they look like and then classifies inputs, for instance, either as true faces or as fake ones.
# - The former generates random images, trying to mimic the real ones to the point of fooling the discriminator into treating them as true images.
# Therefore it's really a cat-and-mouse game. Here are the models and their loss functions:
# Define the generator model
def make_generator_model(latent_dim):
model = tf.keras.Sequential()
# Input layer
model.add(layers.Dense(10 * 10 * 512, use_bias=False, input_shape=(latent_dim,)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Reshape((10, 10, 512)))
assert model.output_shape == (None, 10, 10, 512)
# First convolutional transpose layer
model.add(
layers.Conv2DTranspose(
256, (5, 5), strides=(2, 2), padding="same", use_bias=False
)
)
assert model.output_shape == (None, 20, 20, 256)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
# Second convolutional transpose layer
model.add(
layers.Conv2DTranspose(
128, (5, 5), strides=(2, 2), padding="same", use_bias=False
)
)
assert model.output_shape == (None, 40, 40, 128)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
# Third convolutional transpose layer
model.add(
layers.Conv2DTranspose(
64, (5, 5), strides=(2, 2), padding="same", use_bias=False
)
)
assert model.output_shape == (None, 80, 80, 64)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
# Fourth convolutional transpose layer
model.add(
layers.Conv2DTranspose(
32, (5, 5), strides=(2, 2), padding="same", use_bias=False
)
)
assert model.output_shape == (None, 160, 160, 32)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
# Output layer
model.add(
layers.Conv2DTranspose(
3, (5, 5), strides=(1, 1), padding="same", use_bias=False, activation="tanh"
)
)
assert model.output_shape == (None, 160, 160, 3)
return model
# Define the discriminator model
def make_discriminator_model(img_shape):
model = tf.keras.Sequential()
# First convolutional layer
model.add(
layers.Conv2D(64, (5, 5), strides=(2, 2), padding="same", input_shape=img_shape)
)
assert model.output_shape == (None, img_shape[0] // 2, img_shape[1] // 2, 64)
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
# Second convolutional layer
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding="same"))
assert model.output_shape == (None, img_shape[0] // 4, img_shape[1] // 4, 128)
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
# Third convolutional layer
model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding="same"))
assert model.output_shape == (None, img_shape[0] // 8, img_shape[1] // 8, 256)
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
# Output layer
model.add(layers.Flatten())
assert model.output_shape == (None, (img_shape[0] // 8) * (img_shape[1] // 8) * 256)
model.add(layers.Dense(1))
return model
# Define the loss functions for the generator and discriminator
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output):
cross = cross_entropy(tf.ones_like(fake_output), fake_output)
return cross
# Since we are already loading the images two by two for training, I'll also generate their mixes right afterwards in order to keep track of their origin.
# Define the training loop for the GAN
def train_gan(
generator,
discriminator,
generator_loss,
discriminator_loss,
generator_optimizer,
discriminator_optimizer,
dataset,
latent_dim,
epochs,
):
for epoch in range(epochs):
for images in dataset:
# Generate random noise vector
noise = tf.random.normal([batch_size, latent_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# Generate fake images from the noise vector
generated_images = generator(noise, training=True)
# Evaluate the discriminator on both real and fake images
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
# Calculate the loss for the generator and discriminator
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
# Compute the gradients for both generator and discriminator
gradients_of_generator = gen_tape.gradient(
gen_loss, generator.trainable_variables
)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables
)
# Apply the gradients to the generator and discriminator
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables)
)
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables)
)
print(f"Epoch {epoch + 1} complete")
# Define the generating loop for the mixed images
def generate_images(image1_path, image2_path, n_images):
for i in range(n_images):
# Generate random noise vector
noise = tf.random.normal([batch_size, latent_dim])
# Use the generator model to generate a fake image from the noise vector
mixed_image = generator.predict(noise)[0]
# Save the mixed image
output_path = os.path.join(
"/kaggle/working/",
"mixed_images",
f"{image1_path}_{image2_path}",
f"{i+1}_mixed_image.jpg",
)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
tf.keras.preprocessing.image.save_img(output_path, mixed_image)
print(f"{i+1} mixed image(s)")
def init_models(latent_dim, images):
generator = make_generator_model(latent_dim)
discriminator = make_discriminator_model(images.shape[1:])
return generator, discriminator
def prepare_dataset(images):
imageset = np.array(images)
dataset = tf.data.Dataset.from_tensor_slices(imageset)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
return imageset, dataset
# Normalize the pixels to values between -1 and 1
def normal_image(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = (img - 127.5) / 127.5
return img
# # 5. Running everything
# In our loop we have to prevent pointless mixes like:
# - a person with themselves;
# - pairs of people that have already been mixed.
# I recommend training for at least 2000 epochs with the freely available GPU and RAM, and maybe two or three times that if you can afford to pay - here on Kaggle I'll run only one epoch as an example.
# It takes a long time, so take the chance and generate as many images as you want in the process - here on Kaggle I'll generate only one as an example.
# Set the batch size and the latent dim used for generating random images
batch_size = 128
latent_dim = 150
mixed_pairs = set()
for name1 in df.columns:
for name2 in df.columns:
# Check if it's the same person
if name1 == name2:
continue
# Check if it's the same pair of people
pair_key = frozenset([name1, name2])
if pair_key in mixed_pairs:
continue
mixed_pairs.add(pair_key)
# Load the selected images
images = []
for row in df.index:
img1 = open_image(resize_path, name1, row, df)
img2 = open_image(resize_path, name2, row, df)
images.append(normal_image(img1))
images.append(normal_image(img2))
# Initialize dataset and models
images, dataset = prepare_dataset(images)
generator, discriminator = init_models(latent_dim, images)
# Define the optimizer for the generator and discriminator
generator_optimizer = optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
discriminator_optimizer = optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
# Train the GAN on the dataset
train_gan(
generator,
discriminator,
generator_loss,
discriminator_loss,
generator_optimizer,
discriminator_optimizer,
dataset,
latent_dim,
epochs=1,
) # You might want to train thousands of epochs
# Generate mixed images using the generator model
generate_images(
name1, name2, n_images=1
) # You might want to generate dozens of images
# Prevent memory overload
tf.keras.backend.clear_session()
# break # Use this break if you only want one pair mixed
print(f"End of {name1} folder")
# break # Use this break if you only want one person mixed with everyone else
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
files = []
for dirname, _, filenames in os.walk("/kaggle/working/weibo-trending-hot-search/raw/"):
for filename in filenames:
files.append(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
names = [
"韩庚",
"王一博",
"孟美琪",
"范丞丞",
"黄明昊",
"吴宣仪",
"朱正廷",
"李文翰",
"程萧",
"毕文珺",
"黄新淳",
"胡春杨",
"周艺轩",
"王晰",
"唐九洲",
"许雅婷",
"金子涵",
]
result, error_dates = [], []
for file_name in files:
file_date = file_name.split("/")[-1].split(".")[0]
file = pd.read_json(file_name)
for name in names:
try:
p_data = {
"name": name,
"count": file.title.str.contains(name).sum(),
"date": file_date,
"day_total_count": len(file),
}
result.append(p_data)
except:
error_dates.append({"file": file_name, "date": file_date})
df = pd.DataFrame(result)
df["date"] = pd.to_datetime(df.date)
df
s = (
df.set_index("date")
.groupby("name")
.apply(lambda x: x.sort_index()["count"].cumsum())
)
s.to_excel("cumsum.xlsx")
plt.rcParams["font.sans-serif"] = ["SimHei"]  # set the font (SimHei renders Chinese characters)
# plt.rcParams["axes.unicode_minus"]=False  # this line fixes the garbled "-" minus sign in plots
s.T.plot()
s.T["吴宣仪"].plot()
df = df.loc[(df.date <= "2023-1-9") & (df.date > "2022-1-9")]
trend_count = df.groupby("name").agg("sum")["count"].to_frame()
trend_count
trend_count["weight"] = trend_count["count"] / trend_count["count"].sum()
trend_count
trend_count.to_excel("trend_eda.xlsx")
|
# # Python Syntax
print("Okula gidiyorum.")
# # Numbers
5 + 9
9 - 5
5 * 9
9 / 3
if 3 > 1:
print("Üç birden büyüktür.")
type("Selam")
"""
whenever you want to write
a comment in Python,
you can use the triple quote symbol
"""
print("Python harikadır.")
x = 2
y = 3
print(x)
print(y)
x + y
r = x + y
r
k = "Muhammed"
k
a = 6  # integer, int
b = "Merhaba" # string, str
c = 6.5 # float
print(a)
print(b)
print(c)
# # Data Types
u = 2
print(type(u))
h = "Hayat güzeldir."
print(type(h))
x = y = z = "kitap"
print(x)
print(y)
print(z)
ulkeler = ["Polonya", "Yunanistan", "Belçika"]
a, b, c = ulkeler
print(a)
print(b)
print(c)
p = 9
def myfunc():
print(5 + p)
myfunc()
j = "uçuyor."
def myfun():
print("Kuşlar " + j)
myfun()
def myfunc():
global x
x = "soğuk."
myfunc()
print("Hava çok " + x)
n = 5
b = 4
if n < b:
    print("n<b")
else:
    print("n>b")
bool3 = True
type(bool3)
kediler = 2
def KacTaneKedinVar():
global kediler
kediler = 3
print(kediler)
kediler
KacTaneKedinVar()
|
# # Ensemble Techniques
# > All the machine learning algorithms we have seen so far have their advantages and disadvantages: in certain situations an algorithm will give us a better result, and in others it won't. Ensemble learning uses a group of predictors in order to increase accuracy, reduce bias, etc. It combines the predictions of several base estimators built with a given learning algorithm in order to increase accuracy (a small sketch of this idea follows the list below).
# ## The Ensemble techniques that we are going to use here are:
# > Bagging
# > RandomForest
# > Boosting (AdaBoost and Gradient Boost)
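# A minimal sketch (toy data, hypothetical choice of base estimators - not the models
# used later in this notebook) of the core ensemble idea: several base estimators are
# trained and their predictions are combined by majority vote.
from sklearn.datasets import make_classification
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

X_demo, y_demo = make_classification(n_samples=200, random_state=0)
voting_clf = VotingClassifier(
    estimators=[
        ("lr", LogisticRegression(max_iter=1000)),
        ("dt", DecisionTreeClassifier(random_state=0)),
        ("knn", KNeighborsClassifier()),
    ],
    voting="hard",  # majority vote over the base estimators' predicted classes
)
voting_clf.fit(X_demo, y_demo)
print(voting_clf.score(X_demo, y_demo))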
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
stroke_data = pd.read_csv(
"../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv"
)
stroke_data.head()
# ### Before going on to the machine learning algorithms, let's perform some EDA (Exploratory Data Analysis)
stroke_data.shape
# Total Null Values
stroke_data.isna().sum()
# ## Column BMI has 201 nan values
# #### We have multiple ways to handle nan values: either we can remove them or we can perform imputation, such as filling nan values with the mean or the median value. In this case I am going to fill the nan values with the mean.
stroke_data.describe()
stroke_data["bmi"].fillna(stroke_data["bmi"].mean(), inplace=True)
stroke_data.isna().sum()
# Let's look at the data once more to confirm that the nan values in bmi have been replaced with the mean value.
stroke_data.head()
# As the id column has no use for us, we will remove it
stroke_data.drop(columns={"id"}, inplace=True)
# Just a little description of why I use inplace=True there. If inplace=True is not given, the function returns a new dataframe that we need to store again. The default value for inplace is False.
# df = stroke_data.drop(columns={'id'}, inplace=False)
# > will return a dataframe without the column id and assign it to df,
# whereas
# stroke_data.drop(columns={'id'}, inplace=True)
# > will remove id in place, writing the result back to stroke_data.
# ### Visualize Data
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
rcParams["figure.figsize"] = 12, 12
sns.countplot(stroke_data["heart_disease"])
# The countplot of heart_disease shows that only a small number of patients have heart disease.
plt.figure(figsize=(15, 15))
fig, axs = plt.subplots(1, 3)
axs[0].boxplot(stroke_data["age"])
axs[0].set_title("Age", size=20)
axs[1].boxplot(stroke_data["avg_glucose_level"])
axs[1].set_title("Glucose Level", size=20)
axs[2].boxplot(stroke_data["bmi"])
axs[2].set_title("BMI", size=20)
sns.distplot(stroke_data["avg_glucose_level"], color="blue", label="Glucose Level")
sns.distplot(stroke_data["bmi"], color="red", label="Body Mass Index")
# #### avg_glucose_level has the highest number of outliers. We can also identify the outliers in Python using the IQR method
# Finding the all outliers inside the glucose column
def FindOutliers(data):
outliers = []
Q1, Q3 = data.quantile([0.25, 0.75])
IQR = Q3 - Q1
upper_range = Q3 + IQR * (1.5)
lower_range = Q1 - IQR * (1.5)
for x in data:
if x > upper_range or x < lower_range:
outliers.append(x)
return outliers, upper_range, lower_range
# Outliers for the column avg_glucose_level
outliers_glucose_level, upper_glucose_lev, lower_glucose_lev = FindOutliers(
stroke_data["avg_glucose_level"]
)
# Outliers for the column bmi
outliers_bmi, upper_bmi, lower_bmi = FindOutliers(stroke_data["bmi"])
# Total number of outliers in these two columns
print(len(outliers_glucose_level), len(outliers_bmi))
# The total number of outliers is 627 out of almost 5000 records, so if we removed them we might lose a lot of information; let's choose another method for handling outliers. I will do capping (replacing the larger outliers with the upper range and the smaller outliers with the lower range)
# Applying capping for the glucose level column
stroke_data["avg_glucose_level"] = np.where(
stroke_data["avg_glucose_level"] < lower_glucose_lev,
lower_glucose_lev,
stroke_data["avg_glucose_level"],
)
stroke_data["avg_glucose_level"] = np.where(
stroke_data["avg_glucose_level"] > upper_glucose_lev,
upper_glucose_lev,
stroke_data["avg_glucose_level"],
)
# Performing Capping for the Bmi column
stroke_data["bmi"] = np.where(
stroke_data["bmi"] < lower_bmi, lower_bmi, stroke_data["bmi"]
)
stroke_data["bmi"] = np.where(
stroke_data["bmi"] > upper_bmi, upper_bmi, stroke_data["bmi"]
)
stroke_data.describe()
plt.figure(figsize=(15, 15))
fig, axs = plt.subplots(1, 2)
axs[0].boxplot(stroke_data["avg_glucose_level"])
axs[0].set_title("Glucose Level", size=20)
axs[1].boxplot(stroke_data["bmi"])
axs[1].set_title("BMI", size=20)
# ### Now we have handled the outliers. Let's handle the categorical data.
# ## Categorical Data
# ### For the categorical data we could use dummy variables or a LabelEncoder; I prefer label encoding as it is easy to decode a particular label back later after predicting, if needed (see the small sketch after the encoding step below).
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
en_stroke_data = stroke_data.apply(
    le.fit_transform
)  ## the en_ prefix is simply a reminder that this data is encoded and will mostly be used from now on
en_stroke_data.head()
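# A minimal sketch (throwaway encoder, example labels) of the decoding advantage
# mentioned above: a fitted LabelEncoder can map the encoded values back to the
# original categories via inverse_transform.
_demo_le = LabelEncoder()
_demo_codes = _demo_le.fit_transform(["Urban", "Rural", "Rural", "Urban"])
print(_demo_codes)  # [1 0 0 1]
print(_demo_le.inverse_transform(_demo_codes))  # back to the original labels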
# ### Machine Learning algorithms.
# As we said in the beginning about ensemble techniques, it is time to apply them one by one and see which one gives the better result.
# Dependent(Response) variable Y and Independent(Predictor) variable X.
X = en_stroke_data.iloc[:, :-1]
y = en_stroke_data.iloc[:, -1]
# ### Train Test Split:
# Lets split our data into train and test sets. As we have almost 5000 records we will use 70-30 split.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=30
)
# ## Bagging.
# The first algorithm we will use is bagging. Bagging is short for bootstrap aggregation, a machine learning ensemble meta-algorithm designed to improve the accuracy and stability of machine learning algorithms. Bootstrap is a sampling technique where, out of the n samples available, k samples are chosen with replacement. We then run our algorithm (e.g. a Decision Tree Classifier) on each of these samples. The point is to make the sampling truly random. Aggregation here means the predictions of all the models are combined to make the final predictions (a small sketch of the bootstrap step follows).
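# A minimal sketch (toy row indices; numpy is assumed to be imported above) of the
# bootstrap step itself: sampling n indices with replacement, so some rows repeat and
# others are left out.
rng = np.random.default_rng(0)
toy_rows = np.arange(10)  # pretend these are the indices of 10 training rows
bootstrap_sample = rng.choice(toy_rows, size=10, replace=True)
print(bootstrap_sample)  # some indices appear more than once, some not at all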
# ### Code for Bagging:
#
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
bag_clf = BaggingClassifier(
DecisionTreeClassifier(),
    n_estimators=500,  ## Total number of decision trees that will be used to train the ensemble is 500
    max_samples=100,  ## each tree is trained on 100 training instances randomly sampled from the training set with replacement
    bootstrap=True,  ## bootstrap=True means use the bagging method; if this option is set to False it becomes the pasting method, which we don't cover here
    n_jobs=-1,  ## n_jobs is how many CPU cores will be used to train the ensemble; -1 means all of them
)
bag_clf.fit(x_train, y_train)
# Making predictions
y_pred_bagging = bag_clf.predict(x_test)
# ### Accuracy test of our model using confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred_bagging)
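# The 1468 below is the number of correctly classified test samples read off the
# confusion matrix above (sum of its diagonal); dividing by the size of the test set
# gives the accuracy as a percentage.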
(1468 / len(y_test)) * 100
# #### So far we have achieved 95.7% accuracy with this model.
# ## RandomForest
# The second algorithm that we will use is random forest. Here "forest" means we will have n trees. The Random Forest algorithm introduces extra randomness when growing trees; instead of searching for the very best feature when splitting a node, it searches for the best feature among a random subset of features. This results in greater tree diversity, which (once again) trades a higher bias for a lower variance, generally yielding an overall better model.
# Building a randomForest model
from sklearn.ensemble import RandomForestClassifier
random_forest_clf = RandomForestClassifier(
    n_estimators=350,  ## Training the ensemble using 350 decision trees; we can use any number here depending on the speed of our machine
    max_leaf_nodes=15,  ## Each tree will have a maximum of 15 leaf nodes
n_jobs=-1,
)
random_forest_clf.fit(x_train, y_train)
y_pred_rf = random_forest_clf.predict(x_test)
from sklearn.metrics import accuracy_score
acc = (accuracy_score(y_test, y_pred_rf)) * 100
print(f"{round(acc, 2)}% of Accuracy")
# #### Seems like we got the same accuracy here.
# ## Boosting:
# Boosting (originally called hypothesis boosting) refers to any ensemble method that can combine several weak learners into a strong learner. The idea of boosting is to train predictors sequentially, each one trying to correct its predecessor. The boosting methods that we will use are AdaBoost and Gradient Boost.
# ### AdaBoost:
# Every time we make errors, focusing on the wrongly predicted instances can help increase the accuracy. That is how AdaBoost works: the first base classifier is trained and predictions are made on the training set. The relative weight of all misclassified training instances is then increased. A second classifier is trained using the updated weights, it makes predictions, the weights of the misclassified instances are increased again, and so on. This continues until we get the best accuracy.
# Each subsequent predictor gets boosted weights for the records misclassified by its predecessor and improves accordingly (a small sketch of the weight update follows).
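# A minimal sketch (toy arrays, a simplified update rule - not sklearn's exact scheme)
# of the AdaBoost idea: samples the current predictor gets wrong receive larger weights
# before the next predictor is trained.
y_true_toy = np.array([0, 1, 1, 0, 1])
y_pred_toy = np.array([0, 0, 1, 0, 0])  # predictions of the first weak learner
sample_weights = np.full(len(y_true_toy), 1 / len(y_true_toy))
misclassified = y_true_toy != y_pred_toy
sample_weights[misclassified] *= 2.0  # boost the weights of the misclassified samples
sample_weights /= sample_weights.sum()  # renormalize so the weights sum to 1
print(sample_weights)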
# Building our Adaboost ensemble model
from sklearn.ensemble import AdaBoostClassifier
adaboost_clf = AdaBoostClassifier(
DecisionTreeClassifier(), n_estimators=400, learning_rate=0.6
)
adaboost_clf.fit(x_train, y_train)
adaboost_pred = adaboost_clf.predict(x_test)
confusion_matrix(y_test, adaboost_pred)
acc_boost = (accuracy_score(y_test, adaboost_pred)) * 100
print(f"{round(acc_boost, 2)}% Accuracy achieved")
# Finally, we can see that AdaBoost classifies the data into both of the categories. The algorithms used previously gave a better accuracy, but they were not able to classify both categories.
# ## Gradient Boost:
# The last algorithm (in this notebook) that we will use here is Gradient Boost. Gradient Boost is a popular boosting algorithm; just like AdaBoost, it works by sequentially adding predictors to an ensemble, each one correcting its predecessor. However, instead of tweaking the instance weights at every iteration like AdaBoost does, this method fits the new predictor to the residual errors made by the previous predictor (a small sketch of this idea follows).
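# A minimal sketch (toy 1-D data, shallow regression trees - not the classifier used
# below) of the residual-fitting idea: a second tree is trained on the errors of the
# first, and their predictions are summed.
from sklearn.tree import DecisionTreeRegressor

X_toy = np.arange(10).reshape(-1, 1)
y_toy = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
tree1 = DecisionTreeRegressor(max_depth=1).fit(X_toy, y_toy)
residuals = y_toy - tree1.predict(X_toy)  # errors made by the first predictor
tree2 = DecisionTreeRegressor(max_depth=1).fit(X_toy, residuals)
y_boosted = tree1.predict(X_toy) + tree2.predict(X_toy)  # combined prediction
print(np.abs(y_toy - y_boosted).mean())  # smaller mean error than tree1 alone on this toy data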
from sklearn.ensemble import GradientBoostingClassifier
gradient_clf = GradientBoostingClassifier(n_estimators=2000, learning_rate=0.5)
gradient_clf.fit(x_train, y_train)
grad_pred = gradient_clf.predict(x_test)
confusion_matrix(y_test, grad_pred)
grad_acc = (accuracy_score(y_test, grad_pred)) * 100
print(f"{round(grad_acc, 2)}% Accuracy")
|
# # **HnM RecSys Notebook 9417**
# ## **Table of Contents**
# * [**1. Imports**](#Imports)
# * [**2. Pre-Processing**](#Pre-Processing)
# * [**3. Exploratory Data Analysis**](#Exploratory-Data-Analysis)
# * [**3.1 Articles**](#EDA::Articles)
# * [**3.2 Customers**](#EDA::Customers)
# * [**3.3 Transactions**](#EDA::Transactions)
# ## Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import os
import re
import warnings
# import cudf # switch on P100 GPU for this to work in Kaggle
# import cupy as cp
# Importing data
articles = pd.read_csv("articles.csv")
print(articles.head())
print("--")
customers = pd.read_csv("customers.csv")
print(customers.head())
print("--")
transactions = pd.read_csv("transactions_train.csv")
print(transactions.head())
print("--")
# ## Pre-Processing
# ----- empty value stats -------------
print("Missing values: ")
print(customers.isnull().sum())
print("--\n")
print("FN Newsletter vals: ", customers["FN"].unique())
print("Active communication vals: ", customers["Active"].unique())
print("Club member status vals: ", customers["club_member_status"].unique())
print("Fashion News frequency vals: ", customers["fashion_news_frequency"].unique())
print("--\n")
# ---- data cleaning -------------
customers["FN"] = customers["FN"].fillna(0)
customers["Active"] = customers["Active"].fillna(0)
# replace club_member_status missing values with 'LEFT CLUB' --> no members with LEFT CLUB status in data
customers["club_member_status"] = customers["club_member_status"].fillna("LEFT CLUB")
customers["fashion_news_frequency"] = customers["fashion_news_frequency"].fillna("None")
customers["fashion_news_frequency"] = customers["fashion_news_frequency"].replace(
"NONE", "None"
)
customers["age"] = customers["age"].fillna(customers["age"].mean())
customers["age"] = customers["age"].astype(int)
articles["detail_desc"] = articles["detail_desc"].fillna("None")
print("Customers' Missing values: ")
print(customers.isnull().sum())
print("--\n")
# ---- memory optimizations -------------
# reference: https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65
# iterate through all the columns of a dataframe and reduce the int and float data types to the smallest possible size; e.g. customer_id should not be reduced from int64 to a smaller type as it would have collisions (see the small sketch below)
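# A small sketch (toy values; numpy is assumed to be imported above) of why id-like
# columns should stay int64: two distinct 64-bit ids that differ only in their high
# bits collide after downcasting to int32, because only the low 32 bits survive.
demo_ids = np.array([2**40 + 5, 2**41 + 5], dtype=np.int64)
print(demo_ids)  # two distinct ids
print(demo_ids.astype(np.int32))  # both become 5 -> a collision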
import numpy as np
import pandas as pd
def reduce_mem_usage(df):
"""Iterate over all the columns of a DataFrame and modify the data type
to reduce memory usage, handling ordered Categoricals"""
# check the memory usage of the DataFrame
start_mem = df.memory_usage().sum() / 1024**2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type == "category":
if df[col].cat.ordered:
# Convert ordered Categorical to an integer
df[col] = df[col].cat.codes.astype("int16")
else:
# Convert unordered Categorical to a string
df[col] = df[col].astype("str")
elif col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min >= np.iinfo(np.int8).min and c_max <= np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif (
c_min >= np.iinfo(np.int16).min and c_max <= np.iinfo(np.int16).max
):
df[col] = df[col].astype(np.int16)
elif (
c_min >= np.iinfo(np.int32).min and c_max <= np.iinfo(np.int32).max
):
df[col] = df[col].astype(np.int32)
elif (
c_min >= np.iinfo(np.int64).min and c_max <= np.iinfo(np.int64).max
):
df[col] = df[col].astype(np.int64)
else:
if (
c_min >= np.finfo(np.float16).min
and c_max <= np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min >= np.finfo(np.float32).min
and c_max <= np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
# check the memory usage after optimization
end_mem = df.memory_usage().sum() / 1024**2
print("Memory usage after optimization is: {:.2f} MB".format(end_mem))
# calculate the percentage of the memory usage reduction
mem_reduction = 100 * (start_mem - end_mem) / start_mem
print("Memory usage decreased by {:.1f}%".format(mem_reduction))
return df
print("Articles Info: ")
print(articles.info())
print("Customer Info: ")
print(customers.info())
print("Transactions Info: ")
print(transactions.info())
# ---- memory optimizations -------------
# uses 8 bytes instead of given 64 byte string, reduces mem by 8x,
# !!!! have to convert back before merging w/ sample_submissions.csv
# convert transactions['customer_id'] to 8 bytes int
# transactions['customer_id'] = transactions['customer_id'].astype('int64')
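# A small sketch (assumption: we keep a lookup table, which is not part of the original
# notebook) so the truncated int64 ids can be mapped back to the original 64-character
# hex customer_id strings before building a submission; it must be built while
# customers['customer_id'] still holds the original strings.
customer_id_lookup = dict(
    zip(
        customers["customer_id"].apply(lambda x: int(x[-16:], 16)),
        customers["customer_id"],
    )
)
# e.g. original_id = customer_id_lookup[some_int64_customer_id]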
transactions["customer_id"] = (
transactions["customer_id"].apply(lambda x: int(x[-16:], 16)).astype("int64")
)
customers["customer_id"] = (
customers["customer_id"].apply(lambda x: int(x[-16:], 16)).astype("int64")
)
articles = reduce_mem_usage(articles)
customers = reduce_mem_usage(customers)
transactions = reduce_mem_usage(transactions)
# articles['article_id'] = articles['article_id'].astype('int32')
# transactions['article_id'] = transactions['article_id'].astype('int32')
# # !!!! ADD LEADING ZERO BACK BEFORE SUBMISSION OF PREDICTIONS TO KAGGLE:
# # Ex.: transactions['article_id'] = '0' + transactions.article_id.astype('str')
print("Articles Info: ")
print(articles.info())
print("Customer Info: ")
print(customers.info())
print("Transactions Info: ")
print(transactions.info())
# ## Exploratory-Data-Analysis
# ### EDA::Articles
# Article data:
# `article_id` : Unique id for every article of clothing
# Observing the structure of the column info, the following hierarchy of features (most levels also have a matching numeric code column alongside the name column shown here) is used for article identification:
# - `index_group_name` :: (clothing categories)
# - `index_name` :: (clothing categories' sub-groups) -- same as index group if no subgroups for a category
# - `section_name` :: (clothing collections)
# - `garment_group_name` :: (garment groups)
# - `product_group_name` :: (product groups)
# - `product_type_name` :: (product types)
# - `prod_name` :: (product names)
# Other data:
# `colour_*`: colour info of each article
# `perceived_colour_*`: colour info of each article
# `department_*`: department info
# `detail_desc`: article description
# (we're ignoring `graphical_*` features since we are not going to use the image data)
#
articles.head()
# Observing the most popular clothing categories (indices)
# Convert index_name to ordered categorical for ordered histplot
ordered_index_names = articles["index_name"].value_counts().index
articles["index_name"] = pd.Categorical(
articles["index_name"], categories=ordered_index_names, ordered=True
)
# Plot histogram
f, ax = plt.subplots(figsize=(10, 6))
sns.histplot(data=articles, y="index_name")
ax.set_xlabel("count of articles")
ax.set_ylabel("index_name")
plt.show()
# The Ladieswear category and Children (aggregated) category have the most articles. The Sport category has the least articles.
# Observing the most popular clothing collections (sections)
ordered_section_names = articles["section_name"].value_counts().index
articles["section_name"] = pd.Categorical(
articles["section_name"], categories=ordered_section_names, ordered=True
)
f, ax = plt.subplots(figsize=(14, 14))
sns.histplot(data=articles, y="section_name", bins=len(ordered_section_names))
ax.set_xlabel("count of articles by clothing collection")
ax.set_ylabel("section_name")
plt.show()
# The Women's Everyday Collection has the most articles, followed by the miscellaneous Divided Collection section and Baby Essentials & Complements.
# Ladies Other has the least number of articles.
# Observing garments grouped by their clothing category (index_group)
ordered_garment_group_names = articles["garment_group_name"].value_counts().index
ordered_index_group_names = articles["index_group_name"].value_counts().index
articles["garment_group_name"] = pd.Categorical(
articles["garment_group_name"], categories=ordered_garment_group_names, ordered=True
)
articles["index_group_name"] = pd.Categorical(
articles["index_group_name"], categories=ordered_index_group_names, ordered=True
)
f, ax = plt.subplots(figsize=(15, 7))
ax = sns.histplot(
data=articles, y="garment_group_name", hue="index_group_name", multiple="stack"
)
ax.set_xlabel("count of articles by garment group")
ax.set_ylabel("garment_group_name")
plt.show()
# Jersey Fancy and Accessories are the most popular garment groups; a large part of the Ladieswear and Children categories contribute to the garment group counts.
# Observing number of articles per clothing category
articles.groupby(["index_group_name"]).count()["article_id"]
# Since some clothing categories (index_group_name) have sub-categories (index_name):
# Observing number of articles per sub-category
grouped_counts = articles.groupby(["index_group_name", "index_name"]).count()[
"article_id"
]
grouped_counts = grouped_counts[grouped_counts != 0]
grouped_counts
# The clothing sub-category of Ladieswear in the Ladieswear category has the most articles overall.
# The clothing sub-category of Children Sizes 92-140 has the most articles in the Baby/Children category.
# Observing number of articles by product group
grouped_counts = articles.groupby(["garment_group_name", "product_group_name"]).count()[
"article_id"
]
grouped_counts = grouped_counts[grouped_counts != 0]
grouped_counts
# Observing number of articles by product groups
grouped_counts = articles.groupby(["product_group_name", "product_type_name"]).count()[
"article_id"
]
grouped_counts = grouped_counts[grouped_counts != 0]
grouped_counts
# Observing the most popular colours for articles
ordered_colour_names = articles["colour_group_name"].value_counts().index
articles["colour_group_name"] = pd.Categorical(
articles["colour_group_name"], categories=ordered_colour_names, ordered=True
)
f, ax = plt.subplots(figsize=(15, 10))
sns.countplot(y="colour_group_name", data=articles, order=ordered_colour_names)
ax.set_xlabel("count of articles")
ax.set_ylabel("colour_group_name")
ax.set_title("Count of articles by Colour Group")
plt.show()
# Black, dark blue and white are the most popular colours overall.
# Observing the most popular graphics for articles
count_by_graphical_appearance = (
articles["graphical_appearance_name"].value_counts().sort_values(ascending=True)
)
fig, ax = plt.subplots(figsize=(10, 8))
ax.barh(count_by_graphical_appearance.index, count_by_graphical_appearance.values)
ax.set_title("Count of Articles by Graphical Appearance")
ax.set_xlabel("count of articles")
ax.set_ylabel("graphical_appearance_name")
plt.show()
# A Solid pattern on articles is most popular.
# ### EDA::Customers
# Customer data:
# `customer_id` : Unique id for every customer
# `FN` (Does the customer receive fashion news): 1 or 0
# `Active` (Is the customer active for communication): 1 or 0
# `club_member_status` (Customer's club status): 'ACTIVE' or 'PRE-CREATE' or 'LEFT CLUB'
# `fashion_news_frequency` (How often H&M may send news to customer): 'Regularly' or 'Monthly' or 'None'
# `age` : Customer's age
# `postal_code` : Customer's postal code
customers.head()
# Observing postal code counts
top_5_postal_codes = customers["postal_code"].value_counts().head(5)
print(top_5_postal_codes)
# Clearly, the most common postal code is either some default postal code or a centralized delivery location.
# Observing the customer age distribution
f, ax = plt.subplots(figsize=(15, 5))
sns.histplot(data=customers, x="age")
ax.set_ylabel("number of customers")
plt.show()
top_5_ages = customers["age"].value_counts().head(5)
print(top_5_ages)
# Clearly, the age range of 20-25 has the most customers.
# Observing the club member status of customers
# Group the customers by club member status and count the number of customers in each group
club_member_counts = customers.groupby("club_member_status")["customer_id"].count()
# Pie chart
plt.pie(club_member_counts.values, labels=club_member_counts.index, autopct="%1.1f%%")
plt.title("Club Member Statuses")
plt.axis("equal")
plt.show()
# An overwhelming majority of customers currently have an active club status.
# Observing the FN subscription of customers
news_frequency_counts = customers.groupby("fashion_news_frequency")[
"customer_id"
].count()
# create a pie chart
plt.pie(news_frequency_counts, labels=news_frequency_counts.index, autopct="%1.1f%%")
plt.title("Fashion News Newsletter Frequency")
plt.show()
# A majority of customers don't subscribe to the fashion newsletter.
# ### EDA::Transactions
# Transaction data:
# `t_dat`: date the transaction occured in yyyy-mm-dd format
# `customer_id`: in customers df
# `article_id` in articles df
# `price`: generalized price (not in a particular currency or unit)
# `sales_channel_id`: 1 = in-store or 2 = online
transactions.head()
f, ax = plt.subplots(figsize=(15, 1))
ax.set_title("Price distribution of all articles")
sns.boxplot(x="price", data=transactions)
plt.show()
pd.set_option("display.float_format", "{:.4f}".format)
transactions.describe()["price"]
# The prices seem to vary a lot across all articles, so the above plot doesn't give us useful information.
# The total transaction count is ~31 million, so we'll use a 100,000 sample from the transaction data as needed.
# merging the transactions and articles dataframes on article_id
articles_product_columns = articles[
["article_id", "index_name", "product_group_name", "product_type_name", "prod_name"]
]
# merged_table_ta --> merged_table_transactions_articles
merged_table_ta = transactions[
["customer_id", "article_id", "price", "sales_channel_id", "t_dat"]
].merge(articles_product_columns, on="article_id", how="left")
# Observing the mean price of each clothing sub-category (index_name)
# Group the data by index_name and calculate the mean price for each group
price_by_index_name = merged_table_ta.groupby("index_name")["price"].mean()
price_by_index_name = price_by_index_name.sort_values(ascending=True)
# Plot
fig, ax = plt.subplots(figsize=(10, 5))
ax.barh(price_by_index_name.index, price_by_index_name.values)
ax.set_title("Average prices by index_name (all clothing categories)")
ax.set_xlabel("mean price")
ax.set_ylabel("index_name")
plt.show()
# The Ladieswear sub-category in the Ladieswear category has the largest mean price.
# Observing the mean price of each product group
# Group the data by product_group_name and calculate the mean price for each group
price_by_product_group = (
merged_table_ta[["product_group_name", "price"]]
.groupby("product_group_name")
.mean()
)
price_by_product_group = price_by_product_group.sort_values(by="price", ascending=True)
# Plot
fig, ax = plt.subplots(figsize=(10, 5))
ax.barh(price_by_product_group.index, price_by_product_group["price"])
ax.set_title("Average prices by product group")
ax.set_xlabel("mean price")
ax.set_ylabel("product_group_name")
plt.show()
# Observing price distributions by product group
# plot all boxplots
f, ax = plt.subplots(figsize=(25, 18))
ax = sns.boxplot(data=merged_table_ta, x="price", y="product_group_name")
ax.set_xlabel("price by product group", fontsize=18)
ax.set_ylabel("product_group_name", fontsize=18)
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
plt.show()
# The prices for the product groups Garment upper/lower/full and Shoes vary widely. The opposite is true for the Cosmetic, Stationery and Fun product groups. This is reasonable since the price can vary between different clothing collections of the same product group (e.g. a premium full-garment collection vs an on-sale garment collection).
# Observing the distribution of price frequency in samples
# Sample 100,000 observations
merged_sample = merged_table_ta.sample(n=100000)
# using kdeplots to plot probability dist. of price values
fig, ax = plt.subplots(1, 1, figsize=(14, 5))
sns.kdeplot(
np.log(
merged_sample.loc[merged_sample["sales_channel_id"] == 1].price.value_counts()
)
)
sns.kdeplot(
np.log(
merged_sample.loc[merged_sample["sales_channel_id"] == 2].price.value_counts()
)
)
ax.legend(labels=["In-store: Sales channel 1", "Online: Sales channel 2"])
plt.title(
"Logarithmic distribution of price frequency in transactions, grouped by sales channel (100k sample)"
)
plt.show()
# There is a slightly larger tendency for customers to purchase more expensive items online.
# Observing the top 10 customers by number of transactions
transactions["customer_id"].value_counts().head(10)
# Observing the number of transaction per day by sales channel
# Sample 100,000 observations
merged_table_ta_sample = merged_table_ta.sample(n=100000, random_state=69)
# Convert t_dat column to datetime format
merged_table_ta_sample["t_dat"] = pd.to_datetime(merged_table_ta_sample["t_dat"])
# Group the data by sales channel and date, and count the number of transactions for each group
transactions_by_day = merged_table_ta_sample.groupby(
["sales_channel_id", pd.Grouper(key="t_dat", freq="D")]
)["article_id"].count()
# Create a line plot
fig, ax = plt.subplots(figsize=(10, 6))
for channel in transactions_by_day.index.levels[0]:
if channel == 1:
ax.plot(transactions_by_day[channel], label=f"In-Store: Sales channel 1")
else:
ax.plot(transactions_by_day[channel], label=f"Online: Sales channel 2")
ax.legend()
ax.set_title("Number of Transactions per Day by Sales Channel")
ax.set_xlabel("Date")
ax.set_ylabel("Number of Transactions")
plt.show()
# It is worth noting that for April 2020 the number of in-store transactions is virtually nonexistent, while there is a sharp spike in online transactions for the same month; this is most likely because physical stores closed in April 2020 due to Covid-19.
# Purchase stats
num_unique_customers = len(transactions["customer_id"].unique())
num_unique_articles = len(transactions["article_id"].unique())
total_transactions = len(transactions)
print("Total H&M customers:", len(customers))
print("Total H&M articles:", len(articles))
print(
"Number of unique customers that purchased at least 1 article:",
num_unique_customers,
)
print("Number of unique articles purchased:", num_unique_articles)
print(
"Number of customers that didn't make any transactions:",
len(customers) - num_unique_customers,
)
print(
"Number of unique articles that weren't purchased:",
len(articles) - num_unique_articles,
)
print("Total number of transactions:", total_transactions)
# get % of customers that made at least 1 transaction in the last 3 months
transactions["t_dat"] = pd.to_datetime(transactions["t_dat"])
# get the last transaction date for each customer
last_transaction_date = transactions.groupby("customer_id")["t_dat"].max()
three_months_ago = last_transaction_date.max() - pd.Timedelta(days=90)
# get the customers who made at least 1 transaction in the last 3 months
active_customers = last_transaction_date[
last_transaction_date >= three_months_ago
].index
percent_active_customers = len(active_customers) / len(last_transaction_date) * 100
print(
f"Percentage of customers that made at least 1 transaction in the last 3 months: {percent_active_customers:.2f}%"
)
# Validating that there are no duplicate customer or article IDs
duplicate_customers = customers[
customers.duplicated(subset=["customer_id"], keep=False)
]
duplicate_customers = duplicate_customers.sort_values(by=["customer_id"])
print("Number of non-unique customer IDs:", len(duplicate_customers))
duplicate_articles = articles[articles.duplicated(subset=["article_id"], keep=False)]
duplicate_articles = duplicate_articles.sort_values(by=["article_id"])
print("Number of non-unique article IDs:", len(duplicate_articles))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### Importing Libraries.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from yellowbrick.classifier import ConfusionMatrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import GridSearchCV
# ## 1. Importing Dataset.
# #### Here we read the training dataset into the variable "df" and the test dataset into the variable "df_test".
df = pd.read_csv(
"/kaggle/input/nbfi-vehicle-loan-repayment-dataset/Train_Dataset.csv", sep=","
)
df_test = pd.read_csv(
"/kaggle/input/nbfi-vehicle-loan-repayment-dataset/Test_Dataset.csv", sep=","
)
pd.set_option("display.max_columns", None)
df.head(5)
df.describe()
df.dtypes
df.info()
(df_test.isnull().sum() / len(df_test)).sort_values(ascending=False)
(df.isnull().sum() / len(df)).sort_values(ascending=False)
df.head(5)
df = df.drop(
[
"Own_House_Age",
"Score_Source_1",
"Social_Circle_Default",
"Client_Occupation",
"Score_Source_3",
],
axis=1,
)
df_test = df_test.drop(
[
"Own_House_Age",
"Score_Source_1",
"Social_Circle_Default",
"Client_Occupation",
"Score_Source_3",
],
axis=1,
)
df["Population_Region_Relative"].replace(
to_replace="#", value=None, regex=True, inplace=True
)
df["Population_Region_Relative"].replace(
to_replace="@", value=None, regex=True, inplace=True
)
df["Population_Region_Relative"].replace(
to_replace="x", value=None, regex=True, inplace=True
)
df["Client_Income"].replace(to_replace="$", value=None, regex=True, inplace=True)
df["Loan_Annuity"].replace(to_replace="$", value=None, regex=True, inplace=True)
df["Credit_Amount"].replace(to_replace="$", value=None, regex=True, inplace=True)
df["Loan_Annuity"].replace(to_replace="#VALUE!", value=None, regex=True, inplace=True)
df["ID_Days"].replace(to_replace="x", value=None, regex=True, inplace=True)
df["Age_Days"].replace(to_replace="x", value=None, regex=True, inplace=True)
df["Employed_Days"].replace(to_replace="x", value=None, regex=True, inplace=True)
df["Registration_Days"].replace(to_replace="x", value=None, regex=True, inplace=True)
df["Accompany_Client"].replace(to_replace="##", value=None, regex=True, inplace=True)
df_test["Population_Region_Relative"].replace(
to_replace="#", value=None, regex=True, inplace=True
)
df_test["Population_Region_Relative"].replace(
to_replace="@", value=None, regex=True, inplace=True
)
df_test["Population_Region_Relative"].replace(
to_replace="x", value=None, regex=True, inplace=True
)
df_test["Client_Income"].replace(to_replace="$", value=None, regex=True, inplace=True)
df_test["Loan_Annuity"].replace(to_replace="$", value=None, regex=True, inplace=True)
df_test["Credit_Amount"].replace(to_replace="$", value=None, regex=True, inplace=True)
df_test["Loan_Annuity"].replace(
to_replace="#VALUE!", value=None, regex=True, inplace=True
)
df_test["ID_Days"].replace(to_replace="x", value=None, regex=True, inplace=True)
df_test["Age_Days"].replace(to_replace="x", value=None, regex=True, inplace=True)
df_test["Employed_Days"].replace(to_replace="x", value=None, regex=True, inplace=True)
df_test["Registration_Days"].replace(
to_replace="x", value=None, regex=True, inplace=True
)
df_test["Accompany_Client"].replace(
to_replace="##", value=None, regex=True, inplace=True
)
df_test["Score_Source_2"].replace(to_replace="#", value=None, regex=True, inplace=True)
df["Credit_Bureau"] = df["Credit_Bureau"].fillna(df["Credit_Bureau"].mode()[0])
df["Client_Income_Type"] = df["Client_Income_Type"].fillna(
df["Client_Income_Type"].mode()[0]
)
df["Bike_Owned"] = df["Bike_Owned"].fillna(df["Bike_Owned"].mode()[0])
df["Type_Organization"] = df["Type_Organization"].fillna(
df["Type_Organization"].mode()[0]
)
df["Active_Loan"] = df["Active_Loan"].fillna(df["Active_Loan"].mode()[0])
df["Client_Marital_Status"] = df["Client_Marital_Status"].fillna(
df["Client_Marital_Status"].mode()[0]
)
df["Client_Housing_Type"] = df["Client_Housing_Type"].fillna(
df["Client_Housing_Type"].mode()[0]
)
df["Application_Process_Hour"] = df["Application_Process_Hour"].fillna(
df["Application_Process_Hour"].mode()[0]
)
df["Car_Owned"] = df["Car_Owned"].fillna(df["Car_Owned"].mode()[0])
df["Client_Education"] = df["Client_Education"].fillna(df["Client_Education"].mode()[0])
df["House_Own"] = df["House_Own"].fillna(df["House_Own"].mode()[0])
df["Loan_Contract_Type"] = df["Loan_Contract_Type"].fillna(
df["Loan_Contract_Type"].mode()[0]
)
df["Cleint_City_Rating"] = df["Cleint_City_Rating"].fillna(
df["Cleint_City_Rating"].mode()[0]
)
df["Client_Gender"] = df["Client_Gender"].fillna(df["Client_Gender"].mode()[0])
df["Application_Process_Day"] = df["Application_Process_Day"].fillna(
df["Application_Process_Day"].mode()[0]
)
df["Accompany_Client"] = df["Accompany_Client"].fillna(
    df["Accompany_Client"].mode()[0]
)
df["ID_Days"] = df["ID_Days"].fillna(df["ID_Days"].median())
df["Score_Source_2"] = df["Score_Source_2"].fillna(df["Score_Source_2"].median())
df["Population_Region_Relative"] = df["Population_Region_Relative"].fillna(
df["Population_Region_Relative"].median()
)
df["Loan_Annuity"] = df["Loan_Annuity"].fillna(df["Loan_Annuity"].median())
df["Age_Days"] = df["Age_Days"].fillna(df["Age_Days"].median())
df["Client_Income"] = df["Client_Income"].fillna(df["Client_Income"].median())
df["Phone_Change"] = df["Phone_Change"].fillna(df["Phone_Change"].median())
df["Employed_Days"] = df["Employed_Days"].fillna(df["Employed_Days"].median())
df["Registration_Days"] = df["Registration_Days"].fillna(
df["Registration_Days"].median()
)
df["Child_Count"] = df["Child_Count"].fillna(df["Child_Count"].median())
df["Credit_Amount"] = df["Credit_Amount"].fillna(df["Credit_Amount"].median())
df["Client_Family_Members"] = df["Client_Family_Members"].fillna(
df["Client_Family_Members"].median()
)
df_test["Credit_Bureau"] = df_test["Credit_Bureau"].fillna(
df_test["Credit_Bureau"].mode()[0]
)
df_test["Client_Income_Type"] = df_test["Client_Income_Type"].fillna(
df_test["Client_Income_Type"].mode()[0]
)
df_test["Bike_Owned"] = df_test["Bike_Owned"].fillna(df_test["Bike_Owned"].mode()[0])
df_test["Type_Organization"] = df_test["Type_Organization"].fillna(
    df_test["Type_Organization"].mode()[0]
)
df_test["Active_Loan"] = df_test["Active_Loan"].fillna(df_test["Active_Loan"].mode()[0])
df_test["Client_Marital_Status"] = df_test["Client_Marital_Status"].fillna(
df_test["Client_Marital_Status"].mode()[0]
)
df_test["Accompany_Client"] = df_test["Accompany_Client"].fillna(
df_test["Accompany_Client"].mode()[0]
)
df_test["Client_Housing_Type"] = df_test["Client_Housing_Type"].fillna(
df_test["Client_Housing_Type"].mode()[0]
)
df_test["Application_Process_Hour"] = df_test["Application_Process_Hour"].fillna(
df_test["Application_Process_Hour"].mode()[0]
)
df_test["Car_Owned"] = df_test["Car_Owned"].fillna(df_test["Car_Owned"].mode()[0])
df_test["Client_Education"] = df_test["Client_Education"].fillna(
df_test["Client_Education"].mode()[0]
)
df_test["House_Own"] = df_test["House_Own"].fillna(df_test["House_Own"].mode()[0])
df_test["Loan_Contract_Type"] = df_test["Loan_Contract_Type"].fillna(
df_test["Loan_Contract_Type"].mode()[0]
)
df_test["Cleint_City_Rating"] = df_test["Cleint_City_Rating"].fillna(
df_test["Cleint_City_Rating"].mode()[0]
)
df_test["Client_Gender"] = df_test["Client_Gender"].fillna(
df_test["Client_Gender"].mode()[0]
)
df_test["Application_Process_Day"] = df_test["Application_Process_Day"].fillna(
df_test["Application_Process_Day"].mode()[0]
)
df_test["Accompany_Client"] = df_test["Accompany_Client"].fillna(
    df_test["Accompany_Client"].mode()[0]
)
df_test["ID_Days"] = df_test["ID_Days"].fillna(df_test["ID_Days"].median())
df_test["Score_Source_2"] = df_test["Score_Source_2"].fillna(
df_test["Score_Source_2"].median()
)
df_test["Population_Region_Relative"] = df_test["Population_Region_Relative"].fillna(
df_test["Population_Region_Relative"].median()
)
df_test["Loan_Annuity"] = df_test["Loan_Annuity"].fillna(
df_test["Loan_Annuity"].median()
)
df_test["Age_Days"] = df_test["Age_Days"].fillna(df_test["Age_Days"].median())
df_test["Client_Income"] = df_test["Client_Income"].fillna(
df_test["Client_Income"].median()
)
df_test["Phone_Change"] = df_test["Phone_Change"].fillna(
df_test["Phone_Change"].median()
)
df_test["Employed_Days"] = df_test["Employed_Days"].fillna(
df_test["Employed_Days"].median()
)
df_test["Registration_Days"] = df_test["Registration_Days"].fillna(
    df_test["Registration_Days"].median()
)
df_test["Child_Count"] = df_test["Child_Count"].fillna(df_test["Child_Count"].median())
df_test["Credit_Amount"] = df_test["Credit_Amount"].fillna(
df_test["Credit_Amount"].median()
)
df_test["Client_Family_Members"] = df_test["Client_Family_Members"].fillna(
df_test["Client_Family_Members"].median()
)
(df.isnull().sum() / len(df)).sort_values(ascending=False)
(df_test.isnull().sum() / len(df_test)).sort_values(ascending=False)
# ## 2. Data Analysis.
# #### Verifying the correlation between our variables.
# - We can see that there is no strong correlation between the variables.
corr = df.corr().round(2)
plt.figure(figsize=(15, 10))
sns.heatmap(corr, annot=True, cmap="YlOrBr")
# ## 3. Model Building.
# #### Label Encoder
# - Here we are going to use the LabelEncoder to transform our categorical variables into numeric variables.
df.head(1)
from sklearn.preprocessing import LabelEncoder
le_Accompany_Client = LabelEncoder()
le_Client_Income_Type = LabelEncoder()
le_Client_Education = LabelEncoder()
le_Client_Marital_Status = LabelEncoder()
le_Client_Gender = LabelEncoder()
le_Loan_Contract_Type = LabelEncoder()
le_Client_Housing_Type = LabelEncoder()
le_Application_Process_Day = LabelEncoder()
le_Client_Permanent_Match_Tag = LabelEncoder()
le_Client_Contact_Work_Tag = LabelEncoder()
le_Type_Organization = LabelEncoder()
df["Accompany_Client"] = le_Accompany_Client.fit_transform(df["Accompany_Client"])
df["Client_Income_Type"] = le_Client_Income_Type.fit_transform(df["Client_Income_Type"])
df["Client_Education"] = le_Client_Education.fit_transform(df["Client_Education"])
df["Client_Marital_Status"] = le_Client_Marital_Status.fit_transform(
df["Client_Marital_Status"]
)
df["Client_Gender"] = le_Client_Gender.fit_transform(df["Client_Gender"])
df["Loan_Contract_Type"] = le_Loan_Contract_Type.fit_transform(df["Loan_Contract_Type"])
df["Client_Housing_Type"] = le_Client_Housing_Type.fit_transform(
df["Client_Housing_Type"]
)
df["Application_Process_Day"] = le_Application_Process_Day.fit_transform(
df["Application_Process_Day"]
)
df["Client_Permanent_Match_Tag"] = le_Client_Permanent_Match_Tag.fit_transform(
df["Client_Permanent_Match_Tag"]
)
df["Client_Contact_Work_Tag"] = le_Client_Contact_Work_Tag.fit_transform(
df["Client_Contact_Work_Tag"]
)
df["Type_Organization"] = le_Type_Organization.fit_transform(df["Type_Organization"])
from sklearn.preprocessing import LabelEncoder
le_Accompany_Client = LabelEncoder()
le_Client_Income_Type = LabelEncoder()
le_Client_Education = LabelEncoder()
le_Client_Marital_Status = LabelEncoder()
le_Client_Gender = LabelEncoder()
le_Loan_Contract_Type = LabelEncoder()
le_Client_Housing_Type = LabelEncoder()
le_Application_Process_Day = LabelEncoder()
le_Client_Permanent_Match_Tag = LabelEncoder()
le_Client_Contact_Work_Tag = LabelEncoder()
le_Type_Organization = LabelEncoder()
df_test["Accompany_Client"] = le_Accompany_Client.fit_transform(
df_test["Accompany_Client"]
)
df_test["Client_Income_Type"] = le_Client_Income_Type.fit_transform(
df_test["Client_Income_Type"]
)
df_test["Client_Education"] = le_Client_Education.fit_transform(
df_test["Client_Education"]
)
df_test["Client_Marital_Status"] = le_Client_Marital_Status.fit_transform(
df_test["Client_Marital_Status"]
)
df_test["Client_Gender"] = le_Client_Gender.fit_transform(df_test["Client_Gender"])
df_test["Loan_Contract_Type"] = le_Loan_Contract_Type.fit_transform(
df_test["Loan_Contract_Type"]
)
df_test["Client_Housing_Type"] = le_Client_Housing_Type.fit_transform(
df_test["Client_Housing_Type"]
)
df_test["Application_Process_Day"] = le_Application_Process_Day.fit_transform(
df_test["Application_Process_Day"]
)
df_test["Client_Permanent_Match_Tag"] = le_Client_Permanent_Match_Tag.fit_transform(
df_test["Client_Permanent_Match_Tag"]
)
df_test["Client_Contact_Work_Tag"] = le_Client_Contact_Work_Tag.fit_transform(
df_test["Client_Contact_Work_Tag"]
)
df_test["Type_Organization"] = le_Type_Organization.fit_transform(
df_test["Type_Organization"]
)
df = df.drop("ID", axis=1)
df_test = df_test.drop("ID", axis=1)
# #### Separating into features variables and target variable.
df_test.columns
X = df.drop("Default", axis=1)
X = X.values
y = df["Default"]
# #### StandardScaler
# - Here we will use StandardScaler to put our data in the same scale.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_standard = scaler.fit_transform(X)
# #### Splitting the data into train and test sets; here we will use 30% of our data to test the machine learning models.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X_standard, y, test_size=0.3, random_state=0
)
# #### Balancing Classes
# - Since one class has far more samples than the others, we will balance the classes using random oversampling.
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=0)
X_train, y_train = ros.fit_resample(X_train, y_train)
X_train.shape, y_train.shape
# #### Naive Bayes
# - Running the Gaussian model.
# - Here we will use the Naive Bayes model, testing the Gaussian variant on our standardized data.
from sklearn.naive_bayes import GaussianNB
naive_bayes = GaussianNB()
naive_bayes.fit(X_train, y_train)
previsoes = naive_bayes.predict(X_test)
cm = ConfusionMatrix(naive_bayes)
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
# #### Decision Tree
# - Here we will use the Decision Tree model and test both the entropy and Gini criteria.
# - Here we are applying RandomizedSearchCV to check which hyperparameters work best.
from sklearn.model_selection import RandomizedSearchCV
parameters = {
"max_depth": [3, 4, 5, 6, 7, 9, 11],
"min_samples_split": [2, 3, 4, 5, 6, 7],
"criterion": ["entropy", "gini"],
}
model = DecisionTreeClassifier()
gridDecisionTree = RandomizedSearchCV(model, parameters, cv=3, n_jobs=-1)
gridDecisionTree.fit(X_train, y_train)
print("Mín Split: ", gridDecisionTree.best_estimator_.min_samples_split)
print("Max Nvl: ", gridDecisionTree.best_estimator_.max_depth)
print("Algorithm: ", gridDecisionTree.best_estimator_.criterion)
print("Score: ", gridDecisionTree.best_score_)
# #### Running Decision Tree
decision_tree = DecisionTreeClassifier(
criterion="entropy", min_samples_split=5, max_depth=11, random_state=0
)
decision_tree.fit(X_train, y_train)
previsoes = decision_tree.predict(X_test)
cm = ConfusionMatrix(decision_tree)
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
# #### Checking the most important variables of the model.
columns = df.drop("Default", axis=1).columns
feature_imp = pd.Series(decision_tree.feature_importances_, index=columns).sort_values(
ascending=False
)
feature_imp
# #### RandomForest
# - Here we will use the Random Forest model and test both the entropy and Gini criteria.
# - Applying RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
parameters = {
"max_depth": [3, 4, 5, 6, 7, 9, 11],
"min_samples_split": [2, 3, 4, 5, 6, 7],
"criterion": ["entropy", "gini"],
}
model = RandomForestClassifier()
gridRandomForest = RandomizedSearchCV(model, parameters, cv=5, n_jobs=-1)
gridRandomForest.fit(X_train, y_train)
print("Algorithm: ", gridRandomForest.best_estimator_.criterion)
print("Score: ", gridRandomForest.best_score_)
print("Mín Split: ", gridRandomForest.best_estimator_.min_samples_split)
print("Max Nvl: ", gridRandomForest.best_estimator_.max_depth)
# #### Running Random Forest
random_forest = RandomForestClassifier(
n_estimators=100,
min_samples_split=6,
max_depth=11,
criterion="entropy",
random_state=0,
)
random_forest.fit(X_train, y_train)
previsoes = random_forest.predict(X_test)
cm = ConfusionMatrix(random_forest)
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
# #### Checking the most important variables of the model.
feature_imp_random = pd.Series(
random_forest.feature_importances_, index=columns
).sort_values(ascending=False)
feature_imp_random
# #### Extra Trees
# - Here we will use the Extra Trees model and test both the entropy and Gini criteria.
# - Applying GridSearch
from sklearn.ensemble import ExtraTreesClassifier
n_estimators = np.array([100])
alg = ["entropy", "gini"]
values_grid = {"n_estimators": n_estimators, "criterion": alg}
model = ExtraTreesClassifier()
gridExtraTrees = GridSearchCV(estimator=model, param_grid=values_grid, cv=3)
gridExtraTrees.fit(X_train, y_train)
print("Algorithm: ", gridExtraTrees.best_estimator_.criterion)
print("Score: ", gridExtraTrees.best_score_)
extra_trees = ExtraTreesClassifier(n_estimators=100, criterion="gini", random_state=0)
extra_trees.fit(X_train, y_train)
previsoes = extra_trees.predict(X_test)
cm = ConfusionMatrix(extra_trees)
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
# #### K-Means
# - Here we will use the K Means Model.
from sklearn.cluster import KMeans
model = KMeans(n_clusters=2, random_state=0)
model.fit(X_train)
previsoes = model.predict(X_test)
classification_kmeans_standard = accuracy_score(y_test, previsoes)
print(classification_kmeans_standard)
score_kmeans = 0.4816
# #### K-Neighbors
# - Here we will use the K-Neighbors model, with GridSearchCV to figure out the best value of k.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
k_list = list(range(1, 10))
k_values = dict(n_neighbors=k_list)
grid = GridSearchCV(knn, k_values, cv=2, scoring="accuracy", n_jobs=-1)
grid.fit(X_train, y_train)
grid.best_params_, grid.best_score_
# #### Running KNN
knn = KNeighborsClassifier(n_neighbors=1, metric="minkowski", p=2)
knn.fit(X_train, y_train)
previsoes = knn.predict(X_test)
cm = ConfusionMatrix(knn)
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
# #### Logistic Regression
from sklearn.linear_model import LogisticRegression
logistic = LogisticRegression(random_state=1, max_iter=1000)
logistic.fit(X_train, y_train)
previsoes = logistic.predict(X_test)
cm = ConfusionMatrix(logistic)
cm.fit(X_train, y_train)
cm.score(X_test, y_test)
# #### AdaBoost
# - Here we will use the AdaBoost model, with GridSearchCV to figure out the best hyperparameters.
# - Applying GridSearch
from sklearn.ensemble import AdaBoostClassifier
n_estimators = np.array([500])
learning_rate = np.array([2.0, 2.5, 1.9, 1.7, 0.5, 0.4])
values_grid = {"n_estimators": n_estimators, "learning_rate": learning_rate}
model = AdaBoostClassifier()
gridAdaBoost = GridSearchCV(estimator=model, param_grid=values_grid, cv=5, n_jobs=-1)
gridAdaBoost.fit(X_train, y_train)
print("Learning Rate: ", gridAdaBoost.best_estimator_.learning_rate)
print("Score: ", gridAdaBoost.best_score_)
# #### Gradient Boosting
# - Here we will use the Gradient Boosting model, with RandomizedSearchCV to figure out the best hyperparameters.
from sklearn.ensemble import GradientBoostingClassifier
parameters = {
"learning_rate": [0.01, 0.02, 0.05, 0.07, 0.09, 0.1, 0.3, 0.5, 0.005],
"n_estimators": [300, 500],
}
model = GradientBoostingClassifier()
gridGradient = RandomizedSearchCV(model, parameters, cv=5, n_jobs=-1)
gridGradient.fit(X_train, y_train)
print("Learning Rate: ", gridGradient.best_estimator_.learning_rate)
print("Score: ", gridGradient.best_score_)
|
# # **Creating the Warming Stripes in Matplotlib**
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import pandas as pd
import numpy as np
import seaborn as sns
# # Complex HeatMaps
df = pd.read_csv(
"../input/arpa-piemonte-acqui-terme/ACQUI TERME_giornalieri_1988_2022.csv",
encoding="unicode_escape",
on_bad_lines="skip",
sep=";",
decimal=",",
)
df.info()
df.head()
df = df.rename(columns={"Temperatura media ( °C )": "Temperatura"})
df.head()
df["Data"] = pd.to_datetime(df["Data"])
df.head()
df["Giorno"] = df["Data"].dt.day
df["Mese"] = df["Data"].dt.month
df["Anno"] = df["Data"].dt.year
df.head()
df.groupby(["Anno", "Mese"]).Temperatura.mean()
df2 = df.loc[df["Mese"] == 6].groupby(["Anno"]).Temperatura.mean()
ax = df2.plot(lw=2, title="Temperature medie mese di Giugno dal 1988 al 2022")
ax.set(xlabel="Anno", ylabel="Temperatura (°C)")
ax.set_xticks([])
plt.grid(axis="y")
plt.show()
cmap = ListedColormap(
[
"#08306b",
"#08519c",
"#2171b5",
"#4292c6",
"#6baed6",
"#9ecae1",
"#c6dbef",
"#deebf7",
"#fee0d2",
"#fcbba1",
"#fc9272",
"#fb6a4a",
"#ef3b2c",
"#cb181d",
"#a50f15",
"#67000d",
]
)
tmp1 = []
dum = 1
for i in range(1, 35):
tmp = []
for j in range(0, 12):
tmp.append(df.groupby(["Anno", "Mese"]).Temperatura.mean().iloc[dum])
dum = dum + 1
tmp1.append(tmp)
plt.figure(figsize=(10, 10))
plt.imshow(tmp1, cmap=cmap, aspect="auto")
# plt.xticks([])
# plt.yticks([])
# plt.grid(False)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.svm import LinearSVC, SVC
from sklearn.metrics import confusion_matrix, classification_report
from sklearn import metrics
from scipy import stats
from sklearn.preprocessing import StandardScaler
songs = pd.read_csv("../input/top50spotify2019/top50.csv", encoding="ISO-8859-1")
songs.head()
songs.info()
songs.describe()
songs.drop("Unnamed: 0", axis=1, inplace=True)
plt.figure(figsize=(12, 7))
sns.countplot(x="Popularity", data=songs, palette="viridis")
plt.figure(figsize=(12, 6))
sns.heatmap(songs.corr(), annot=True)
plt.figure(figsize=(12, 6))
sns.lineplot(x="Loudness..dB..", y="Energy", data=songs)
plt.figure(figsize=(12, 6))
sns.lineplot(x="Valence.", y="Energy", data=songs)
songs["Genre"].value_counts()
plt.figure(figsize=(25, 15))
order = [
"dance pop",
"pop",
"latin",
"edm",
"canadian hip hop",
"panamanian pop",
"dfw rap",
"canadian pop",
"brostep",
"electropop",
"reggaeton",
"reggaeton flow",
"country rap",
"atl hip hop",
"escape room",
"australian pop",
"trap music",
"r&b en espanol",
"big room",
"pop house",
"boy band",
]
sns.countplot(y=songs["Genre"], data=songs, orient="h", order=order, palette="rainbow")
# Now we'll split the songs by popularity: most popular (90 and above), medium popular (80-89), and less popular (70-79)
most_popular = songs[songs["Popularity"] > 89]
medium_popular = songs[(songs["Popularity"] > 79) & (songs["Popularity"] < 90)]
less_popular = songs[(songs["Popularity"] > 69) & (songs["Popularity"] < 80)]
medium_popular.info()
most_popular["Type"] = most_popular.apply(lambda x: "most popular", axis=1)
medium_popular["Type"] = medium_popular.apply(lambda x: "medium popular", axis=1)
less_popular["Type"] = less_popular.apply(lambda x: "less popular", axis=1)
popular_divided = pd.concat([most_popular, medium_popular, less_popular])
popular_divided.tail()
import matplotlib.gridspec as gridspec
fig3 = plt.figure(figsize=(15, 10))
gs = fig3.add_gridspec(2, 2)
f3_ax1 = fig3.add_subplot(gs[0, :])
f3_ax2 = fig3.add_subplot(gs[-1:, -1])
f3_ax3 = fig3.add_subplot(gs[-1, -2])
sns.lineplot(
x=most_popular["Popularity"],
y=most_popular["Length."],
data=most_popular,
ax=f3_ax1,
)
f3_ax1.set_title("Most Popular")
sns.lineplot(
x=medium_popular["Popularity"],
y=medium_popular["Length."],
data=medium_popular,
ax=f3_ax2,
)
f3_ax2.set_title("Medium Popular")
sns.lineplot(
x=less_popular["Popularity"],
y=less_popular["Length."],
data=less_popular,
ax=f3_ax3,
)
f3_ax3.set_title("Less Popular")
fig3 = plt.figure(figsize=(15, 10))
gs = fig3.add_gridspec(2, 2)
f3_ax4 = fig3.add_subplot(gs[0, :])
f3_ax5 = fig3.add_subplot(gs[-1:, -1])
f3_ax6 = fig3.add_subplot(gs[-1, -2])
sns.violinplot(
x=most_popular["Popularity"], y=most_popular["Energy"], data=most_popular, ax=f3_ax4
).set_title("Most Popular")
sns.violinplot(
x=medium_popular["Popularity"],
y=medium_popular["Energy"],
data=medium_popular,
ax=f3_ax5,
).set_title("Medium Popular")
sns.violinplot(
x=less_popular["Popularity"], y=less_popular["Energy"], data=less_popular, ax=f3_ax6
).set_title("Less Popular")
fig3 = plt.figure(figsize=(15, 10))
gs = fig3.add_gridspec(2, 2)
f3_ax4 = fig3.add_subplot(gs[0, :])
f3_ax5 = fig3.add_subplot(gs[-1:, -1])
f3_ax6 = fig3.add_subplot(gs[-1, -2])
sns.boxplot(
x=most_popular["Popularity"],
y=most_popular["Beats.Per.Minute"],
data=most_popular,
ax=f3_ax4,
).set_title("Most Popular")
sns.boxplot(
x=medium_popular["Popularity"],
y=medium_popular["Beats.Per.Minute"],
data=medium_popular,
ax=f3_ax5,
).set_title("Medium Popular")
sns.boxplot(
x=less_popular["Popularity"],
y=less_popular["Beats.Per.Minute"],
data=less_popular,
ax=f3_ax6,
).set_title("Less Popular")
songs["Genre"].unique()
most_popular["Popularity"] = most_popular["Popularity"].astype(int)
medium_popular["Popularity"] = medium_popular["Popularity"].astype(int)
less_popular["Popularity"] = less_popular["Popularity"].astype(int)
fig3 = plt.figure(figsize=(20, 15))
gs = fig3.add_gridspec(2, 2)
f3_ax7 = fig3.add_subplot(gs[0, :])
f3_ax8 = fig3.add_subplot(gs[-1:, -1])
f3_ax9 = fig3.add_subplot(gs[-1, -2])
sns.barplot(x="Popularity", y="Genre", data=most_popular, ax=f3_ax7, orient="h")
f3_ax7.set_title("Most Popular")
sns.barplot(x="Popularity", y="Genre", data=medium_popular, ax=f3_ax8, orient="h")
f3_ax8.set_title("Medium Popular")
sns.barplot(x="Popularity", y="Genre", data=less_popular, ax=f3_ax9, orient="h")
f3_ax9.set_title("Less Popular")
# From the plot above we can see that 'canadian pop' songs are among the least liked; for dance pop we have to explore further to understand the differences between its three popularity groups.
dance_pop = popular_divided[popular_divided["Genre"] == "dance pop"]
dance_pop.tail()
fig3 = plt.figure(figsize=(20, 15))
gs = fig3.add_gridspec(2, 2)
f3_ax10 = fig3.add_subplot(gs[0, :])
f3_ax11 = fig3.add_subplot(gs[-1:, -1])
f3_ax12 = fig3.add_subplot(gs[-1, -2])
sns.barplot(
x="Popularity",
y="Artist.Name",
data=most_popular,
ax=f3_ax10,
orient="h",
palette="rainbow",
)
f3_ax10.set_title("Most Popular")
sns.barplot(
x="Popularity",
y="Artist.Name",
data=medium_popular,
ax=f3_ax11,
orient="h",
palette="rainbow",
)
f3_ax11.set_title("Medium Popular")
sns.barplot(
x="Popularity",
y="Artist.Name",
data=less_popular,
ax=f3_ax12,
orient="h",
palette="rainbow",
)
f3_ax12.set_title("Less Popular")
# From the plot above we can see that songs by "Shawn Mendes" and "Lauv" are less popular compared to the others, while 'Anuel AA', 'Post Malone', 'Lil Tecca', 'SamSmith', 'BillieEilish',
# 'Bad Bunny', 'Drake', 'J Balvin', 'Lizzo', 'MEDUZA', 'Lil Nas X', 'Lunay', 'Daddy Yankee', 'Sech' and 'Taylor Swift' are the most popular.
plt.figure(figsize=(15, 10))
sns.pairplot(popular_divided, hue="Type")
cm = sns.light_palette("green", as_cmap=True)
table = pd.pivot_table(popular_divided, index=["Type", "Artist.Name", "Genre"])
s = table.style.background_gradient(cmap=cm)
s
billie_ellish = songs[(songs["Artist.Name"] == "Billie Eilish")]
billie_ellish
fig3 = plt.figure(figsize=(15, 10))
gs = fig3.add_gridspec(2, 2)
f3_ax4 = fig3.add_subplot(gs[0, :])
f3_ax5 = fig3.add_subplot(gs[-1:, -1])
f3_ax6 = fig3.add_subplot(gs[-1, -2])
sns.boxplot(
x=most_popular["Popularity"],
y=most_popular["Speechiness."],
data=most_popular,
ax=f3_ax4,
).set_title("Most Popular")
sns.boxplot(
x=medium_popular["Popularity"],
y=medium_popular["Speechiness."],
data=medium_popular,
ax=f3_ax5,
).set_title("Medium Popular")
sns.boxplot(
x=less_popular["Popularity"],
y=less_popular["Speechiness."],
data=less_popular,
ax=f3_ax6,
).set_title("Less Popular")
fig3 = plt.figure(figsize=(15, 10))
gs = fig3.add_gridspec(2, 2)
f3_ax4 = fig3.add_subplot(gs[0, :])
f3_ax5 = fig3.add_subplot(gs[-1:, -1])
f3_ax6 = fig3.add_subplot(gs[-1, -2])
sns.lineplot(
x=most_popular["Popularity"],
y=most_popular["Acousticness.."],
data=most_popular,
ax=f3_ax4,
).set_title("Most Popular")
sns.lineplot(
x=medium_popular["Popularity"],
y=medium_popular["Acousticness.."],
data=medium_popular,
ax=f3_ax5,
).set_title("Medium Popular")
sns.lineplot(
x=less_popular["Popularity"],
y=less_popular["Acousticness.."],
data=less_popular,
ax=f3_ax6,
).set_title("Less Popular")
fig3 = plt.figure(figsize=(15, 10))
gs = fig3.add_gridspec(2, 2)
f3_ax4 = fig3.add_subplot(gs[0, :])
f3_ax5 = fig3.add_subplot(gs[-1:, -1])
f3_ax6 = fig3.add_subplot(gs[-1, -2])
sns.boxplot(
x=most_popular["Popularity"],
y=most_popular["Valence."],
data=most_popular,
ax=f3_ax4,
).set_title("Most Popular")
sns.boxplot(
x=medium_popular["Popularity"],
y=medium_popular["Valence."],
data=medium_popular,
ax=f3_ax5,
).set_title("Medium Popular")
sns.boxplot(
x=less_popular["Popularity"],
y=less_popular["Valence."],
data=less_popular,
ax=f3_ax6,
).set_title("Less Popular")
songs.isnull().sum()
less = ["Shawn Mendes", "Lauv"]
medium = [
"Ariana Grande",
"Ed Sheeran",
"Lil Nas X",
"DJ Snake",
"Lewis Capaldi",
"Chris Brown",
"Y2K",
"Jhay Cortez",
"Tones and I",
"Ali Gatie",
"J Balvin",
"The Chainsmokers",
"Ariana Grande",
"Maluma",
"Young Thug",
"Katy Perry",
"Martin Garrix",
"Ed Sheeran",
"Jonas Brothers",
"Kygo",
"Lady Gaga",
"Khalid",
"ROSALÍA",
"Marshmello",
"Nicky Jam",
"Marshmello",
"The Chainsmokers",
]
most = [
"Anuel AA",
"Post Malone",
"Lil Tecca",
"SamSmith",
"Bad Bunny",
"Drake",
"J Balvin",
"Post Malone",
"Lizzo",
"MEDUZA",
"Lil Nas X",
"Lunay",
"Daddy Yankee",
"Taylor Swift",
]
common = ["Billie Eilish", "Sech"]
def encoding(x):
if x in less:
return 0
elif x in medium:
return 1
elif x in common:
return 2
elif x in most:
return 3
songs["Artist_Dummy"] = songs["Artist.Name"].apply(encoding)
songs.head()
songs["Genre"].unique()
final = pd.get_dummies(songs, columns=["Genre"], drop_first=True)
final.head()
sns.distplot(final["Popularity"], kde=True, bins=40)
final.drop(["Artist.Name", "Track.Name", "Loudness..dB.."], axis=1, inplace=True)
final = final.fillna(0)
final.isna().sum()
X = final.drop("Popularity", axis=1)
y = final["Popularity"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=101
)
regression = LinearRegression()
regression.fit(X_train, y_train)
y_pred = regression.predict(X_test)
df_output = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
print(df_output)
# Checking the accuracy of Linear Regression
print("Mean Absolute Error:", metrics.mean_absolute_error(y_test, y_pred))
print("Mean Squared Error:", metrics.mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error:", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# KNN
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=101
)
sc = StandardScaler()
sc.fit(X_train)
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# sorted(sklearn.neighbors.VALID_METRICS['brute'])
error = []
for i in range(1, 30):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10, 10))
plt.plot(
range(1, 30),
error,
color="black",
marker="o",
markerfacecolor="cyan",
markersize=10,
)
plt.title("Error Rate K value")
plt.xlabel("K Value")
plt.ylabel("Mean error")
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=101
)
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
df_output = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
print(df_output)
scores = cross_val_score(gnb, X_train, y_train, scoring="accuracy", cv=3).mean() * 100
print(scores)
# Linear SVM model
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=101
)
LinSVC = LinearSVC(penalty="l2", loss="squared_hinge", dual=True)
LinSVC.fit(X_train, y_train)
y_pred = LinSVC.predict(X_test)  # use the fitted LinearSVC here, not the earlier GaussianNB
df_output = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
print(df_output)
scores = (
cross_val_score(LinSVC, X_train, y_train, scoring="accuracy", cv=3).mean() * 100
)
print(scores)
|
"""
一切从数据开始
单值与多值 -> 数据管理:一行、一列、key-value对应、无序集合、表格、表之间关系 -> 高维数据
数据从哪里来?
指定、计算、文件、网络、其他系统
单值(变量)
如何创建、修改、输出?
基本数据类型
整型 int integer 整数
浮点型 float float 实数
字符串 str string 文字
布尔型 bool boolean True/False
基本运算
算术运算 + - * / %
逻辑运算 and or not
比较运算 > < >= <= == !=
多值(容器)
1) 列表:行的形式保存、index、长度
2) 字典:key-value形式保存、key唯一
3) 元组:不可变的列表
4) 集合:数学集合、不可重复、无序
"""
# How to read an Excel file
import openpyxl as xl
excel = xl.load_workbook("data/house.xlsx")
ws = excel["train"]
ws.max_row, ws.max_column, ws.cell(1, 1).value
"""
列表:创建、加入数据、删除数据、获取数据(切片)、index、count、排序、sum、max、min、in/not in
注:关注列表中数据的类型
"""
"""
案例:把excel中的一行或一列加入列表
"""
"""
for循环:4种写法
for i in range(10):
for n in 容器:
for i, n in enumerate(容器):
for a, b in zip(容器1, 容器2, 容器3):
"""
"""
案例:股票价格、收益计算
"""
"""
练习:求价格的最大值在第几个
"""
"""
条件:if....else....
条件运算:> < >= <= == !=
逻辑运算:and or
"""
"""
案例:对数据进行筛选
"""
"""
小技巧, 大作用
对数据进行筛选:filter函数
对数据进行统一计算:map函数
"""
def filter_data(x):
return x >= 10
list(filter(filter_data, [1, 3, 4, 10, 15]))
def map_data(x):
return x * 10
list(map(map_data, [1, 3, 4, 10, 15]))
def filter_data(x):
return x[0] > 2 and x[1] < 5
t1 = list(filter(filter_data, zip([1, 3, 4, 10, 15], [1, 2, 3, 4, 5, 6])))
t2 = list(zip(*t1))
t1, t2
"""
练习:重写之前的案例与练习
"""
"""
随机:shuffle, choice, choices,sample, random, randint, uniform, normalvariate
例:计算随机数的个数
"""
import random
random.choices(range(0, 100), k=10), random.sample(range(0, 100), k=10)
"""
小技巧:计数
"""
import collections
d = random.choices(range(0, 10), k=1000)
counter = collections.Counter(d)
counter
"""
字典:创建,加入数据,删除数据,获取数据(keys, values)
"""
"""
案例:多股票、多时间收益计算
"""
"""
练习:收益最高的时间段
"""
"""
字典与json文件
"""
import json
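# A minimal sketch of the link between dicts and JSON (kept in memory to stay self-contained)
record = {"name": "AAPL", "prices": [150.0, 153.0]}
text = json.dumps(record)  # dict -> JSON string
json.loads(text)  # JSON string -> dict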
"""
元组:不可修改的列表
"""
"""
集合:去除重复数据;集合运算:差集、交集、并集
"""
set1 = set([1, 2, 3])
set2 = set([1, 2, 4])
set1.difference(set2), set1.intersection(set2), set1.union(set2)
"""
练习:找出两个数据中不同的数据
"""
"""
函数:把完成一个功能代码独立出来,多次使用
函数名,参数,返回值,函数体
"""
|
# # Stock LSTM with MACD
# ## *add technical indicator = Moving Average Convergence Divergence*
# 
import os
print(os.listdir("../input"))
import numpy as np
import pandas as pd
from sklearn import preprocessing
import tensorflow.keras
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Dropout, LSTM, Input, Activation, concatenate
import matplotlib.pyplot as plt
# take 50 history points (~2 months)
history_points = 50
# ## Read Dataset
csv_path = "../input/GOOGL_daily.csv"
# Read Dataset & Normalise it
def csv_to_dataset(csv_path):
data = pd.read_csv(csv_path)
data = data.drop("date", axis=1)
    ## reverse the index because the top row of the .csv holds the most recent price
data = data[::-1]
data = data.reset_index()
data = data.drop("index", axis=1)
print(data)
# normaliser
data_normaliser = preprocessing.MinMaxScaler()
data_normalised = data_normaliser.fit_transform(data)
# using the last {history_points} open high low close volume data points, predict the next open value
ohlcv_histories_normalised = np.array(
[
data_normalised[i : i + history_points].copy()
for i in range(len(data_normalised) - history_points)
]
)
print(ohlcv_histories_normalised.shape)
next_day_open_values_normalised = np.array(
[
data_normalised[:, 0][i + history_points].copy()
for i in range(len(data_normalised) - history_points)
]
)
next_day_open_values_normalised = np.expand_dims(
next_day_open_values_normalised, -1
)
next_day_open_values = np.array(
[
data.loc[:, "1. open"][i + history_points].copy()
for i in range(len(data) - history_points)
]
)
next_day_open_values = np.expand_dims(next_day_open_values, -1)
y_normaliser = preprocessing.MinMaxScaler()
y_normaliser.fit(next_day_open_values)
# add MACD
def calc_ema(values, time_period):
# https://www.investopedia.com/ask/answers/122314/what-exponential-moving-average-ema-formula-and-how-ema-calculated.asp
sma = np.mean(values[:, 3])
ema_values = [sma]
k = 2 / (1 + time_period)
        for i in range(len(values) - time_period, len(values)):
            close = values[i][3]  # column 3 is the (normalised) close price
ema_values.append(close * k + ema_values[-1] * (1 - k))
return ema_values[-1]
# add technical indicators
technical_indicators = []
for his in ohlcv_histories_normalised:
# since we are using his[3] we are taking the SMA of the closing price
sma = np.mean(his[:, 3]) # add SMA
macd = calc_ema(his, 12) - calc_ema(his, 26) # add MACD
technical_indicators.append(
np.array(
[
sma,
macd,
]
)
) # add MACD
technical_indicators = np.array(technical_indicators)
tech_ind_scaler = preprocessing.MinMaxScaler()
technical_indicators_normalised = tech_ind_scaler.fit_transform(
technical_indicators
)
assert (
ohlcv_histories_normalised.shape[0] == next_day_open_values_normalised.shape[0]
)
return (
ohlcv_histories_normalised,
technical_indicators_normalised,
next_day_open_values_normalised,
next_day_open_values,
y_normaliser,
)
(
ohlcv_histories,
technical_indicators,
next_day_open_values,
unscaled_y,
y_scaler,
) = csv_to_dataset(csv_path)
# splitting the dataset up into train and test sets
test_split = (
0.9 # 90% stock-history for training, most-recent 10% stock-history for testing
)
n = int(ohlcv_histories.shape[0] * test_split)
ohlcv_train = ohlcv_histories[:n]
tech_ind_train = technical_indicators[:n] # add technical indicator
y_train = next_day_open_values[:n]
ohlcv_test = ohlcv_histories[n:]
tech_ind_test = technical_indicators[n:] # add technical indicator
y_test = next_day_open_values[n:]
unscaled_y_test = unscaled_y[n:]
print(ohlcv_train.shape)
# ## Build Model
# Build Model (RNN)
lstm_input = Input(shape=(history_points, 5), name="lstm_input")
dense_input = Input(
shape=(technical_indicators.shape[1],), name="tech_input"
) # 2nd input for technical indicator
x = LSTM(50, name="lstm_0")(lstm_input)
x = Dropout(0.2, name="lstm_dropout_0")(x)
# the second branch operates on the second input
lstm_branch = Model(inputs=lstm_input, outputs=x)
y = Dense(20, name="tech_dense_0")(dense_input)
y = Activation("relu", name="tech_relu_0")(y)
y = Dropout(0.2, name="tech_dropout_0")(y)
technical_indicators_branch = Model(inputs=dense_input, outputs=y)
# combine the output of the two branches
combined = concatenate(
[lstm_branch.output, technical_indicators_branch.output], name="concatenate"
)
z = Dense(64, activation="sigmoid", name="dense_pooling")(combined)
z = Dense(1, activation="linear", name="dense_out")(z)
# this model will accept the inputs of the two branches and then output a single value
model = Model(inputs=[lstm_branch.input, technical_indicators_branch.input], outputs=z)
model.summary()
# Compile Model
adam = Adam(learning_rate=0.0005)
model.compile(optimizer=adam, loss="mse")
# ## Train Model
# Train Model
num_epochs = 100
batch_size = 32
model.fit(
x=[ohlcv_train, tech_ind_train],
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
shuffle=True,
validation_split=0.1,
)
# ## Evaluate Model
# Evaluate Model
evaluation = model.evaluate([ohlcv_test, tech_ind_test], y_test)
print(evaluation)
y_test_predicted = model.predict([ohlcv_test, tech_ind_test])
# model.predict returns normalised values, now we scale them back up using the y_scaler from before
y_test_predicted = y_scaler.inverse_transform(y_test_predicted)
# also getting predictions for the entire dataset, just to see how it performs
y_predicted = model.predict([ohlcv_histories, technical_indicators])
y_predicted = y_scaler.inverse_transform(y_predicted)
assert unscaled_y_test.shape == y_test_predicted.shape
real_mse = np.mean(np.square(unscaled_y_test - y_test_predicted))
scaled_mse = real_mse / (np.max(unscaled_y_test) - np.min(unscaled_y_test)) * 100
print(scaled_mse)
# ## Plot Stock Prediction
# ### *Test data is the most recent 10% stock history*
# Plot stock prediction
plt.gcf().set_size_inches(22, 15, forward=True)
start = 0
end = -1
real = plt.plot(unscaled_y_test[start:end], label="real")
pred = plt.plot(y_test_predicted[start:end], label="predicted")
plt.title("symbol = GOOGL")
plt.legend(["Real", "Predicted"])
plt.show()
# ## **Buy/Sell Strategy**
buys = []
sells = []
thresh = 0.2
x = 0
for ohlcv, ind in zip(ohlcv_test, tech_ind_test):
normalised_price_today = ohlcv[-1][0]
normalised_price_today = np.array([[normalised_price_today]])
price_today = y_scaler.inverse_transform(normalised_price_today)
predicted = np.squeeze(y_scaler.inverse_transform(model.predict([[ohlcv], [ind]])))
delta = predicted - price_today
# print(delta)
if delta > thresh:
buys.append((x, price_today[0][0]))
elif delta < -thresh:
sells.append((x, price_today[0][0]))
x += 1
print(buys)
print(sells)
# ## Plot Buy & Sell timing
plt.gcf().set_size_inches(22, 15, forward=True)
start = 0
end = -1
real = plt.plot(unscaled_y_test[start:end], label="real")
pred = plt.plot(y_test_predicted[start:end], label="predicted")
plt.scatter(
list(list(zip(*buys))[0]), list(list(zip(*buys))[1]), c="#00ff00"
) # buy points in green
plt.scatter(
list(list(zip(*sells))[0]), list(list(zip(*sells))[1]), c="#ff0000"
) # sell points in red
# real = plt.plot(unscaled_y[start:end], label='real')
# pred = plt.plot(y_predicted[start:end], label='predicted')
plt.legend(["Real", "Predicted"])
plt.show()
|
import numpy as np
import pandas as pd
import os
import re
import tensorflow as tf
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
GlobalAveragePooling2D,
Activation,
Dropout,
Flatten,
Dense,
Input,
Layer,
)
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.preprocessing.image import (
ImageDataGenerator,
load_img,
img_to_array,
)
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams["font.size"] = 18
MAIN_DIR = "/kaggle/input/fashion-product-images-dataset/fashion-dataset/"
images_df = pd.read_csv(MAIN_DIR + "images.csv")
styles_df = pd.read_csv(MAIN_DIR + "styles.csv", on_bad_lines="skip")
images_df.head()
styles_df.head()
styles_df["filename"] = styles_df["id"].astype(str) + ".jpg"
styles_df.head()
image_files = os.listdir(MAIN_DIR + "images")
styles_df["present"] = styles_df["filename"].apply(lambda x: x in image_files)
styles_df.head()
styles_df["present"].value_counts()
styles_df = styles_df[styles_df["present"]].reset_index(drop=True)
styles_df.head()
styles = styles_df.sample(10000).reset_index(drop=True)
styles.head()
IMG_SIZE = (224, 224)
datagen = ImageDataGenerator(rescale=1 / 255.0)
generator = datagen.flow_from_dataframe(
dataframe=styles,
directory=MAIN_DIR + "images",
target_size=IMG_SIZE,
x_col="filename",
class_mode=None,
batch_size=32,
shuffle=False,
)
base_model = VGG16(include_top=False, input_shape=IMG_SIZE + (3,))
base_model.trainable = False
input_layer = Input(shape=IMG_SIZE + (3,))
x = base_model(input_layer)
output = GlobalAveragePooling2D()(x)
embeddings = Model(input_layer, output)
embeddings.summary()
X = embeddings.predict(generator, verbose=1)
from sklearn.decomposition import PCA
pca = PCA(2)
X_pca = pca.fit_transform(X)
styles[["pc1", "pc2"]] = X_pca
plt.figure(figsize=(20, 12))
sns.scatterplot(x="pc1", y="pc2", data=styles, hue="masterCategory")
plt.show()
def read_img(image_path):
path = MAIN_DIR + "images/"
image = load_img(os.path.join(path, image_path), target_size=(224, 224, 3))
image = img_to_array(image)
image = image / 255.0
return image
import random
from sklearn.neighbors import KNeighborsClassifier
y = styles["id"]
nn = KNeighborsClassifier(n_neighbors=7)
nn.fit(X, y)
for _ in range(10):
    i = random.randint(0, len(styles) - 1)  # random row index, kept within bounds
img1 = read_img(styles.loc[i, "filename"])
dist, index = nn.kneighbors(X=X[i, :].reshape(1, -1))
plt.figure(figsize=(4, 4))
plt.imshow(img1)
plt.title("Image-->")
plt.axis("off")
plt.figure(figsize=(20, 20))
for i in range(1, 6):
plt.subplot(1, 5, i)
plt.subplots_adjust(hspace=0.5, wspace=0.3)
image = read_img(styles.loc[index[0][i], "filename"])
plt.imshow(image)
plt.title(f"Product in same catagory #{i}")
plt.axis("off")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
from sklearn.model_selection import train_test_split
apndcts = pd.read_csv("../input/apndcts/apndcts.csv")
print(apndcts.shape)
data_train, data_test = train_test_split(apndcts, test_size=0.3)
print(data_train.shape)
print(data_test.shape)
# Kfold
import pandas as pd
from sklearn.model_selection import KFold
apndcts = pd.read_csv("../input/apndcts/apndcts.csv")
kf = KFold(n_splits=9)
for train_index, test_index in kf.split(apndcts):
data_train = apndcts.iloc[train_index]
data_test = apndcts.iloc[test_index]
print(data_train.shape)
print(data_test.shape)
from sklearn.utils import resample
X = apndcts.iloc[:, 0:9]
resample(X, n_samples=200, random_state=1)
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
data = pd.read_csv("../input/apndcts/apndcts.csv")
predictors = data.iloc[:, 0:7] # Segregating the predictors
target = data.iloc[:, 7] # Segregating the target/class
predictors_train, predictors_test, target_train, target_test = train_test_split(
predictors, target, test_size=0.3, random_state=123
) # Holdout of data
dtree_entropy = DecisionTreeClassifier(
criterion="entropy", random_state=100, max_depth=3, min_samples_leaf=5
) # Model is initialized
# Finally the model is trained
model = dtree_entropy.fit(predictors_train, target_train)
prediction = model.predict(predictors_test)
acc_score = 0
acc_score = accuracy_score(target_test, prediction, normalize=True)
print(acc_score)
conf_mat = confusion_matrix(target_test, prediction)
print(conf_mat)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import glob # For importing datasets
from tqdm.auto import tqdm # For progress bar
from sklearn import *
import seaborn as sns
import matplotlib.pyplot as plt
p = "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/"
# train = glob.glob(p+'train/**/**') # Grabs all three training datasets
# train_tdcsfog_csv_list = glob.glob(p+'train/tdcsfog/**') # Grabs only the tdcsfog train dataset
train_defog_csv_list = glob.glob(
    p + "train/defog/**"
)  # Grabs only the defog train dataset
train_notype_csv_list = glob.glob(p + "train/notype/**")
# test = glob.glob(p+'test/**/**')
# subjects = pd.read_csv(p+'subjects.csv')
# tasks = pd.read_csv(p+'tasks.csv')
# sub = pd.read_csv(p+'sample_submission.csv')
import pathlib
def reader(f):
try:
df = pd.read_csv(f, usecols=["Time", "Valid", "Task"])
df["Id"] = f.split("/")[-1].split(".")[0]
df["Dataset"] = pathlib.Path(f).parts[-2]
# df = pd.merge(df, meta, how='left', on='Id')
return df
except:
pass
# Concatenates the defog train rows
train_defog = pd.concat([reader(f) for f in tqdm(train_defog_csv_list)])
train_defog = train_defog.reset_index(drop=True)
print(train_defog.shape)
defog_df = pd.DataFrame(train_defog)
train_defog["Valid"].value_counts() / train_defog.shape[0]
len(np.unique(train_defog["Id"]))
# Concatenates the notype train rows
train_notype = pd.concat([reader(f) for f in tqdm(train_notype_csv_list)])
train_notype = train_notype.reset_index(drop=True)
print(train_notype.shape)
# # Defog
# ## stem plot
trial_length_defog = pd.DataFrame(
defog_df[["Id"]].groupby(["Id"]).size(), columns=["Length"]
)
plot_order = trial_length_defog.rank().astype(int) - 1
trial_length_defog["Order"] = plot_order
trial_length_defog.sort_values("Order", ascending=False, inplace=True)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.stem(np.arange(0, 91, 1), trial_length_defog["Length"] / 100)
mn, mx = ax1.get_ylim()
ax2.set_ylim(0, mx / 60)
ax1.set_xlabel("Trial")
ax1.set_ylabel("Seconds", color="g")
ax2.set_ylabel("Minutes", color="orange")
plt.title("Length of Defog Trials")
plt.savefig("defog lengths.png", dpi=300, bbox_inches="tight")
# ## histogram
defog_valid = defog_df[["Id", "Valid"]].groupby(["Id"]).mean()
defog_valid.head()
defog_task = defog_df[["Id", "Task"]].groupby(["Id"]).mean()
defog_task.head()
plt.hist(defog_valid["Valid"], bins=50)
plt.xlabel("Percent Valid Timestamps")
plt.ylabel("Frequency")
plt.title("Distribution of Percentage of Valid Timestamps")
plt.savefig("valid defog percent.png", dpi=300)
plt.hist(defog_task["Task"], bins=50)
plt.xlabel("Percent Task Timestamps")
plt.ylabel("Frequency")
plt.title("Distribution of Percentage of Task Timestamps")
plt.savefig("task defog percent.png", dpi=300)
# ## heatmap
defog_valid_wide = defog_df.pivot(index="Id", columns="Time", values="Valid")
sns.heatmap(defog_valid_wide.astype(float), cmap="crest")
plt.xlabel("Timestamp")
plt.ylabel("Trial")
plt.title("Valid Timestamps: Defog")
plt.savefig("valid defog.png", dpi=300, bbox_inches="tight")
defog_task_wide = defog_df.pivot(index="Id", columns="Time", values="Task")
|
# ## Kernel description
# Adding new features to the `train_meta` dataframe for one batch only:
# 1) Statistical features, such as the total number of pulses or the average relative time within one event, and so on;
# 2) Predictions of different models as features (***work in progress***).
#
# The Polars library was used for feature engineering because it allows processing all 660 batches and 131,953,924 events many times faster than Pandas.
#
# This Kernel has separate functions that you can use and modify to create your own features.
#
# The resulting feature table is shown at the end of the notebook.
# Please don't hesitate to leave your comments on this Kernel: use the features table for your models and share the results.
# ## Updates
# **Ver. 2:** Removed the use of Pandas functions to create features (now Polars only). Separate functions added for feature engineering. Added new features to the `train_meta` data as well. Feature engineering was applied to one batch only due to a memory error when processing all batches.
# ## Sources
# For this Kernel, [[日本語/Eng]🧊: FeatureEngineering](https://www.kaggle.com/code/utm529fg/eng-featureengineering) kernel was used, as well as separate articles about Polars library and feature engineering:
# 1) [📊 Построение и отбор признаков. Часть 1: feature engineering (RUS)](https://proglib.io/p/postroenie-i-otbor-priznakov-chast-1-feature-engineering-2021-09-15)
# 2) [Polars: Pandas DataFrame but Much Faster](https://towardsdatascience.com/pandas-dataframe-but-much-faster-f475d6be4cd4)
# 3) [Polars: calm](https://calmcode.io/polars/calm.html)
# 4) [Polars - User Guide](https://pola-rs.github.io/polars-book/user-guide/coming_from_pandas.html)
# ## Import libraries
# List all installed packages and package versions
#!pip freeze
import numpy as np
import os
import pandas as pd
import polars as pl
from tqdm.notebook import tqdm
# Check types info for memory usage optimization:
int_types = ["uint64", "int64"]
for it in int_types:
print(np.iinfo(it))
# Check existing paths:
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# We will try Polars, one of the fastest dataframe libraries, because it uses parallelization and cache-efficient algorithms to speed up tasks.
#
# Let's create LazyFrames from all existing inputs:
INPUT_DIR = "/kaggle/input/icecube-neutrinos-in-deep-ice"
train_meta = pl.scan_parquet(f"{INPUT_DIR}/train_meta.parquet")
sensor_geometry = pl.scan_csv(f"{INPUT_DIR}/sensor_geometry.csv")
batches_dict = {}
for i in range(1, train_meta.collect()["batch_id"].max() + 1):
key = str("train_batch_" + str(i))
batches_dict[key] = pl.scan_parquet(f"{INPUT_DIR}/train/batch_{i}.parquet")
# ## Feature engineering
def add_cols_to_sensor_geometry(sensor):
"""
add new columns for groupby.sum() function
Parameters:
-----------
sensor : LazyFrame
existing 'sensor_geometry' data
Returns:
-----------
sensor : LazyFrame
updated 'sensor_geometry' data with additional
null-filled columns
"""
sensor = sensor.with_columns(
[
(pl.col("sensor_id") * 0).alias("sensor_count"),
(pl.col("sensor_id") * 0.0).alias("charge_sum"),
(pl.col("sensor_id") * 0).alias("time_sum"),
(pl.col("sensor_id") * 0).alias("auxiliary_sum"),
]
)
return sensor
sensor_geometry = add_cols_to_sensor_geometry(sensor_geometry).collect()
sensor_geometry.head()
def add_stat_features(meta):
"""
add new statistics features into the selected data
Parameters:
-----------
meta : LazyFrame
existing 'train_meta' data
Returns:
-----------
meta : LazyFrame
updated 'train_meta' data with additional columns:
* 'n_events_per_batch' (can be useful when creating
features for all batches at once)
* 'pulse_count' - count of pulses detected
"""
return meta.with_columns(
[
pl.col("event_id")
.count()
.over("batch_id")
.cast(pl.UInt64)
.alias("n_events_per_batch"),
(pl.col("last_pulse_index") - pl.col("first_pulse_index") + 1).alias(
"pulse_count"
),
]
)
train_meta = train_meta.pipe(add_stat_features).collect()
train_meta.head()
# There is not enough memory to execute this code for all batches, which is why I commented it out. Perhaps after some additional memory optimization in the future, I can use one of these options to create features for all batches at once.
# def add_time_mean(train_meta, batches_dict):
# batches = []
# for batch_name, batch in tqdm(batches_dict.items()):
# batch_id = int(batch_name.split("_")[-1])
# batch_df = batch.select(['sensor_id', 'time', 'event_id']).collect()
# batch_len = len(batch_df)
# batch = batch_df.with_columns((pl.Series([batch_id] * batch_len)).alias('batch_id'))
# batches.append(batch)
# all_batches = pl.concat(batches)
# time_mean = all_batches.groupby('event_id').agg(
# pl.col('time').mean().alias('time_mean'))
# train_meta_with_time_mean = train_meta.join(
# time_mean, on='event_id', how='inner')
# return train_meta_with_time_mean
# %%time
# add_time_mean(train_meta, batches_dict)
# def create_batch_1_features(batch_dict, sensor, meta):
# '''
# Creating new meta_total data including statistics features from batches
# Parameters:
# -----------
# batch_dict : dictionary where keys are polars LazyFrames
# sensor : LazyFrame
# meta : train_meta pl.DataFrame
# Returns:
# -----------
# sensor : polars DataFrame
# '''
# meta_tmp = pl.DataFrame()
# meta_total = pl.DataFrame() # for output
# for key in tqdm(batch_dict):
# batch = batch_dict[key].collect()
# # count detected sensor
# batch_tmp = batch['sensor_id'].value_counts()
# # cast and join
# batch_tmp = batch_tmp.with_columns([
# pl.col('sensor_id').cast(pl.Int64),
# pl.col('counts').cast(pl.Int64)
# ])
# sensor = sensor.join(batch_tmp, on='sensor_id', how='left')
# # groupby sensor_id and sum
# batch_tmp = batch.select(pl.col(['sensor_id','time','charge','auxiliary'])).groupby(['sensor_id']).sum()
# # cast and join
# batch_tmp = batch_tmp.with_columns(
# [pl.col('sensor_id').cast(pl.Int64),
# pl.col('auxiliary').cast(pl.Int64)])
# sensor = sensor.join(batch_tmp,on='sensor_id',how='left')
# sensor = sensor.fill_null(0)
# # add total value
# sensor = sensor.with_columns(
# [(pl.col('sensor_count') + pl.col('counts')).alias('sensor_count'),
# (pl.col('time_sum') + pl.col('time')).alias('time_sum'),
# (pl.col('charge_sum') + pl.col('charge')).alias('charge_sum'),
# (pl.col('auxiliary_sum') + pl.col('auxiliary')).alias('auxiliary_sum')])
# # exclude unnecessary columns
# sensor = sensor.select(pl.exclude(['counts','time','charge','auxiliary']))
# # groupby event_id
# batch_tmp = batch.select(pl.col(['event_id','time','charge','auxiliary'])).groupby(['event_id']).sum()
# # cast and join
# batch_tmp = batch_tmp.with_columns(pl.col('auxiliary').cast(pl.Int64))
# meta_tmp = meta.join(batch_tmp,on='event_id',how='inner')
# # add total value
# meta_tmp = meta_tmp.with_columns(
# [(pl.col('time')).alias('time_sum'),
# (pl.col('charge')).alias('charge_sum'),
# (pl.col('auxiliary')).alias('auxiliary_sum')])
# # exclude unnecessary columns
# meta_tmp = meta_tmp.select(pl.exclude(['time','charge','auxiliary']))
# # append to output
# meta_total = pl.concat([meta_total, meta_tmp])
# return meta_total
def create_batch_features(batch_dict, key, sensor, meta):
"""
Creating new meta_total data including statistics features
for one selected batch only
Parameters:
-----------
batch_dict : dict
keys - str, values - LazyFrames
key : str
name of batch
sensor : LazyFrame
meta : polars DataFrame
existing 'train_meta' data
Returns:
-----------
sensor : polars DataFrame
meta_total : polars DataFrame
"""
# for output
meta_tmp = pl.DataFrame()
meta_total = pl.DataFrame()
batch = batch_dict[key].collect()
# count detected sensor
batch_tmp = batch["sensor_id"].value_counts()
# cast and join
batch_tmp = batch_tmp.with_columns(
[pl.col("sensor_id").cast(pl.Int64), pl.col("counts").cast(pl.Int64)]
)
sensor = sensor.join(batch_tmp, on="sensor_id", how="left")
# groupby sensor_id and sum
batch_tmp = (
batch.select(pl.col(["sensor_id", "time", "charge", "auxiliary"]))
.groupby(["sensor_id"])
.sum()
)
# cast and join
batch_tmp = batch_tmp.with_columns(
[pl.col("sensor_id").cast(pl.Int64), pl.col("auxiliary").cast(pl.Int64)]
)
sensor = sensor.join(batch_tmp, on="sensor_id", how="left")
sensor = sensor.fill_null(0)
# add total value
sensor = sensor.with_columns(
[
(pl.col("sensor_count") + pl.col("counts")).alias("sensor_count"),
(pl.col("time_sum") + pl.col("time")).alias("time_sum"),
(pl.col("charge_sum") + pl.col("charge")).alias("charge_sum"),
(pl.col("auxiliary_sum") + pl.col("auxiliary")).alias("auxiliary_sum"),
]
)
# exclude unnecessary columns
sensor = sensor.select(pl.exclude(["counts", "time", "charge", "auxiliary"]))
# groupby event_id
batch_tmp = (
batch.select(pl.col(["event_id", "time", "charge", "auxiliary"]))
.groupby(["event_id"])
.sum()
)
# cast and join
batch_tmp = batch_tmp.with_columns(pl.col("auxiliary").cast(pl.Int64))
meta_tmp = meta.join(batch_tmp, on="event_id", how="inner")
# add total value
meta_tmp = meta_tmp.with_columns(
[
(pl.col("time")).alias("time_sum"),
(pl.col("charge")).alias("charge_sum"),
(pl.col("auxiliary")).alias("auxiliary_sum"),
]
)
# exclude unnecessary columns
meta_tmp = meta_tmp.select(pl.exclude(["time", "charge", "auxiliary"]))
# append to output
meta_total = pl.concat([meta_total, meta_tmp])
return sensor, meta_total
# apply the feature engineering to batch 1 only (the function returns the updated sensor and meta data)
sensor_geometry, meta_batch = create_batch_features(
    batches_dict, "train_batch_1", sensor_geometry, train_meta
)
meta_batch.head()
# feature engineering
def feature_engineering(sensor, meta_total):
"""
    Compute mean and ratio features for the sensor and meta_total data
    of the selected batch
Parameters:
-----------
sensor : polars DataFrame
meta_total : polars DataFrame
Returns:
-----------
sensor : polars DataFrame
meta_total : polars DataFrame
"""
sensor = sensor.with_columns(
[
(pl.col("sensor_count") / len(meta_total)).alias("sensor_count_mean"),
(pl.col("time_sum") / pl.col("sensor_count")).alias("time_mean"),
(pl.col("charge_sum") / pl.col("sensor_count")).alias("charge_mean"),
(pl.col("auxiliary_sum") / pl.col("sensor_count")).alias("auxiliary_ratio"),
]
)
meta_total = meta_total.with_columns(
[
(pl.col("time_sum") / pl.col("pulse_count")).alias("time_mean"),
(pl.col("charge_sum") / pl.col("pulse_count")).alias("charge_mean"),
(pl.col("auxiliary_sum") / pl.col("pulse_count")).alias("auxiliary_ratio"),
]
)
# select and sort columns
sensor = sensor.select(
pl.col(
[
"sensor_id",
"x",
"y",
"z",
"sensor_count",
"sensor_count_mean",
"time_mean",
"charge_mean",
"auxiliary_ratio",
]
)
)
meta_total = meta_total.select(
pl.col(
[
"batch_id",
"event_id",
"first_pulse_index",
"last_pulse_index",
"azimuth",
"zenith",
"pulse_count",
"time_mean",
"charge_mean",
"auxiliary_ratio",
]
)
)
return sensor, meta_total
sensor_geometry, meta_batch = feature_engineering(sensor_geometry, meta_batch)
display(sensor_geometry.head())
meta_batch.head()
|
# # NOTE
# ### Please DO read this markdown carefully.
# This kernel is just a starter kernel, intended only for beginners who want a starting point for video analysis using Deep Learning. It is a mixture of ideas from different public kernels, but it aims to give beginners a complete walkthrough so they do not get lost in the complex code written by very proficient programmers.
# I cannot repeat everything in every kernel, and I would feel bad if I learned that some beginner was still unable to understand what is going on. So before you start here, even if you already have some knowledge of NNs and related topics,
# ### Please do [check out this kernel](https://www.kaggle.com/deshwalmahesh/bengali-ai-complete-beginner-tutorial-95-acc) about Keras, NNs, images and much more before you start, because it contains many very useful links for beginners.
# # Problem Statement
# If you haven't visited the kernel suggested above, please go there and check the links before you start.
# For this problem we have been provided a dataset of videos that have been altered using [Deep Fakes](https://www.youtube.com/watch?v=gLoI9hAX9dw). Deep fakes are audio, video or image data altered using deep neural networks, mostly with an (Encoder - Decoder) structure. So what is an Encoder - Decoder and how does it work? To answer this, suppose we have two players: one is very advanced in skill but lacks physical ability, and the other is just the opposite. What if, in the far future, we could mutate people on demand? Then we could give the features of one to the other. This is essentially how features and deep fakes work.
# To build intuition about features, take the example of different fruits. Each fruit has a different shape, size, weight, color and so on BUT... if we have enough different fruits, can we classify them or describe their similarities? Yes, we can. This is where features come into play. Even if fruits are not closely related, we can still group them somehow, say by taste, origin, continent or something else.
# In terms of images or videos (videos are just sequences of images playing with your brain), they resemble fruits, and our model ```just finds out the features``` somehow. This happens through the use of encoders and decoders. We train our computers to extract the features of two different subjects, say A and B, using encoders. Then, once the encoders have produced the features, we feed the features of A into the decoder of B. Complex? No. It is just like the mutation above: we simply swapped (on one side, to be precise) the features in the encoded dimensions. It is just like Messi and Ronaldo training under some strict regime so that either Ronaldo gets the agility and dribbling of Messi, or Messi gets the physique and power of Ronaldo.
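# Below is a minimal, illustrative sketch of this idea using a single shared encoder and one decoder per person (a common deepfake training setup). The 64x64 face size, the dense layers and every name here are arbitrary assumptions for illustration only, not the competition pipeline or the code used later in this notebook.
from keras.layers import Input, Dense, Flatten, Reshape
from keras.models import Model
def build_decoder(name):
    # each person gets their own decoder that turns shared features back into a face
    code = Input(shape=(256,))
    pixels = Dense(64 * 64 * 3, activation="sigmoid")(code)
    return Model(code, Reshape((64, 64, 3))(pixels), name=name)
face = Input(shape=(64, 64, 3))  # a small aligned face crop
features = Dense(256, activation="relu")(Flatten()(face))  # shared encoder features
encoder = Model(face, features, name="shared_encoder")
decoder_a = build_decoder("decoder_person_a")
decoder_b = build_decoder("decoder_person_b")
# train (encoder -> decoder_a) on faces of A and (encoder -> decoder_b) on faces of B;
# at swap time a face of A goes through the shared encoder and is rendered by decoder_b
autoencoder_a = Model(face, decoder_a(encoder(face)))
autoencoder_b = Model(face, decoder_b(encoder(face)))
autoencoder_a.compile(optimizer="adam", loss="mse")
autoencoder_b.compile(optimizer="adam", loss="mse")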
# # Approach
# In this tutorial we just want to start the journey of finding the fakes using a CNN model, which I'll commit in the next version. This notebook describes every step BEFORE training, i.e. how to get everything ready up to the training stage.
# The idea is to extract the frames from the videos, then detect faces (because only the faces are altered) and then combine some faces to train a Fake vs Real classifier. Simple!!!!
# MTCNN is an implementation of the [Research Paper](https://arxiv.org/abs/1604.02878) on using a [Convolutional Neural Network](https://www.youtube.com/watch?v=FmpDIaiMIeA) to detect faces in an image. It is a pretrained model that detects faces with a degree of confidence, such as 0.9 or 90% confidence that there is a face. It can return multiple faces, with each face's coordinates within the image array where a face was found. You can plot a rectangle around the box or crop the image to see the detected face, which is done later in the tutorial. It also returns the coordinates of the eyes and nose. More about the use of MTCNN can be learned either from the [official documentation of the library](https://pypi.org/project/mtcnn/) or from [this website](https://machinelearningmastery.com/how-to-perform-face-detection-with-classical-and-deep-learning-methods-in-python-with-keras/). These are very good resources. Please do go through them.
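# Below is a minimal sketch of how MTCNN is typically called. `frame` here is just a placeholder RGB array (real frames come from the videos later in this notebook), and the 0.9 confidence threshold is an arbitrary illustrative choice.
from mtcnn.mtcnn import MTCNN
import numpy as np
frame = np.zeros((256, 256, 3), dtype=np.uint8)  # placeholder for a real RGB video frame
detector = MTCNN()
for face in detector.detect_faces(frame):  # each result has 'box', 'confidence' and 'keypoints'
    if face["confidence"] > 0.9:
        x, y, w, h = face["box"]  # box is (x, y, width, height)
        cropped = frame[y : y + h, x : x + w]  # crop just the detected face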
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import cv2
from IPython.display import HTML
from base64 import b64encode
from tqdm import tqdm
from skimage.transform import resize
from skimage.metrics import structural_similarity
from keras.layers import (
Dense,
Dropout,
Conv2D,
Conv3D,
LSTM,
Embedding,
BatchNormalization,
Input,
LeakyReLU,
ELU,
GlobalMaxPooling2D,
GlobalMaxPooling3D,
)
from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from mtcnn.mtcnn import MTCNN
from matplotlib.patches import Rectangle
from tensorflow import random as tf_rnd
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
INPUT_PATH = "/kaggle/input/deepfake-detection-challenge/"
TEST_PATH = "test_videos/"
TRAIN_PATH = "train_sample_videos/"
SIZE = 128
BATCH_SIZE = 32
CASCADE_PATH = (
cv2.data.haarcascades
)  # with this line, you do not need to download the cascade XML files from the web; they are used later.
SEED = 13
np.random.seed(SEED) # set random seed to get reproducible results
tf_rnd.set_seed(SEED)  # set the TensorFlow random seed as well, for reproducibility
plt.style.use("seaborn-whitegrid")  # purely cosmetic plot styling
# # Getting Files and EDA
# iterate through the directory to get all the file names and save them as a DataFrame
train_files = []
ext = []
for _, _, filenames in os.walk(INPUT_PATH + TRAIN_PATH): # iterate within the directory
for filename in filenames: # get all the files inside directory
splitted = filename.split(
"."
        )  # split the file name at "." to separate the name from the extension (e.g. .mp4, .json)
train_files.append(splitted[0]) # first part is name of file
ext.append(splitted[1]) # second one is extension type
files_df = pd.DataFrame({"filename": train_files, "type": ext})
files_df.head()
files_df.shape # 401 files
files_df["type"].value_counts() # 400 mp4 files and 1 json file
meta_df = pd.read_json(
INPUT_PATH + TRAIN_PATH + "metadata.json"
)  # We have to transpose the DataFrame
meta_df.head()
meta_df = meta_df.T
meta_df.head()
meta_df.isna().sum() # 77 original files are missing
meta_df["label"].value_counts().plot(
kind="pie", autopct="%1.1f%%", label="Real Vs Fake"
)
# # Extracting and Processing Features
class VideoFeatures:
"""
    Class for working with features related to videos, such as getting frames, plotting frames, playing videos etc.
"""
def get_frames(self, filepath, first_only=False, show=False):
"""
method for getting the frames from a video file
args:
filepath: exact path of the video file
first_only: whether to detect the first frame only or all of the frames
out:
frame: first frame in form of numpy array
"""
cap = cv2.VideoCapture(filepath)
        # captures the video. Think of it as if life is a movie and we ask the method to focus on a particular event,
        # which is our video in this case. It will concentrate on that video.
if not first_only: # whether to get all the frames or not
all_frames = []
while cap.isOpened(): # as long as all the frames have been traversed
ret, frame = cap.read()
# capture the frame. Again, if life is a movie, this function acts as camera
if ret == True:
all_frames.append(frame)
if cv2.waitKey(1) & 0xFF == ord(
"q"
): # break in between by pressing the key given
break
else:
break
else:
ret, all_frames = cap.read()
if show:
plt.imshow(cv2.cvtColor(all_frames, cv2.COLOR_BGR2RGB))
                # plot the image; cv2 stores channels as Blue, Green, Red rather than RGB, so convert the
                # matrices to the proper ordering
cap.release()
# release whatever was held by the method for say, resources and the video itself
return all_frames
def play_video(self, filepath):
"""
        Method that uses HTML inside Python to embed code in the notebook kernel so that there is an HTML-page-
        like element where the supported video can be played
args:
filepath: path of the file which you want to play
"""
video = open(filepath, "rb").read() # read video file
dec_data = "data:video/mp4;base64," + b64encode(video).decode()
        # encode the video bytes as a base64 string. Funny! The video is now a string
return HTML(
"""<video width=350 controls><source src="%s" type="video/mp4"></video>"""
% dec_data
)
        # embed the string in a <video> tag so the notebook kernel can render and play it
# ## HaarCascade
# Below is something that was very new to me when I first saw it. It is called a HaarCascade. A HaarCascade is basically a classifier used to detect, in a source image, the object it has been trained for. Here the source is our image and it detects faces and different facial features like a smile, a side profile, eyes etc. A HaarCascade ships as an XML file containing the trained classifier, written once by very tech-savvy people so that we do not have to redo that work again and again. Each XML file is trained for a different feature. As this is the first commit, I'll keep it simple; when tuning the model, we will have to tweak lots of parameters.
# To get insight about the working of HaarCascades I recommend you [this insightful blog](http://www.willberger.org/cascade-haar-explained/)
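# A quick peek (optional illustration, not part of the original flow) at the cascade XML files
# that ship with OpenCV; each file is a pretrained classifier for one kind of object or feature
# (frontal face, eyes, smile, full body, ...), which is why below we only have to pick the right file.
import os
import cv2

print(sorted(f for f in os.listdir(cv2.data.haarcascades) if f.endswith(".xml"))[:8])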
class FrameProcessor:
"""
class to process the images such as resizing, changing colors, detect faces from frames etc
"""
def __init__(self):
"""
Constructor where the data from OpenCV is used directly to find the Faces.
"""
self.face_cascade = cv2.CascadeClassifier(
CASCADE_PATH + "haarcascade_frontalface_default.xml"
)
# XML file which has code for Frontal Face
self.eye_cascade = cv2.CascadeClassifier(CASCADE_PATH + "haarcascade_eye.xml")
# it extracts eyes
def detect_face_eye(
self,
img,
scaleFactor=1.3,
minNeighbors=5,
minSize=(50, 50),
get_cropped_face=False,
):
"""
Method to detect face and eye from the image
args:
img: image in the form of numpy array pixels
            scaleFactor: float. Indicates how much the image size is reduced at each image scale;
            a lower value uses a smaller step for downscaling.
            minNeighbors: int, number of neighbouring detections required to keep a face. Pixels around the
            eyes are correlated with the pixels right next to them, not with pixels 1000 positions away at the feet.
            minSize: tuple. The smaller the face in the image, the lower the minSize value should be set.
            get_cropped_face: bool. Whether to return only the cropped face rather than the full image
out:
image with detected faces
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# convert image to Grayscale to make better use of resources
faces = self.face_cascade.detectMultiScale(
gray, scaleFactor=scaleFactor, minNeighbors=minNeighbors, minSize=minSize
)
# Return the face rectangle from the image
if get_cropped_face:
for x, y, w, h in faces:
cropped_img = img[
y : y + h, x : x + w
] # slice the array to-from where the face(s) have been found
return cropped_img
for x, y, w, h in faces:
img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)
# draw a rectangle around the face with (0,0,255= Blue) color
eyes = self.eye_cascade.detectMultiScale(
gray,
minSize=(minSize[0] // 2, minSize[1] // 2),
minNeighbors=minNeighbors,
)
        # eyes will always be inside a frontal face; using a smaller minSize helps find them while limiting spurious detections
for ex, ey, ew, eh in eyes:
cv2.rectangle(img, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 3)
# draw a rectangle around the eyes with Green color (0,255,0)
return img
def plot_frame(self, img):
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
def resize_frame(self, frame, res_w=256, preserve_aspect=True, anti_aliasing=True):
"""
resize the images according to desired width and height
param:
frame: numpy image pixels array
rew_w: resize width default to 256
preserve_aspect: preserve the aspect ratio in the frame. If not, the output will be a square matrix
anti_aliasing: whether to apply or not
out:
resized numpy array
"""
res_h = res_w
if preserve_aspect: # protect the aspect ratio even after the resizing
aspect_ratio = frame.shape[0] / frame.shape[1] # get aspect ratio
res_h = res_w * aspect_ratio # set resulting height according to ratio
return resize(frame, (res_h, res_w), anti_aliasing=anti_aliasing)
def frames_similarity(self, frames, full=True, multichannel=True, with_resize=True):
"""
Find the similarity between the consecutive frames based on a common scale
param:
frames: list of numpy pixel arrays
full: whether to return full structural similarity
            multichannel: bool. True if the images are RGB, False if they are grayscale
with_resize: Bool. Default True. whether to resize the frames before finding similarity
"""
sim_scores = []
prev_frame = False
for i in tqdm(range(1, len(frames))): # tqdm shows a progress bar
if with_resize:
if i > 1:
                    # save the trouble of resizing what we already have: if we are past the first
                    # iteration, the previous frame was already resized, so just reuse it
prev_frame = curr_frame
else:
prev_frame = self.resize_frame(
frames[0]
) # initialise for the first time only
curr_frame = self.resize_frame(
frames[i]
) # current frame has to be same no matter what
else:
                # it doesn't make sense to resize here, so just point to the existing frame objects
curr_frame = frames[i]
prev_frame = frames[i - 1]
if curr_frame.shape[0] != prev_frame.shape[0]:
                # frames of different sizes cannot be compared directly, so crop the
                # bigger frame down to the shape of the smaller one
if curr_frame.shape[0] > prev_frame.shape[0]:
curr_frame = curr_frame[
: prev_frame.shape[0], : prev_frame.shape[0], :
]
else:
prev_frame = prev_frame[
: curr_frame.shape[0], : curr_frame.shape[0], :
]
mean_ssim, _ = structural_similarity(
curr_frame, prev_frame, full=full, multichannel=multichannel
)
# get mean similarity scores of the images
sim_scores.append(mean_ssim)
return sim_scores
vf = VideoFeatures()
fp = FrameProcessor()
img = vf.get_frames(
INPUT_PATH + TRAIN_PATH + "cwrtyzndpx.mp4", first_only=True, show=True
)
vf.play_video(INPUT_PATH + TRAIN_PATH + "cwrtyzndpx.mp4")
detected_face = fp.detect_face_eye(
img, minNeighbors=5, scaleFactor=1.3, minSize=(50, 50)
)
fp.plot_frame(detected_face)
frames = vf.get_frames(INPUT_PATH + TRAIN_PATH + "cwrtyzndpx.mp4")
fp.plot_frame(frames[54])
zoomed_face = fp.detect_face_eye(
frames[13],
get_cropped_face=True,
)
fp.plot_frame(zoomed_face)
# ## MTCNN
class MTCNNWrapper:
"""
Detect and show faces using MTCNN
"""
def get_face(self, img):
"""
method to get face from an image
args:
img: image as numpy array
out:
            faces: list of dicts, one per detected face, each with 'box', 'confidence' and 'keypoints'
        """
        faces = MTCNN().detect_faces(img)
        # detect_faces returns a list of dicts, one for every face it found
        return faces
def show_faces(self, img):
"""
Show faces on the original image as red boxes
args:
img: image as numpy array
out:
None: plot the original image with faces inside red boxes
"""
faces = self.get_face(img) # get the list of faces dict
plt.imshow(
cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
) # plot the image and next modify the image
ax = plt.gca() # get the context for drawing boxes
# Get the current Axes instance on the current figure matching the given keyword args, or create one
for result in faces: # faces returns a list of dicts of all the faces
x, y, width, height = result["box"] # get coordinates of each box found
rect = Rectangle(
(x, y), width, height, fill=False, color="red"
) # form rectangle at the given coordinates
ax.add_patch(rect) # add that box to the axis/ current image
plt.show() # plot the extra rectangles
def get_cropped(self, img, show_only=False):
"""
get the cropped image only from detected face
args:
img: numpy image array
show_only: whether to return cropped array or just plot the image. Default False
out:
numpy array of cropped image at the face
"""
faces = self.get_face(img)
x, y, width, height = faces[0][
"box"
] # first face. Will add logic later to find the most significant face
        cropped_img = img[y : y + height, x : x + width]
        if show_only:
            plt.imshow(cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB))
        else:
            return cropped_img
img = VideoFeatures().get_frames(
INPUT_PATH + TRAIN_PATH + "cwrtyzndpx.mp4", first_only=True
)
mt_wrapper = MTCNNWrapper()
mt_wrapper.show_faces(img)
mt_wrapper.get_cropped(img, show_only=True)
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread("/kaggle/input/m-g-odev1/mevlana.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
kernel = np.array([[2, 0, -1], [1, 0, -2], [1, 0, -1]])
img = cv2.filter2D(image, -1, kernel)  # -1 keeps the output depth the same as the input
fig, ax = plt.subplots(1, 2, figsize=(10, 6))
ax[0].imshow(image) # normal image
ax[1].imshow(img) # filter image
image = cv2.imread("/kaggle/input/m-g-odev1/mevlana.jpg")
image = image[400:500, 100:200, :]
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
kernel2 = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
img = cv2.filter2D(image, -1, kernel)
img2 = cv2.filter2D(image, -1, kernel2)
fig, ax = plt.subplots(1, 3, figsize=(10, 6))
ax[0].imshow(image) # normal image
ax[1].imshow(img)  # filtered image: horizontal edge transitions (Sobel)
ax[2].imshow(img2)  # filtered image: vertical edge transitions (Sobel)
image = cv2.imread("/kaggle/input/m-g-odev1/mevlana.jpg")
image = image[400:500, 100:200, :]
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
kernel = np.array([[3, 10, 3], [0, 0, 0], [-3, -10, -3]])
kernel2 = np.array([[3, 0, -3], [10, 0, -10], [3, 0, -3]])
img = cv2.filter2D(image, -1, kernel)
img2 = cv2.filter2D(image, -1, kernel2)
fig, ax = plt.subplots(1, 3, figsize=(10, 6))
ax[0].imshow(image) # normal image
ax[1].imshow(img)  # filtered image: horizontal edge transitions (Scharr)
ax[2].imshow(img2)  # filtered image: vertical edge transitions (Scharr)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import sys
sys.path.append("../input")
import numpy as np
from sklearn.model_selection import LeaveOneGroupOut
from skimage.util.shape import view_as_windows
import longtermdetectioncode.long_term_dataset as dataset
import longtermdetectioncode.model_MSTCN_torch as model
import math
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import copy
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# get rawdata dictionary
# /kaggle/input/imu-long-range/aligned_entire_annotated_data
rootpath = r"../input/imu-long-range/aligned_entire_annotated_data/"
csv_dict_R_A, csv_dict_L_A = dataset.get_rawdata_dic(rootpath)
# get connected data from rawdata dictionary
data_connected, label_connected = dataset.get_data_from_dic(csv_dict_R_A, csv_dict_L_A)
# make groups for loso validation
groups = dataset.make_groups_for_loso(csv_dict_R_A)
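# Note (added for clarity, based on how these labels are used later in this script):
# range_label keeps only the 111/222 markers, which appear to encode meal start/end
# (they are later located with find_peaks); label_connected collapses the remaining
# activity codes into 0 = background, 1 = the gesture class scored as "eating" and
# 2 = the gesture class scored as "drinking" in the F1 section at the end.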
range_label = np.copy(label_connected)
range_label[np.logical_and(range_label != 111, range_label != 222)] = 0
label_connected[label_connected == 111] = 0
label_connected[label_connected == 222] = 0
label_connected[label_connected == 1] = 0
label_connected[label_connected == 2] = 1
label_connected[label_connected == 6] = 1
label_connected[label_connected == 8] = 1
label_connected[label_connected == 9] = 1
label_connected[label_connected == 3] = 2
label_connected[label_connected == 10] = 2
label_connected[np.logical_and(label_connected != 2, label_connected != 1)] = 0
label_connected = np.array(label_connected)
np.save(r"./range_label.npy", range_label)
np.save(r"./data_connected.npy", data_connected)
np.save(r"./groups.npy", groups)
np.save(r"./label_connected.npy", label_connected)
data_connected = np.load(r"../input/datasaved/data_connected.npy")
groups = np.load(r"../input/datasaved/groups.npy")
label_connected = np.load(r"../input/datasaved/label_connected.npy")
range_label = np.load(r"../input/datasaved/range_label.npy")
downsampling_rate = 3
# LOSO training models
save_dir = r"."
logo = LeaveOneGroupOut()
counter = 1
training_time = 3
for train_index, test_index in logo.split(data_connected, label_connected, groups):
data_train, data_test = data_connected[train_index], data_connected[test_index]
label_train, label_test = label_connected[train_index], label_connected[test_index]
test_subject_id = counter
counter = counter + 1
# split into R_A and L_A to make sure they have the same number of data
R_A_data_train = data_train[: int(data_train.shape[0] / 2)]
L_A_data_train = data_train[int(data_train.shape[0] / 2) :]
R_A_data_test = data_test[: int(data_test.shape[0] / 2)]
L_A_data_test = data_test[int(data_test.shape[0] / 2) :]
R_A_label_train = label_train[: int(label_train.shape[0] / 2)]
L_A_label_train = label_train[int(label_train.shape[0] / 2) :]
R_A_label_test = label_test[: int(label_test.shape[0] / 2)]
L_A_label_test = label_test[int(label_test.shape[0] / 2) :]
# Preprocessing
# reformat
R_A_data_train = R_A_data_train.T
L_A_data_train = L_A_data_train.T
R_A_data_test = R_A_data_test.T
L_A_data_test = L_A_data_test.T
R_A_label_train = R_A_label_train.reshape((1, -1))
L_A_label_train = L_A_label_train.reshape((1, -1))
R_A_label_test = R_A_label_test.reshape((1, -1))
L_A_label_test = L_A_label_test.reshape((1, -1))
# downsampling
downsampling_rate = 3
R_A_data_train_d = R_A_data_train[::, ::downsampling_rate]
L_A_data_train_d = L_A_data_train[::, ::downsampling_rate]
R_A_label_train_d = R_A_label_train[::, ::downsampling_rate]
L_A_label_train_d = L_A_label_train[::, ::downsampling_rate]
R_A_data_test_d = R_A_data_test[::, ::downsampling_rate]
L_A_data_test_d = L_A_data_test[::, ::downsampling_rate]
R_A_label_test_d = R_A_label_test[::, ::downsampling_rate]
L_A_label_test_d = L_A_label_test[::, ::downsampling_rate]
# window sliced the data
train_window = 2000
train_stride = 667
test_window = 2000
test_stride = 2000
# Train data
R_A_data_train_c = dataset.window_sliced(
R_A_data_train_d, train_window, train_stride
)
L_A_data_train_c = dataset.window_sliced(
L_A_data_train_d, train_window, train_stride
)
# Train label
R_A_label_train_c = dataset.window_sliced(
R_A_label_train_d, train_window, train_stride
)
L_A_label_train_c = dataset.window_sliced(
L_A_label_train_d, train_window, train_stride
)
# test data
R_A_data_test_c = dataset.window_sliced(R_A_data_test_d, test_window, test_stride)
L_A_data_test_c = dataset.window_sliced(L_A_data_test_d, test_window, test_stride)
# test label
R_A_label_test_c = dataset.window_sliced(R_A_label_test_d, test_window, test_stride)
L_A_label_test_c = dataset.window_sliced(L_A_label_test_d, test_window, test_stride)
# connect R_A and L_A
data_train_c = torch.cat((R_A_data_train_c, L_A_data_train_c), 0)
data_train_c = data_train_c.to(device)
label_train_c = torch.cat((R_A_label_train_c, L_A_label_train_c), 0)
label_train_c = label_train_c.to(device)
data_test_c = torch.cat((R_A_data_test_c, L_A_data_test_c), 0)
data_test_c = data_test_c.to(device)
label_test_c = torch.cat((R_A_label_test_c, L_A_label_test_c), 0)
label_test_c = label_test_c.to(device)
# train the model
trainer = model.Trainer(
data_train_c,
label_train_c,
data_test_c,
label_test_c,
2,
9,
128,
6,
3,
0.4,
test_subject_id,
training_time,
)
num_epochs = 30
batch_size = 200
learning_rate = 0.0005
print(
f"subject-{test_subject_id}-training_time-{training_time}................................"
)
trainer.train(num_epochs, batch_size, learning_rate, device, save_dir)
# test model here
from scipy.signal import find_peaks
from collections import Counter
subject_id = 1 # to test subject, specify its id here
training_time = 2
logo = LeaveOneGroupOut()
a = logo.split(data_connected, label_connected, groups)
a_list = list(a)
index_array = np.array(a_list, dtype=object)  # ragged train/test index pairs need dtype=object
train_index, test_index = index_array[subject_id - 1][0], index_array[subject_id - 1][1]
data_train, data_test = data_connected[train_index], data_connected[test_index]
label_train, label_test = label_connected[train_index], label_connected[test_index]
range_label_train, range_label_test = range_label[train_index], range_label[test_index]
range_label_test = range_label_test[: int(range_label_test.shape[0] / 2)]
local_maximum, _ = find_peaks(range_label_test)
print(local_maximum)
local_maximum = local_maximum + (downsampling_rate - local_maximum % downsampling_rate)
range_label_test = np.zeros(range_label_test.shape[0])
for i in range(0, int(local_maximum.shape[0] / 2)):
range_label_test[local_maximum[2 * i]] = 111
range_label_test[local_maximum[2 * i + 1]] = 222
range_label_test = range_label_test.reshape((1, -1))
R_A_data_test = data_test[: int(data_test.shape[0] / 2)]
L_A_data_test = data_test[int(data_test.shape[0] / 2) :]
R_A_label_test = label_test[: int(label_test.shape[0] / 2)]
L_A_label_test = label_test[int(label_test.shape[0] / 2) :]
R_A_data_test = R_A_data_test.T
L_A_data_test = L_A_data_test.T
R_A_label_test = R_A_label_test.reshape((1, -1))
L_A_label_test = L_A_label_test.reshape((1, -1))
R_A_data_test_d = R_A_data_test[::, ::downsampling_rate]
L_A_data_test_d = L_A_data_test[::, ::downsampling_rate]
R_A_label_test_d = R_A_label_test[::, ::downsampling_rate]
L_A_label_test_d = L_A_label_test[::, ::downsampling_rate]
range_label_test = range_label_test[::, ::downsampling_rate]
test_window = 2000
test_stride = 2000
# test data
R_A_data_test_c = dataset.window_sliced(R_A_data_test_d, test_window, test_stride)
L_A_data_test_c = dataset.window_sliced(L_A_data_test_d, test_window, test_stride)
# test label
R_A_label_test_c = dataset.window_sliced(R_A_label_test_d, test_window, test_stride)
L_A_label_test_c = dataset.window_sliced(L_A_label_test_d, test_window, test_stride)
range_label_test = dataset.window_sliced(range_label_test, test_window, test_stride)
data_test_c = torch.cat((R_A_data_test_c, L_A_data_test_c), 0)
data_test_c = data_test_c.to(device)
label_test_c = torch.cat((R_A_label_test_c, L_A_label_test_c), 0)
label_test_c = label_test_c.to(device)
range_label_test = range_label_test.to(device)
test_subject_id = subject_id
trainer = model.Trainer(
data_test_c,
label_test_c,
data_test_c,
label_test_c,
2,
9,
128,
6,
3,
0.4,
test_subject_id,
training_time,
)
predictions = trainer.predict(
"../input/mstcnmodels", data_test_c, test_subject_id, training_time, device
)
predictions_f = predictions.cpu().numpy().flatten()
test_ground_truth = label_test_c.cpu().numpy().flatten()
range_label_test = range_label_test.cpu().numpy().flatten()
local_maximum, _ = find_peaks(range_label_test)
if local_maximum.shape[0] % 2 != 0:
range_label_test[range_label_test.shape[0] - 1] = 222
from collections import Counter
Counter(range_label_test)
# or operations for right and left hand
pre_merge_rl = (
predictions_f[: int(predictions_f.shape[0] / 2)]
| predictions_f[int(predictions_f.shape[0] / 2) :]
)
test_ground_truth_merge_rl = (
test_ground_truth[: int(test_ground_truth.shape[0] / 2)]
| test_ground_truth[int(test_ground_truth.shape[0] / 2) :]
)
pre_merge_rl[pre_merge_rl == 3] = 2
test_ground_truth_merge_rl[test_ground_truth_merge_rl == 3] = 2
# apply length filter for bites
pre_merge_rl = model.length_filtering(
pre_merge_rl, 0.5, downsampling_rate, bg_class=[0]
) # filter less than 0.5 second
pre_merge_rl.shape
# # Using a Gaussian filter for meal range detection
import scipy.signal as signal
# build the Gaussian filter
# Define the filter parameters
fs = 64
filter_length = fs * 240 / (downsampling_rate)
filter_std = fs * 45 / (downsampling_rate)
# Create the Gaussian filter
filter_coeffs = signal.gaussian(filter_length, filter_std)
filter_coeffs_normalize = filter_coeffs / sum(filter_coeffs)
Sn = signal.convolve(pre_merge_rl.astype(float), filter_coeffs_normalize, mode="same")
# see Sn before threshold
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import matplotlib.patches as patches
len_single_hand = test_ground_truth_merge_rl.shape[0]
lengt = len_single_hand
for j in range(0, len_single_hand // lengt):
# for j in range(0,10):
fig, axs = plt.subplots(4)
fig.set_size_inches(24, 12)
x = np.linspace(0, lengt, lengt)
my_x_ticks = np.arange(160, lengt, 160)
my_x_ticks1 = np.arange(10, lengt // 16, 10)
plt.xticks(my_x_ticks, my_x_ticks1, fontsize=15)
plt.tick_params(labelsize=20)
y1 = pre_merge_rl[j * lengt : j * lengt + lengt]
axs[0].plot(x, y1, label="$prediction$", color="green", linewidth=2)
axs[0].legend()
y2 = Sn[j * lengt : j * lengt + lengt]
axs[1].plot(
        x, y2, label="$prediction after Gaussian filter$", color="red", linewidth=2
)
axs[1].legend()
y3 = test_ground_truth_merge_rl[j * lengt : j * lengt + lengt]
axs[2].plot(x, y3, label="$ground truth$", color="blue", linewidth=2)
axs[2].legend()
y4 = range_label_test[j * lengt : j * lengt + lengt]
axs[3].plot(x, y4, label="$ground truth meal range$", color="blue", linewidth=2)
axs[3].legend()
plt.show()
# plt.savefig('./'+str(subject_id)+'.png')
threshold = 0.15
Sn_after_threshold = Sn.copy()
Sn_after_threshold[Sn_after_threshold < threshold] = 0
Sn_after_threshold[Sn_after_threshold != 0] = 1
# see Sn after threshold
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import matplotlib.patches as patches
len_single_hand = test_ground_truth_merge_rl.shape[0]
lengt = len_single_hand
for j in range(0, len_single_hand // lengt):
# for j in range(0,10):
fig, axs = plt.subplots(4)
fig.set_size_inches(24, 12)
x = np.linspace(0, lengt, lengt)
my_x_ticks = np.arange(160, lengt, 160)
my_x_ticks1 = np.arange(10, lengt // 16, 10)
plt.xticks(my_x_ticks, my_x_ticks1, fontsize=15)
plt.tick_params(labelsize=20)
y1 = pre_merge_rl[j * lengt : j * lengt + lengt]
axs[0].plot(x, y1, label="$prediction$", color="green", linewidth=2)
axs[0].legend()
y2 = Sn_after_threshold[j * lengt : j * lengt + lengt]
axs[1].plot(
        x, y2, label="$prediction after threshold$", color="red", linewidth=2
)
axs[1].legend()
y3 = test_ground_truth_merge_rl[j * lengt : j * lengt + lengt]
axs[2].plot(x, y3, label="$ground truth$", color="blue", linewidth=2)
axs[2].legend()
y4 = range_label_test[j * lengt : j * lengt + lengt]
axs[3].plot(x, y4, label="$ground truth meal range$", color="blue", linewidth=2)
axs[3].legend()
plt.show()
# define the filter kernel
fs = 64 # sampling frequency
h = np.concatenate(
[np.arange(1, (fs // (3 * 4)) + 1), np.array([0]), -np.arange(fs // (3 * 4), 0, -1)]
)
Dn = np.convolve(Sn_after_threshold.astype(float), h, mode="same")
Dn = np.abs(Dn)
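# The kernel h above acts as a smoothed derivative, so |Dn| peaks wherever
# Sn_after_threshold switches between 0 and 1, i.e. at candidate meal start/end points.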
# see Sn after threshold and edge detector
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import matplotlib.patches as patches
len_single_hand = test_ground_truth_merge_rl.shape[0]
lengt = len_single_hand
for j in range(0, len_single_hand // lengt):
# for j in range(0,10):
fig, axs = plt.subplots(4)
fig.set_size_inches(24, 12)
x = np.linspace(0, lengt, lengt)
my_x_ticks = np.arange(160, lengt, 160)
my_x_ticks1 = np.arange(10, lengt // 16, 10)
plt.xticks(my_x_ticks, my_x_ticks1, fontsize=15)
plt.tick_params(labelsize=20)
y1 = pre_merge_rl[j * lengt : j * lengt + lengt]
axs[0].plot(x, y1, label="$prediction$", color="green", linewidth=2)
axs[0].legend()
y2 = Dn[j * lengt : j * lengt + lengt]
axs[1].plot(
        x, y2, label="$prediction after edge detector$", color="red", linewidth=2
)
axs[1].legend()
y3 = test_ground_truth_merge_rl[j * lengt : j * lengt + lengt]
axs[2].plot(x, y3, label="$ground truth$", color="blue", linewidth=2)
axs[2].legend()
y4 = range_label_test[j * lengt : j * lengt + lengt]
axs[3].plot(x, y4, label="$ground truth meal range$", color="blue", linewidth=2)
axs[3].legend()
plt.show()
from scipy.signal import find_peaks
local_maximum, _ = find_peaks(Dn)
if local_maximum.shape[0] % 2 != 0:
local_maximum = np.append(local_maximum, [pre_merge_rl.shape[0] - 1])
local_maximum
# merge intervals less than certain seconds
fs = 64
interval_threshold_seconds = 360 # seconds
interval_threshold_samples = interval_threshold_seconds * (fs / downsampling_rate)
starts_and_ends = np.empty(shape=[0, 2], dtype=int)
for i in range(0, int(local_maximum.shape[0] / 2)):
if i == 0:
starts_and_ends = np.append(
starts_and_ends, [[local_maximum[i * 2], local_maximum[i * 2 + 1]]], axis=0
)
else:
if (
local_maximum[i * 2] - starts_and_ends[starts_and_ends.shape[0] - 1][1]
< interval_threshold_samples
):
starts_and_ends[starts_and_ends.shape[0] - 1][1] = local_maximum[i * 2 + 1]
else:
starts_and_ends = np.append(
starts_and_ends,
[[local_maximum[i * 2], local_maximum[i * 2 + 1]]],
axis=0,
)
starts_and_ends
# reject intervals less than certain seconds
fs = 64
interval_threshold_seconds = 3 # seconds
interval_threshold_samples = interval_threshold_seconds * (fs / downsampling_rate)
starts_and_ends_length_filtered = np.empty(shape=[0, 2], dtype=int)
for i in range(0, starts_and_ends.shape[0]):
if starts_and_ends[i][1] - starts_and_ends[i][0] >= interval_threshold_samples:
starts_and_ends_length_filtered = np.append(
starts_and_ends_length_filtered,
[[starts_and_ends[i][0], starts_and_ends[i][1]]],
axis=0,
)
starts_and_ends_length_filtered
Dn_after_processed = np.zeros(Dn.shape[0])
for i in range(0, starts_and_ends_length_filtered.shape[0]):
Dn_after_processed[starts_and_ends_length_filtered[i][0]] = 1
Dn_after_processed[starts_and_ends_length_filtered[i][1]] = 1
# see final Dn after processed
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import matplotlib.patches as patches
len_single_hand = test_ground_truth_merge_rl.shape[0]
lengt = len_single_hand
for j in range(0, len_single_hand // lengt):
# for j in range(0,10):
fig, axs = plt.subplots(4)
fig.set_size_inches(24, 12)
x = np.linspace(0, lengt, lengt)
my_x_ticks = np.arange(160, lengt, 160)
my_x_ticks1 = np.arange(10, lengt // 16, 10)
plt.xticks(my_x_ticks, my_x_ticks1, fontsize=15)
plt.tick_params(labelsize=20)
y1 = pre_merge_rl[j * lengt : j * lengt + lengt]
axs[0].plot(x, y1, label="$prediction$", color="green", linewidth=2)
axs[0].legend()
y2 = Dn_after_processed[j * lengt : j * lengt + lengt]
axs[1].plot(
        x, y2, label="$detected meal start/end markers$", color="red", linewidth=2
)
axs[1].legend()
y3 = test_ground_truth_merge_rl[j * lengt : j * lengt + lengt]
axs[2].plot(x, y3, label="$ground truth$", color="blue", linewidth=2)
axs[2].legend()
y4 = range_label_test[j * lengt : j * lengt + lengt]
axs[3].plot(x, y4, label="$ground truth meal range$", color="blue", linewidth=2)
axs[3].legend()
plt.show()
# ###########################################
# # using morphology closing for meal range detection
from scipy.ndimage.morphology import binary_closing
pre_merge_rl_binary = np.copy(pre_merge_rl)
pre_merge_rl_binary[pre_merge_rl_binary == 2] = 1
Kn = binary_closing(pre_merge_rl_binary, iterations=2500)
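# With the default 3-sample structuring element, iterations=2500 closes gaps of up to roughly
# 2 * 2500 samples between detected gestures, i.e. about 2 * 2500 / (64 / 3) ≈ 230 seconds at this
# downsampled rate (a rough estimate added for clarity, not an exact guarantee).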
Kn.shape
# see Kn after morphology closing
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import matplotlib.patches as patches
len_single_hand = test_ground_truth_merge_rl.shape[0]
lengt = len_single_hand
for j in range(0, len_single_hand // lengt):
# for j in range(0,10):
fig, axs = plt.subplots(4)
fig.set_size_inches(24, 12)
x = np.linspace(0, lengt, lengt)
my_x_ticks = np.arange(160, lengt, 160)
my_x_ticks1 = np.arange(10, lengt // 16, 10)
plt.xticks(my_x_ticks, my_x_ticks1, fontsize=15)
plt.tick_params(labelsize=20)
y1 = pre_merge_rl[j * lengt : j * lengt + lengt]
axs[0].plot(x, y1, label="$prediction$", color="green", linewidth=2)
axs[0].legend()
y2 = Kn[j * lengt : j * lengt + lengt]
axs[1].plot(
x,
y2,
label="$prediction after morphology closing filter$",
color="red",
linewidth=2,
)
axs[1].legend()
y3 = test_ground_truth_merge_rl[j * lengt : j * lengt + lengt]
axs[2].plot(x, y3, label="$ground truth$", color="blue", linewidth=2)
axs[2].legend()
y4 = range_label_test[j * lengt : j * lengt + lengt]
axs[3].plot(x, y4, label="$ground truth meal range$", color="blue", linewidth=2)
axs[3].legend()
plt.show()
_, starts, ends = model.get_labels_start_end_time(Kn)
print(starts)
print(ends)
Kn_length_filtered = model.length_filtering(
np.copy(Kn), 3, downsampling_rate, bg_class=[0]
) # filter less than 3 seconds meal time
# see Kn after morphology closing and length filtered
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
import matplotlib.patches as patches
len_single_hand = test_ground_truth_merge_rl.shape[0]
lengt = len_single_hand
for j in range(0, len_single_hand // lengt):
# for j in range(0,10):
fig, axs = plt.subplots(4)
fig.set_size_inches(24, 12)
x = np.linspace(0, lengt, lengt)
my_x_ticks = np.arange(160, lengt, 160)
my_x_ticks1 = np.arange(10, lengt // 16, 10)
plt.xticks(my_x_ticks, my_x_ticks1, fontsize=15)
plt.tick_params(labelsize=20)
y1 = pre_merge_rl[j * lengt : j * lengt + lengt]
axs[0].plot(x, y1, label="$prediction$", color="green", linewidth=2)
axs[0].legend()
y2 = Kn_length_filtered[j * lengt : j * lengt + lengt]
axs[1].plot(
x,
y2,
        label="$prediction after morphology closing and length filter$",
color="red",
linewidth=2,
)
axs[1].legend()
y3 = test_ground_truth_merge_rl[j * lengt : j * lengt + lengt]
axs[2].plot(x, y3, label="$ground truth$", color="blue", linewidth=2)
axs[2].legend()
y4 = range_label_test[j * lengt : j * lengt + lengt]
axs[3].plot(x, y4, label="$ground truth meal range$", color="blue", linewidth=2)
axs[3].legend()
plt.show()
# # Calculate F1 score
predictions_f_f = model.length_filtering(
predictions_f, 0.5, 3, bg_class=[0]
) # filter less than 0.5 second
# fill in ones
An = np.zeros(predictions_f.shape[0])
for i in range(0, starts_and_ends_length_filtered.shape[0]):
start = starts_and_ends_length_filtered[i][0]
end = starts_and_ends_length_filtered[i][1]
for j in range(start, end + 1):
An[j] = 1 # right
An[int(An.shape[0] / 2) + j] = 1
predictions_f_f_post = predictions_f_f * An
# eating
F1, fn, tp, fp = model.f_score(
predictions_f_f_post, test_ground_truth, 0.1, bg_class=[0, 2]
)
print(F1)
print(tp)
print(fn)
print(fp)
# drinking
F1, fn, tp, fp = model.f_score(
predictions_f_f_post, test_ground_truth, 0.25, bg_class=[0, 1]
)
print(F1)
print(tp)
print(fn)
print(fp)
from longtermdetectioncode.visualization import plot_prediction
x_data = data_test_c.transpose(1, 2).cpu().reshape(-1, 6).numpy()
plot_prediction(x_data, test_ground_truth, predictions_f_f)
|
# Content
# Import Libraries
# Load data
# Data Preparation
# Missing values imputation
# Feature Engineering
# Modeling
# Build the model
# Evaluation
# Model performance
# Feature importance
# Who gets the best performing model?
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Modelling Algorithms
from sklearn.svm import SVC, LinearSVC
from sklearn import linear_model
# Modelling Helpers
from sklearn.preprocessing import Imputer, Normalizer, scale
from sklearn.feature_selection import RFECV
# Visualisation
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
# get TMDB Box Office Prediction train & test csv files as a DataFrame
train = pd.read_csv("/kaggle/input/tmdb-box-office-prediction/train.csv")
test = pd.read_csv("/kaggle/input/tmdb-box-office-prediction/test.csv")
def plot_correlation_map(df):
    corr = df.corr()
_, ax = plt.subplots(figsize=(23, 22))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
_ = sns.heatmap(
corr,
cmap=cmap,
square=True,
cbar_kws={"shrink": 0.9},
ax=ax,
annot=True,
annot_kws={"fontsize": 12},
)
def plot_distribution(df, var, target, **kwargs):
row = kwargs.get("row", None)
col = kwargs.get("col", None)
facet = sns.FacetGrid(df, hue=target, aspect=4, row=row, col=col)
facet.map(sns.kdeplot, var, shade=True)
facet.set(xlim=(0, df[var].max()))
facet.add_legend()
# **Visualization**
train.corr()
np.count_nonzero(train.budget)
train.describe()
data = pd.concat([train["budget"], train["revenue"]], axis=1)
data.plot.scatter(x="budget", y="revenue", xlim=(0, 1e7), ylim=(0, 1e8))
# **Training**
# Splitting into Test and validation data and feature selection
# Selecting features Budget and Popularity
train_mod = train[["budget", "popularity"]]
# Selecting the first 2000 rows of the training data for training
train_train = train_mod[0:2000]
# Selecting the rest of the training data for validation
train_val = train_mod[2001:2999]
# Obtain labels
train_mod_y = train[["revenue"]]
train_train_y = train_mod_y[0:2000]
train_val_y = train_mod_y[2001:2999]
train_val_title = train["original_title"][2001:2999]
# Check for NaN
if train_mod.isnull().values.any():
print("Too bad, Nan found...")
else:
print("All right!!! Data ok!")
# Initialize and train a linear regression (Lasso) model
model = linear_model.Lasso(alpha=0.1)
model.fit(train_train, train_train_y.values.ravel())
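# alpha is the strength of the L1 penalty: larger values shrink the budget/popularity
# coefficients harder (alpha=0 would reduce Lasso to ordinary least squares).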
# Evaluate on the training data
res = model.predict(train_val)
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
def evaluateModels(prediction, ground_truth):
r2 = r2_score(ground_truth, prediction)
rms = np.sqrt(mean_squared_error(ground_truth, prediction))
mae = mean_absolute_error(ground_truth, prediction)
# Create error array
prediction_error = ground_truth - prediction
ax = sns.boxplot(x=prediction_error, width=1)
return [r2, rms, mae]
# Obtain R2, RMSE and MAE for the Lasso model on the validation split
print(evaluateModels(res, train_val_y.values.ravel()))
# Check the shape of the prediction vector
res.shape
# Create the table for comparing predictions with labels
absolute_error = np.abs(res - train_val_y.values.ravel())
relative_error = absolute_error / train_val_y.values.ravel()
evaluation = pd.DataFrame(
{
"Title": train_val_title.values.ravel(),
"budget": train_val["budget"].values.ravel(),
"popularity": train_val["popularity"].values.ravel(),
"Prediction": res.round(),
"Actual revenue": train_val_y.values.ravel(),
"Absolute error 1": absolute_error,
"Relative error 1": relative_error,
}
)
evaluation.sort_values(by=["Relative error 1"])
|
# ## Create Random Dataset
## Select random latitude between (12.8255, 13.0995) as Lat
## Select random longitude between (77.3959, 77.8690) as Long
## Select random value between (10000, 100000) as Revenue
## Select random value between (0.1, 0.2) as Workload
import folium
import pandas as pd
import random
import numpy as np
df = pd.DataFrame(
columns=["Account_name", "Latitude", "Longitude", "Revenue", "Workload"]
)
for i in range(30):
data = [
"Account_" + str(i),
random.uniform(12.8255, 13.0995),
random.uniform(77.3959, 77.8690),
random.uniform(10000, 100000),
random.uniform(0.1, 0.2),
]
df.loc[-1] = data # adding a row
df.index = df.index + 1 # shifting index
df = df.sort_index() # sorting by index
df.head(2)
# ## Folium
def init_map(df=None, selector_list=None, color_dict=None):
color_list = [
"red",
"blue",
"green",
"purple",
"orange",
"darkred",
"lightred",
"beige",
"darkblue",
"darkgreen",
"cadetblue",
"darkpurple",
"white",
"pink",
"lightblue",
"lightgreen",
"gray",
"black",
"lightgray",
]
map_ = folium.Map(
location=[13, 77.6],
zoom_start=11,
zoom_control=False,
scrollWheelZoom=False,
dragging=False,
)
if df is not None:
for i, row in df.iterrows():
if color_dict is not None:
color = color_list[color_dict[row.Account_name]]
else:
color = "blue"
# print(row.Account_name, color)
if selector_list is not None:
if i in selector_list:
folium.Marker(
[row.Latitude, row.Longitude],
tooltip=row.Account_name,
icon=folium.Icon(color=color),
).add_to(map_)
else:
folium.Marker(
[row.Latitude, row.Longitude],
tooltip=row.Account_name,
icon=folium.Icon(color=color),
).add_to(map_)
return map_
init_map()
# ## Plot Initial Map
init_map(df)
# ## Select initial seed
def seed_selection(df, inital_seed_count=10, df_seed_limit=0.1):
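    """
    Greedy farthest-point seed selection (docstring added for clarity): start from a random
    account, then repeatedly add the account whose summed distance to the already chosen
    seeds is largest, skipping candidates closer than df_seed_limit to any existing seed.
    """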
initial_seed_index_list = []
    initial_seed_index_list.append(random.randint(0, df.shape[0] - 1))  # randint is inclusive on both ends
while len(initial_seed_index_list) < inital_seed_count:
max_distance = 0
max_index = -1
for index_account, row in df.iterrows():
distance_from_seed = 0
# print(index_account)
if index_account in initial_seed_index_list:
continue
for index_seed in initial_seed_index_list:
point1 = np.array((row.Longitude, row.Latitude))
point2 = np.array(
(df.iloc[index_seed]["Longitude"], df.iloc[index_seed]["Latitude"])
)
d_seed = np.linalg.norm(point1 - point2)
if d_seed < df_seed_limit:
break
distance_from_seed += np.linalg.norm(point1 - point2)
# print(distance_from_seed, index_account, max_index)
if d_seed < df_seed_limit:
continue
if distance_from_seed > max_distance:
max_distance = distance_from_seed
max_index = index_account
# print(distance_from_seed, index_account, max_index)
initial_seed_index_list.append(max_index)
return initial_seed_index_list
seed_index_list = seed_selection(df)
seed_index_list = [i for i in seed_index_list if i != -1]
print(seed_index_list)
m = init_map(df, seed_index_list)
for i in seed_index_list:
# print(i)
folium.Circle(
[df.iloc[i].Latitude, df.iloc[i].Longitude], 12000, fill=True
).add_child(folium.Popup(df.iloc[i].Account_name)).add_to(m)
m
# ## Set Quadratic Programming
import cvxpy as cp
# ### Assignment Matrix
assignment_matrix = cp.Variable((len(seed_index_list), df.shape[0]), boolean=True)
assignment_matrix.shape
# ### Optimization - Maximize Revenue
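# The objective built below maximizes, over the boolean assignment x[i][j],
#   sum_ij x[i][j] * Revenue_j / dist(seed_i, account_j)^2
# so high-revenue accounts that sit close to a seed contribute the most
# (the distance is scaled by 10000, presumably to keep the numbers well-conditioned).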
num_seeds, num_accounts = assignment_matrix.shape
sum_VI = 0
for i in range(num_seeds):
for j in range(num_accounts):
# print('Seed ', seed_index_list[i])
point2 = np.array((df.iloc[j].Longitude, df.iloc[j].Latitude))
point1 = np.array(
(
df.iloc[seed_index_list[i]].Longitude,
df.iloc[seed_index_list[i]].Latitude,
)
)
dist = np.linalg.norm(point1 - point2) * 10000
if dist == 0:
continue
VI = df.iloc[j].Revenue / (dist * dist)
sum_VI += assignment_matrix[i][j] * VI
# print(sum_VI)
objective = cp.Maximize(sum_VI)
# ### Constraints
constraints = []
# #### 1. Each seed should be assigned to itself
for index, seed_index in enumerate(seed_index_list):
constraints.append(assignment_matrix[index][seed_index] == 1)
# #### 2. Each account should be mapped to only one seed
for account_index in range(assignment_matrix.shape[1]):
sum_account = 0
for seed_index in range(assignment_matrix.shape[0]):
# print(account_index)
# print(seed_index)
sum_account += assignment_matrix[seed_index][account_index]
# print(sum_account)
constraints.append(sum_account == 1)
# #### 3. Workload Balance
num_seeds, num_accounts = assignment_matrix.shape
for i in range(num_seeds):
sum_workload = 0
for j in range(num_accounts):
# print('Seed ', seed_index_list[i])
Workload = df.iloc[j].Workload
sum_workload += assignment_matrix[i][j] * Workload
# print(sum_workload)
constraints.append(sum_workload <= 1.3)
# ### 4. Distance from base should not be greater than threshold
num_seeds, num_accounts = assignment_matrix.shape
sum_VI = 0
for i in range(num_seeds):
for j in range(num_accounts):
point2 = np.array((df.iloc[j].Longitude, df.iloc[j].Latitude))
point1 = np.array(
(
df.iloc[seed_index_list[i]].Longitude,
df.iloc[seed_index_list[i]].Latitude,
)
)
dist = np.linalg.norm(point1 - point2)
constraints.append(assignment_matrix[i][j] * dist <= 0.10)
# ### Optimization
problem = cp.Problem(objective, constraints)
problem.solve()
print(problem.status)
# ### Assignment
assignment_df = pd.DataFrame(np.array(assignment_matrix.value))
df.head(2)
seed_mapping_dict = {}
for c in assignment_df.columns:
# print(assignment_df[c].sum())
if assignment_df[c].sum() == 0:
# print(c)
pass
seed_mapping_dict[df.iloc[c].Account_name] = assignment_df[c].idxmax()
init_map(df, color_dict=seed_mapping_dict)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib.pyplot import plot
from matplotlib.pyplot import vlines
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
"""
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
"""
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data1 = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/bdda73c9be.csv"
)
print(data1)
plot(data1.Turn)
data2 = data1[data1.Turn == True]
data3 = data1[data1.Turn == False]
a = data1.Turn[1]
N = len(data1)
t = 0
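# First pass (comment added for clarity): count rising edges of the Turn flag after the first
# 100 samples so the start/end arrays can be pre-allocated; each detected turn is then padded
# by 100 samples on both sides in the second pass below.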
for i in range(N):
if i > 100 and data1.Turn[i] and not data1.Turn[i - 1]:
t = t + 1
starts = np.zeros([t, 1])
ends = np.zeros([t, 1])
s = 0
e = 0
for i in range(N):
if i > 100 and data1.Turn[i] and not data1.Turn[i - 1]:
starts[s] = i - 100
s = s + 1
if i < N - 100 and data1.Turn[i] and not data1.Turn[i + 1]:
ends[e] = i + 100
e = e + 1
t
starts
ends
event = 0
l = int(ends[event]) - int(starts[event])
taxis = np.linspace(-100, l - 100, l)
vmin = min(data1.AccV[int(starts[event]) : int(ends[event])])
vmax = max(data1.AccML[int(starts[event]) : int(ends[event])])
plot(taxis, data1.AccML[int(starts[event]) : int(ends[event])])
plot(taxis, data1.AccAP[int(starts[event]) : int(ends[event])])
plot(taxis, data1.AccV[int(starts[event]) : int(ends[event])])
vlines(l - 200, vmin, vmax)
vlines(0, vmin, vmax)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Importing data
import pandas as pd
gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test = pd.read_csv("../input/titanic/test.csv")
train = pd.read_csv("../input/titanic/train.csv")
train.head()
numer_of_rows = train.shape[0]
numer_of_rows
# ### Checking how many columns has missing values
missing_values = train.isnull().sum()
missing_values
# ### Missing values detected in 3 columns. Because the Embarked column has only 2 NAs, those two rows are going to be deleted.
train_drop = train.dropna(subset=["Embarked"])
# numer_of_rows = train_drop.shape[0]
# numer_of_rows
# ### Filling Age column with mean value and checking if all NAs are now filled
mean_age = train_drop[["Age"]].mean()
mean_age
train_fill_age = train_drop.fillna({"Age": 30})
train_fill_age.isnull().sum()
# ### Filling Cabin with value from next row and checking if all NAs are now filled.
train_fill_cabin = train_fill_age.fillna(method="bfill", axis=0)
train_fill_cabin.isnull().sum()
# train_fill_cabin.head()
# ### Drop last missing value from Cabin
train_filled = train_fill_cabin.dropna(subset=["Cabin"])
train_filled.isnull().sum()
# ### All NAs are now handled.
# ## Transforming categorical data into numerical.
train_filled.head()
# ### Dropping text columns
train_drop_num = train_filled.drop(["PassengerId", "Name", "Ticket"], axis=1)
train_drop_num.head()
# ### Trim numbers from Cabin number to get only cabin's category.
train_drop_num["Cabin"].replace(to_replace=r"[0-9]", value="", regex=True)
train_reg = train_drop_num
train_reg = train_reg.assign(
Cabin=(train_drop_num["Cabin"].replace(to_replace=r"[0-9]", value="", regex=True))
)
train_reg.head()
# ### Replace char categories with numerical values.
train_cat = train_reg
labels, uniqes = pd.factorize(train_reg["Sex"])
train_cat["Sex"] = labels
# train_test_cat = train_test_cat.assign(Sex=pd.factorize(train_reg['Sex']))
labels1, uniqes1 = pd.factorize(train_reg["Cabin"])
train_cat["Cabin"] = labels1
labels2, uniqes2 = pd.factorize(train_reg["Embarked"])
train_cat["Embarked"] = labels2
train_cat.head()
# ## Standardization
train_clean = train_cat
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scaler.fit(train_selected.drop('Survived',axis=1))
# scaled_features = scaler.transform(train_selected.drop('Survived',axis=1))
scaler.fit(train_clean.drop("Survived", axis=1))
scaled_features = scaler.transform(train_clean.drop("Survived", axis=1))
x_train_scaled = scaled_features
# ## Feature selection with Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
labels = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Cabin", "Embarked"]
# x = train_clean.drop('Survived',axis=1)
y = train_clean["Survived"]  # target is needed below to fit the selector
rfc = RandomForestClassifier()
rfc = rfc.fit(x_train_scaled, y)
sfm = SelectFromModel(rfc, prefit=True)
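# With no explicit threshold, SelectFromModel keeps the features whose random-forest
# importance is above the mean importance across all features.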
selected_features = sfm.transform(x_train_scaled)
selected_features
sfm.get_support(indices=True)
for feature_list_index in sfm.get_support(indices=True):
print(labels[feature_list_index])
x_train_selected = selected_features
x_train_scaled = scaled_features
y_train = train_clean["Survived"]
# ### Testing model fitting on clean train data set using cross-validation
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, cross_val_predict
Classifiers = [
SVC(gamma="scale"),
KNeighborsClassifier(),
DecisionTreeClassifier(),
ExtraTreesClassifier(n_estimators=100),
LogisticRegression(solver="lbfgs"),
RandomForestClassifier(n_estimators=100),
]
# ### Models fitting with cross validation for train set with feature selection
Model = []
Cross_val_score = []
Accuracy = []
for clf in Classifiers:
predictions = cross_val_predict(clf, x_train_selected, y_train, cv=10)
score = cross_val_score(clf, x_train_selected, y_train, scoring="accuracy", cv=10)
accuracy = score.mean() * 100
Model.append(clf.__class__.__name__)
Cross_val_score.append(score)
Accuracy.append(accuracy)
print(
"Score of "
+ clf.__class__.__name__
+ " : "
+ str(score)
+ "\n"
+ "Accuracy of "
+ clf.__class__.__name__
+ " : "
+ str(accuracy)
)
# ### Models fitting with cross validation for train set without feature selection
Model = []
Cross_val_score = []
Accuracy = []
for clf in Classifiers:
predictions = cross_val_predict(clf, x_train_scaled, y_train, cv=10)
score = cross_val_score(clf, x_train_scaled, y_train, scoring="accuracy", cv=10)
accuracy = score.mean() * 100
Model.append(clf.__class__.__name__)
Cross_val_score.append(score)
Accuracy.append(accuracy)
print("Accuracy of " + clf.__class__.__name__ + " : " + str(accuracy))
# print('Score of '+clf.__class__.__name__ +' : '+ str(score)+ '\n'+
#'Accuracy of '+clf.__class__.__name__ +' : '+ str(accuracy))
# ### Getting the best parameters for the best classifier with GridSearch
param_grid = {
"gamma": ["auto", 0.01, 0.1, 0.5, 1, 2, 10],
"C": [0.001, 0.01, 0.1, 1, 10],
}
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
gs = GridSearchCV(SVC(), param_grid, cv=StratifiedKFold(n_splits=5))
gs_result = gs.fit(x_train_scaled, y_train)
print(gs_result.best_score_)
print(gs_result.best_estimator_)
print(gs_result.best_params_)
# # Preparing test data
test.head()
# ### Checking missing values
test.isnull().sum()
train_clean.head()
# ### Dropping the same columns as in the train dataset
test_drop = test.drop(["PassengerId", "Name", "Ticket"], axis=1)
# test_drop.isnull().sum()
# ### Fill age with mean
mean_age = test_drop[["Age"]].mean()
mean_age
test_fill_age = test_drop.fillna({"Age": 30})
test_fill_age.isnull().sum()
# ### Fill fare with mean
mean_fare = test_fill_age[["Fare"]].mean()
mean_fare
test_fill_fare = test_fill_age.fillna({"Fare": 35.6271})
test_fill_fare.isnull().sum()
# ### Filling Cabin with value from next row and checking if all NAs are now filled.
test_fill_cabin = test_fill_fare.fillna(method="bfill", axis=0)
test_fill_cabin.isnull().sum()
# test_fill_cabin.head()
# ### Trim numbers from Cabin number to get only cabin's category.
# test_fill_cabin['Cabin'].replace(to_replace=r'[0-9]', value='', regex=True)
test_reg = test_fill_cabin
test_reg = test_reg.assign(
Cabin=(test_fill_cabin["Cabin"].replace(to_replace=r"[0-9]", value="", regex=True))
)
test_reg.isnull().sum()
# ### Fill the remaining Cabin NAs with the most frequent category
# test_reg.count(['Cabin'])
test_reg.loc[test_reg.Cabin == "C", "Cabin"].count()
test_reg_cabin = test_reg.fillna({"Cabin": "C"})
test_reg_cabin.head()
# ### Converting categorical values to numerical values
test_clean = test_reg_cabin
labels, uniqes = pd.factorize(test_reg_cabin["Sex"])
test_clean["Sex"] = labels
labels1, uniqes1 = pd.factorize(test_reg_cabin["Cabin"])
test_clean["Cabin"] = labels1
labels2, uniqes2 = pd.factorize(test_reg["Embarked"])
test_clean["Embarked"] = labels2
test_clean.head()
# ### Standardization
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# scaler.fit(train_selected.drop('Survived',axis=1))
# scaled_features = scaler.transform(train_selected.drop('Survived',axis=1))
scaler.fit(test_clean)
test_scaled_features = scaler.transform(test_clean)
# x_train = scaled_features
# y_train = train_cat['Survived']
# ### Training the model and predicting on the test set
x_train_scaled = scaled_features
y_train = train_clean["Survived"]
svc_model = SVC(
C=1,
cache_size=200,
class_weight=None,
coef0=0.0,
decision_function_shape="ovr",
degree=3,
gamma="auto",
kernel="rbf",
max_iter=-1,
probability=False,
random_state=None,
shrinking=True,
tol=0.001,
verbose=False,
)
svc_model.fit(x_train_scaled, y_train)
predictions = svc_model.predict(test_scaled_features)
predictions
result_dict = {"PassengerId": test["PassengerId"], "Survived": predictions}
result = pd.DataFrame(result_dict)
result.head()
result.to_csv("submission_svc.csv", index=False)
|
import os
import numpy as np
import pandas as pd
from skimage.io import imread
import matplotlib.pyplot as plt
import gc
gc.enable()
print(os.listdir("../input/airbus-ship-detection"))
masks = pd.read_csv(
os.path.join("../input/airbus-ship-detection", "train_ship_segmentations_v2.csv")
)
not_empty = pd.notna(masks.EncodedPixels)
print(
not_empty.sum(), "masks in", masks[not_empty].ImageId.nunique(), "images"
)  # number of masks found in the non-empty images
print(
(~not_empty).sum(), "empty images in", masks.ImageId.nunique(), "total images"
)  # number of empty images among all images
masks.head()
masks["ships"] = masks["EncodedPixels"].map(
lambda c_row: 1 if isinstance(c_row, str) else 0
)
masks.head()
unique_img_ids = masks.groupby("ImageId").agg({"ships": "sum"}).reset_index()
unique_img_ids.head()
unique_img_ids["has_ship"] = unique_img_ids["ships"].map(
lambda x: 1.0 if x > 0 else 0.0
)
unique_img_ids.head()
ship_dir = "../input/airbus-ship-detection"
train_image_dir = os.path.join(ship_dir, "train_v2")
test_image_dir = os.path.join(ship_dir, "test_v2")
unique_img_ids["has_ship_vec"] = unique_img_ids["has_ship"].map(lambda x: [x])
unique_img_ids["file_size_kb"] = unique_img_ids["ImageId"].map(
lambda c_img_id: os.stat(os.path.join(train_image_dir, c_img_id)).st_size / 1024
)
unique_img_ids.head()
unique_img_ids = unique_img_ids[
unique_img_ids["file_size_kb"] > 50
] # keep only +50kb files
plt.hist(
    x=unique_img_ids["file_size_kb"],  # data to plot
    bins=6,  # number of histogram bins
    color="steelblue",  # fill color of the bars
    edgecolor="black",  # edge color of the bars
)
plt.xticks([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
plt.ylabel("number")
plt.xlabel("file_size_kb")
# unique_img_ids['file_size_kb'].hist()  # plot histogram
masks.drop(["ships"], axis=1, inplace=True)
unique_img_ids.sample(7)
plt.title("Number of images of each size")
SAMPLES_PER_GROUP = 1500
balanced_train_df = unique_img_ids.groupby("ships").apply(
lambda x: x.sample(SAMPLES_PER_GROUP) if len(x) > SAMPLES_PER_GROUP else x
)
# keep at most SAMPLES_PER_GROUP images for each ship count
rect = plt.hist(
    x=balanced_train_df["ships"],  # data to plot
    bins=16,  # number of histogram bins
    color="steelblue",  # fill color of the bars
    edgecolor="black",  # edge color of the bars
)
plt.yticks(range(0, 1800, 300))
plt.xticks(range(0, 15))
plt.ylabel("Number of images")
plt.xlabel("Number of ships")
plt.title("Number of images containing different number of vessels")
# balanced_train_df['ships'].hist(bins=balanced_train_df['ships'].max()+1)
print(balanced_train_df.shape[0], "images", balanced_train_df.shape)  # number of images kept after balancing
from PIL import Image
x = np.empty(shape=(len(balanced_train_df), 256, 256, 3), dtype=np.uint8)
y = np.empty(shape=len(balanced_train_df), dtype=np.uint8)
for index, image in enumerate(balanced_train_df["ImageId"]):
image_array = (
Image.open("../input/airbus-ship-detection/train_v2/" + image)
.resize((256, 256))
.convert("RGB")
)
x[index] = image_array
y[index] = balanced_train_df[balanced_train_df["ImageId"] == image][
"has_ship"
].iloc[0]
print(x.shape)
print(y.shape)
# split off a validation set
from sklearn.model_selection import train_test_split
train_ids, valid_ids = train_test_split(
balanced_train_df, test_size=0.2, stratify=balanced_train_df["ships"]
)
# stratify keeps the same distribution of ship counts in the train and validation splits
train_df = pd.merge(masks, train_ids)  # merge adds the EncodedPixels mask info to train_ids
valid_df = pd.merge(masks, valid_ids)
print(train_df.shape[0], "training masks")
print(valid_df.shape[0], "validation masks")
BATCH_SIZE = 48
IMG_SCALING = (3, 3)
# generator that yields batches of images and their decoded RLE masks
def make_image_gen(in_df, batch_size=BATCH_SIZE):
all_batches = list(in_df.groupby("ImageId"))
out_rgb = []
out_mask = []
while True:
        np.random.shuffle(all_batches)  # shuffle the order of image groups each pass
for c_img_id, c_masks in all_batches:
rgb_path = os.path.join(train_image_dir, c_img_id)
c_img = imread(rgb_path)
c_mask = np.expand_dims(masks_as_image(c_masks["EncodedPixels"].values), -1)
if IMG_SCALING is not None:
c_img = c_img[:: IMG_SCALING[0], :: IMG_SCALING[1]]
c_mask = c_mask[:: IMG_SCALING[0], :: IMG_SCALING[1]]
out_rgb += [c_img]
out_mask += [c_mask]
            # im = Image.fromarray(out_rgb)
            # im.save('../code/input/trainmask_v2/'+c_img_id.split('.')[0] + '.png')
            # yield a batch once enough images are collected (required for next(train_gen) below)
            if len(out_rgb) >= batch_size:
                yield np.stack(out_rgb, 0) / 255.0, np.stack(out_mask, 0)
                out_rgb, out_mask = [], []
def masks_as_image(in_mask_list):
# Take the individual ship masks and create a single mask array for all ships
all_masks = np.zeros((768, 768), dtype=np.uint8)
for mask in in_mask_list:
if isinstance(mask, str):
all_masks |= rle_decode(mask)
return all_masks
def rle_decode(mask_rle, shape=(768, 768)):
"""
mask_rle: run-length as string formated (start length)
shape: (height,width) of array to return
Returns numpy array, 1 - mask, 0 - background
"""
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
    return img.reshape(shape).T  # Needed to align to the RLE direction
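# As a quick illustration of the RLE format: "1 3 10 2" means three ship pixels
# starting at pixel 1 and two more starting at pixel 10, counted column-wise
# (which is why the decoder transposes at the end).
print(rle_decode("1 3 10 2", shape=(4, 4)))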
# pull one batch of images and masks from the generator
train_gen = make_image_gen(train_df)
train_x, train_y = next(train_gen)  # grab the next batch from the generator
print("x", train_x.shape, train_x.min(), train_x.max())
print("y", train_y.shape, train_y.min(), train_y.max())
|
# # Real Estate: Lead Conversion
# ## Problem Statement
# Develop a scoring system for lead conversion and derive online behavior analysis along with recommendations to client.
# ## Data
# There are two data files - one containing information on the online behavior of the leads and the other containing
# information on whether those leads were converted to tenants. These two files will henceforth be referred to as `leads_data` and
# `target_data`, and the target column `converted_to_tenant` as `output`.
# ## Approach
# The primary tasks to accomplish the goal of this project are:
# 1. Understand the data
# 2. Map `target_data` to `leads_data`
# 3. Develop a scoring mechanism
# The solution follows a standard Data Science solution approach targeting the following sections:
# * [Exploratory Data Analysis](#eda)
# * [Data Cleaning](#data-cleaning)
# * [Feature Engineering](#feature-engineering)
# * [Model Selection and Training](#model-selection-and-training)
# * [Benchmarking Model Results](#benchmarking-model-results)
# * [Testing and Scoring](#testing-and-scoring)
# * [Conclusion](#conclusion)
# # Imports
import os
import random
import numpy as np
import pandas as pd
from functools import partial
# from pandas_profiling import ProfileReport
from pandas.api.types import is_numeric_dtype
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score, f1_score
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
random.seed(0)
#
# # EDA
# ## Target Data
path_target_raw = "../input/real-estate-tenant-conversion/target_data.csv"
target_raw = pd.read_csv(path_target_raw)
# folder_report = "./reports/"
# if not os.path.isdir(folder_report):
# os.makedirs(folder_report)
# path_report_target = os.path.join(folder_report, "target.html")
# profile = ProfileReport(target_raw)
# profile.to_file(path_report_target)
print("target_data shape: ", target_raw.shape)
target_columns = target_raw.columns.tolist()
for column in target_columns:
print(f"number of unique values in `{column}`:", target_raw[column].nunique())
column_id, column_output = target_columns
print(f"unique values in `{column_output}`:", target_raw[column_output].unique())
print(f"value count in `{column_output}`:")
print(target_raw[column_output].value_counts())
print(
f"duplicate values in column `{column_id}`:",
target_raw[column_id].duplicated().sum(),
)
print(f"duplicate rows in target_data:", target_raw.duplicated().sum())
# Checking for lead ID values having more than 1 output
target_groups = target_raw.groupby(column_id)
target_distinct_value = target_groups[column_output].nunique()
print(
"number of lead ID values having more than 1 output:",
len(target_distinct_value[(target_distinct_value > 1)]),
)
# ### Observations
# * Some lead ID values have multiple outputs
# * There are multiple data points repeating the same information
# ### To-Do
# * Drop lead ID values having more than one output
# * Drop duplicate rows
# ## Leads Data
path_leads_raw = "../input/real-estate-tenant-conversion/leads_data.csv"
leads_raw = pd.read_csv(path_leads_raw)
# folder_report = "./reports/"
# if not os.path.isdir(folder_report):
# os.makedirs(folder_report)
# path_report_leads = os.path.join(folder_report, "leads.html")
# profile = ProfileReport(leads_raw)
# profile.to_file(path_report_leads)
# identifying a column in `leads_data` that resembles `lead_id` column in `target_data`
column_id in leads_raw.columns, leads_raw.filter(
like=column_id
).columns, leads_raw.filter(like="id").columns
# identifying which of `ga_lead_id` and `client_id` matches the `lead_id` column in `target_data`
id_target_set = set(target_raw[column_id].unique())
print("unique IDs in `target_data`: ", len(id_target_set))
possible_id_columns = ["ga_lead_id", "client_id"]
for column in possible_id_columns:
id_leads_set = set(leads_raw[column].unique())
print(f"unique IDs in `{column}`: ", len(id_leads_set))
id_common = id_leads_set.intersection(id_target_set)
print(f"common lead IDs in `target_data` and `{column}`: ", len(id_common))
# ### Column Analysis
# #### `ga_lead_id`
column_id_leads = "ga_lead_id"
column_id_target = column_id
# checking number of times the `column_id_leads` is repeated
leads_raw[column_id_leads].value_counts().value_counts()
# #### `client_id`
column_client = "client_id"
print(f"unique values in column `{column_client}`", leads_raw[column_client].nunique())
print(
f"null value percentage in column `{column_client}`",
leads_raw[column_client].isna().mean(),
)
print(f"description of number of data points for every `{column_client}`")
leads_raw[column_client].value_counts().describe()
# ##### Hypothesis
# 1. A client has multiple interactions with the website
# 2. A client might be linked with multiple leads
# 3. Each `lead_id` is associated with only one client
#
leads_client_groups = leads_raw.groupby(column_client)
# hypothesis 1 testing
random_client = random.choice(leads_raw[column_client])
leads_client_sample = leads_client_groups.get_group(random_client)
print(
f"number of total interactions for a sample client - {int(random_client)}:",
leads_client_sample.shape[0],
)
print(
f"number of unique interactions for a sample client - {int(random_client)}:",
leads_client_sample.drop_duplicates().shape[0],
)
print(
f"percentage of duplicate rows for a sample client - {int(random_client)}:",
100 * leads_client_sample.duplicated().mean(),
)
# hypothesis 2 testing
client_leads_count = leads_client_groups[column_id_leads].nunique()
print(
"percentage of clients having more than one leads:", (client_leads_count > 1).mean()
)
# hypothesis 3 testing
leads_lead_groups = leads_raw.groupby(column_id_leads)
leads_client_count = leads_lead_groups[column_client].nunique()
has_more_than_1_client = leads_client_count > 1
print("number of leads with more than 1 client:", has_more_than_1_client.sum())
print(
"percentage of leads with more than 1 client:", 100 * has_more_than_1_client.mean()
)
# ##### Hypothesis Results
# 1. A client has multiple interactions with the website - positive
#
# There exist duplicate data points for a client, but we do not have enough evidence to drop them
#
# 2. A client might be linked with multiple leads - positive
# 3. Each `lead_id` is associated with only one client - negative
#
# We can drop the lead IDs having more than one client, as they constitute 0.07% of all lead IDs
#
# Note
# Since a client ID can be associated with more than one lead ID, we cannot merge data on client ID to generate lead-level features
# #### Other Columns
# Analysing the remaining columns of `leads_data` and grouping them based on their characteristics for feature engineering
# function for column analysis
def basic_info(column):
print(f"--------------{column}--------------")
print(f"datatype: {leads_raw[column].dtype}")
print(f"NaN value %: {leads_raw[column].isna().mean()}")
nunique = leads_raw[column].nunique()
if nunique <= 5:
print(f"unique values: {leads_raw[column].unique()}")
print(f"value counts")
print(leads_raw[column].value_counts())
else:
print(f"number of unique values: {nunique}")
if is_numeric_dtype(leads_raw[column]):
print(f"minimum value: {leads_raw[column].min()}")
print(f"maximum value: {leads_raw[column].max()}")
print(
f"sample client data: {leads_client_groups.get_group(random.choice(leads_raw[column_client]))[column].tolist()[:10]}"
)
print(f"--------------------------------------")
other_columns = [
column
for column in leads_raw.columns
if column not in [column_id_leads, column_client]
]
for column in other_columns:
basic_info(column)
# grouping columns based on above observation
leads_features_info = {
"1": {
"columns": [
"sessions",
"hits",
"unique_events",
"session_duration",
"page_per_session",
"enquiries",
"pageviews",
"time_on_page",
"avg_time_on_page",
"property_search",
"lead_dropped",
"bounce_rate",
"total_events",
],
},
"2": {
"columns": [
"timestamp",
],
},
"3": {
"columns": ["social_source_referral"],
},
"4": {
"columns": ["contact_us_click", "app_downloads"],
},
"6": {
"columns": ["device_category"],
"devices": [
device
for device in leads_raw["device_category"].unique().tolist()
if device is not np.NaN
],
},
"7": {
"columns": ["event_category"],
"categories": [
cat
for cat in leads_raw["event_category"].unique().tolist()
if cat is not np.NaN
],
},
}
# columns to drop as they do not provide any relevant information
leads_columns_to_drop = [
"contact_us_form",
"pdf_download",
"website_search",
"social_media_click",
"date_hour_minute",
"medium",
"data_source",
"property_view_location",
"property_view_price",
"property_view_size",
"property_view_bedrooms",
]
# columns that need more analysis than `basic_info` provides
leads_columns_need_more_analysis = ["campaign", "event_label", "event_action"]
# ### Observations
# * `lead_id` of `target_data` does not appear under the same column name in `leads_data`
# * There are no IDs common to `target_data` and `client_id`, hence we will use `ga_lead_id` to merge with `target_data`
# * The number of leads is larger than the number of clients, hence a client might be associated with more than one lead ID
# * We have grouped columns based on their characteristics and identified which to drop and which need more advanced analysis
# ### To-Do
# * Drop lead IDs from `target_data` having more than one client ID
# * Features to be created:
#   * Number of leads for a client
#   * Number of interactions for a client
#   * Required numerical features normalized by the number of leads for a client
#   * Features based on the column grouping
# * Drop the columns identified in the column analysis
# # Data cleaning
# ## Target Data
# dropping lead ID values having more than one output
# target_groups_filtered = target_groups.filter(lambda x: x[col_label].nunique()==1)
lead_ids_to_drop = target_distinct_value[~(target_distinct_value == 1)].index.tolist()
target_filtered = target_raw[~target_raw[column_id_target].isin(lead_ids_to_drop)].copy()
print(
"shape of the `target_data` after dropping lead ID values having more than one outputs:",
target_filtered.shape,
)
# dropping duplicate rows
target_filtered.drop_duplicates(inplace=True)
print(
"shape of the `target_data` after dropping duplicate rows:", target_filtered.shape
)
# dropping lead ID values having more than one client in the Leads Data
leads_groups = leads_raw.groupby(column_id_leads)
leads_clients = leads_groups[column_client].nunique()
leads_to_drop = leads_clients[(leads_clients > 1)].index.tolist()
target_filtered = target_filtered[
~(target_filtered[column_id_target].isin(leads_to_drop))
]
print(
"shape of the `target_data` after dropping lead ID values having more than one clients in `leads_data`:",
target_filtered.shape,
)
# saving filtered Target Data
path_target_filtered = "../kaggle/working/target.csv"
target_filtered.to_csv(path_target_filtered, index=False)
del target_raw
# ## Leads Data
# dropping columns
leads_filtered = leads_raw.drop(columns=leads_columns_to_drop)
leads_filtered["timestamp"] = pd.to_datetime(leads_filtered["timestamp"])
path_leads_filtered = "../kaggle/working/leads.csv"
leads_filtered.to_csv(path_leads_filtered, index=False)
del leads_raw
# # Feature Engineering
# ## Designing Features
# Based on the column segregation in [Other Columns](#column-analysis), we design features for the different column groups.
# `feature_set1` - numeric features:
# * sum
# * mean
# * count of not-NA rows
# * count of non-zero rows
# * max
# * sum / number of leads for the client
# * count of not-NA rows / number of leads for the client
# * count of non-zero rows / number of leads for the client
# `feature_set2` - datetime features:
# * time gap in minutes between the first and last interaction, based on `timestamp`
# `feature_set3` - boolean feature:
# * whether the client has a `social_source_referral`
# `feature_set4` - boolean features:
# * whether the client clicked the contact-us link (`contact_us_click`) or downloaded the app (`app_downloads`)
# `feature_set5` - categorical and numeric features:
# * map the extended list of `sources` down to a smaller set
# * most used `source` for a user
# * count of occurrences of every `source`
# `feature_set6` - numeric features:
# * count of each device type (mobile, tablet) used to access the website
# `feature_set7` - numeric features:
# * count of each `event_category`
#
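# For reference, the aggregates in `feature_set1` for a single numeric column (here
# `sessions`, taken from the group-1 list above) could also be written as one
# groupby-agg. This is only an illustrative sketch; the helper functions below are
# what the notebook actually uses (they also add the per-lead normalized variants).
example_set1 = (
    leads_filtered.groupby(column_client)["sessions"]
    .agg(
        sessions_sum="sum",
        sessions_mean="mean",
        sessions_max="max",
        sessions_count_not_na="count",
        sessions_count_nonzero=lambda s: ((s != 0) & s.notna()).sum(),
    )
    .reset_index()
)
example_set1.head()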
def get_not_na_count(series):
return (~series.isna()).sum()
def get_nonzero(series):
return ((series != 0) & (~series.isna())).sum()
def get_feature_set1(group, column, column_merge):
# sum of column
features = group[column].sum().reset_index()
features.columns = [column_merge, f"{column}-sum-{column_merge}"]
# number of not null rows
feature2 = group[column].apply(get_not_na_count).reset_index()
feature2.columns = [column_merge, f"{column}-count_not_na-{column_merge}"]
features = features.merge(feature2, on=column_merge, how="outer")
# max of column
feature3 = group[column].max().reset_index()
feature3.columns = [column_merge, f"{column}-max-{column_merge}"]
features = features.merge(feature3, on=column_merge, how="outer")
# counting non zero rows
feature4 = group[column].apply(get_nonzero).reset_index()
feature4.columns = [column_merge, f"{column}-count_nonzero-{column_merge}"]
features = features.merge(feature4, on=column_merge, how="outer")
# mean of column
features[f"{column}-mean-{column_merge}"] = (
features[f"{column}-sum-{column_merge}"]
/ features[f"{column}-count_not_na-{column_merge}"]
)
group_id_leads = group[column_id_leads].nunique().reset_index(drop=True)
# sum/ #leads
features[f"{column}-sum_per_lead-{column_merge}"] = (
features[f"{column}-sum-{column_merge}"] / group_id_leads
)
# #not_na_rows/ #leads
features[f"{column}-count_not_na_per_lead-{column_merge}"] = (
features[f"{column}-count_not_na-{column_merge}"] / group_id_leads
)
# #not_zero_rows/ #leads
features[f"{column}-count_nonzero_per_lead-{column_merge}"] = (
features[f"{column}-count_nonzero-{column_merge}"] / group_id_leads
)
return features
def get_feature_set2(group, column, column_merge):
# calculating time gap in minutes between first and last interaction
features = (
(group[column].max() - group[column].min()).dt.total_seconds() / 60
).reset_index()
features.columns = [column_merge, f"{column}-gap-{column_merge}"]
return features
def value_in_series(value, series):
return value in series.tolist()
def get_feature_set3(group, column, column_merge):
# checking if "Yes" exists in a column
features = group[column].apply(partial(value_in_series, "Yes")).reset_index()
features.columns = [column_merge, f"{column}-value-{column_merge}"]
return features
def get_feature_set4(group, column, column_merge):
# checking if "1" exists in a column
features = group[column].apply(partial(value_in_series, 1)).reset_index()
features.columns = [column_merge, f"{column}-value-{column_merge}"]
return features
set5_source_mapping = leads_features_info["5"]["source_mapping"]
def get_series_mode(series):
mode = series.map(set5_source_mapping).mode()
if len(mode):
return mode.iloc[0]
else:
return np.nan
def get_series_count(value, series):
return (series == value).sum()
def get_feature_set5(group, column, column_merge):
# finding most used source
features = group[column].apply(get_series_mode).reset_index()
features.columns = [column_merge, f"{column}-mode-{column_merge}"]
# creating a feature for each source count
for value in set(set5_source_mapping.values()):
feature = group[column].apply(partial(get_series_count, value)).reset_index()
feature.columns = [column_merge, f"{column}-{value}-count-{column_merge}"]
features = features.merge(feature, on=column_merge, how="outer")
return features
def is_value(value, series):
return (series == value).sum()
def get_feature_set6(group, column, column_merge):
# count of devices
values = leads_features_info["6"]["devices"]
features = pd.DataFrame({column_merge: []})
for value in values:
feature = group[column].apply(partial(is_value, value)).reset_index()
feature.columns = [column_merge, f"{column}-count_{value}-{column_merge}"]
features = features.merge(feature, on=column_merge, how="outer")
return features
def get_feature_set7(group, column, column_merge):
# count of categories
values = leads_features_info["7"]["categories"]
features = pd.DataFrame({column_merge: []})
for value in values:
feature = group[column].apply(partial(is_value, value)).reset_index()
feature.columns = [column_merge, f"{column}-count_{value}-{column_merge}"]
features = features.merge(feature, on=column_merge, how="outer")
return features
leads_features_info["1"]["feature_function"] = get_feature_set1
leads_features_info["2"]["feature_function"] = get_feature_set2
leads_features_info["3"]["feature_function"] = get_feature_set3
leads_features_info["4"]["feature_function"] = get_feature_set4
leads_features_info["5"]["feature_function"] = get_feature_set5
leads_features_info["6"]["feature_function"] = get_feature_set6
leads_features_info["7"]["feature_function"] = get_feature_set7
# ## Generating Features
# ### Client Features
feature_client = pd.DataFrame({column_client: []})
leads_client = leads_filtered.groupby(column_client)
for feature_set in leads_features_info:
feature_set_function = leads_features_info[feature_set]["feature_function"]
for column in leads_features_info[feature_set]["columns"]:
print("generating features for set:", feature_set, "column:", column)
features_set_column = feature_set_function(leads_client, column, column_client)
feature_client = feature_client.merge(
features_set_column, on=column_client, how="outer"
)
feature_client.to_csv("../kaggle/working/feature_client.csv", index=False)
# folder_report = "./reports/"
# if not os.path.isdir(folder_report):
# os.makedirs(folder_report)
# path_report_features = os.path.join(folder_report, "features.html")
# profile = ProfileReport(features_client)
# profile.to_file(path_report_features)
# ### Lead Features
def get_lead_id(value):
if isinstance(value, str):
if value.startswith("Inventory Residential Lead - "):
return int(value.split(" - ")[-1])
return None
leads_filtered[f"{column_id_leads}_2"] = leads_filtered["event_action"].apply(
get_lead_id
)
leads_filtered[column_id_target] = leads_filtered[column_id_leads].fillna(
0
) + leads_filtered[f"{column_id_leads}_2"].fillna(0)
feature_leads = pd.DataFrame({column_id_target: []})
leads = leads_filtered.groupby(column_id_target)
for feature_set in leads_features_info:
feature_set_function = leads_features_info[feature_set]["feature_function"]
for column in leads_features_info[feature_set]["columns"]:
print("generating features for set:", feature_set, "column:", column)
features_set_column = feature_set_function(leads, column, column_id_target)
feature_leads = feature_leads.merge(
features_set_column, on=column_id_target, how="outer"
)
feature_leads.to_csv("../kaggle/working/feature_leads.csv", index=False)
# ### Merging `target_data` with the Lead Features and Client Features
# identifying required lead IDs from `leads_data`
lead_ids_to_consider = leads_filtered[column_id_leads].unique()
# filtering `target_data` based on required lead IDs
target_filtered = target_filtered[
target_filtered[column_id_target].isin(lead_ids_to_consider)
]
lead_ids_to_consider = target_filtered[column_id_target].unique()
# filtering `leads_features` based on required lead IDs
feature_leads_filtered = feature_leads[
feature_leads[column_id_target].isin(lead_ids_to_consider)
]
# merging `target_filtered` with Lead Features
features_combined = pd.merge(
target_filtered, feature_leads_filtered, on=column_id_target, how="inner"
)
print("total lead IDs to consider:", len(lead_ids_to_consider))
print("shape of `target_filtered`:", target_filtered.shape)
print("shape of `feature_leads_filtered`:", feature_leads_filtered.shape)
print("shape of `features_combined`:", features_combined.shape)
# extracting lead IDs and client IDs from `leads_data` for which we have `output` information in `target_data`
client_leads = leads_filtered[[column_id_leads, column_client]].drop_duplicates()
client_leads = client_leads[
client_leads[column_id_leads].isin(target_filtered[column_id_target])
]
client_leads.columns = [column_id_target, column_client]
print("client-lead ID combinations in `leads_data`:", client_leads.shape[0])
# merging `features_combined` with extracted lead IDs and client IDs
features = features_combined.merge(client_leads, on=column_id_target, how="outer")
print(
"shape of `features` after combining with client-lead ID combinations:",
features.shape,
)
# merging `features` with Client Features
features = features.merge(feature_client, on=column_client, how="inner")
print("shape of `features` after mergining with Client Features:", features.shape)
# #### To-Do
# Feature cleaning:
# * Drop features with a constant value throughout
# * Drop features with high correlation (above 90%)
# * Drop data points that share the same feature information and differ only in `lead_id`, `client_id` and `output` values
# * Drop features having more than 70% missing values
#
# dropping features with constant value throughout
feature_var_is_0 = (features.var() == 0).reset_index()
columns_with_var_0 = feature_var_is_0[feature_var_is_0[0]]["index"].tolist()
features.drop(columns=columns_with_var_0, inplace=True)
print("number of columns with zero variance:", len(columns_with_var_0))
print("shape of `features` after dropping columns with zero variance:", features.shape)
# checking correlations amongst `features`
feature_corr = features.corr().abs()
# dropping features with high correlation (above 90%)
corr_subset = feature_corr.where(
    np.triu(np.ones(feature_corr.shape), k=1).astype(bool)
)
columns_with_high_corr = [
column for column in corr_subset.columns if any(corr_subset[column] > 0.9)
]
features.drop(columns=columns_with_high_corr, inplace=True)
print("number of features with high correlation:", len(columns_with_high_corr))
print(
"shape of `features` after dropping columns with correlation of over 90%:",
features.shape,
)
# dropping data points with same feature information and different `lead_id`, `client_id` and `output` values
temp = features.drop(columns=[column_id_target, column_client, column_output])
features = features.loc[temp.drop_duplicates().index]
print("shape of features after dropping duplicate data points", features.shape)
# dropping features having more than 70% of missing values
missing_values = (features.isna().mean()).reset_index()
columns_to_drop = missing_values[missing_values[0] > 0.7]["index"].to_list()
features.drop(columns=columns_to_drop, inplace=True)
print("shape of final data after dropping duplicate data points", features.shape)
# final data
features.to_csv("../kaggle/working/features.csv", index=False)
# # Model Selection and Training
# ## Data Preparation
data = features.copy()
# finding categorical columns in the data
categorical_columns = [
column for column in data.columns if not (is_numeric_dtype(data[column]))
]
print("categorical columns in data:", categorical_columns)
# label encoding categorical columns
label_encoder = {}
for column in categorical_columns:
label_encoder[column] = preprocessing.LabelEncoder()
data[column].fillna("0", inplace=True)
data[column] = label_encoder[column].fit_transform(data[column])
# defining features and target with X and y
X = data.drop(columns=[column_output]).copy()
y = data[column_output]
# creating train-val-test split
# splitting data in training and remaining dataset
X_train, X_rem, y_train, y_rem = train_test_split(
X, y, train_size=0.8, random_state=226
)
# splitting the remaining data into equal validation and test splits (10% each of the overall data)
X_valid, X_test, y_valid, y_test = train_test_split(
X_rem, y_rem, test_size=0.5, random_state=226
)
print("shape of train data:", X_train.shape, y_train.shape)
print("shape of validation data:", X_valid.shape, y_valid.shape)
print("shape of test data:", X_test.shape, y_test.shape)
# dropping `client_id` and `lead_id` from train data, valid data and test data
X_train.drop(columns=[column_id_target, column_client], inplace=True)
X_valid_index = X_valid[[column_id_target, column_client]]
X_test_index = X_test[[column_id_target, column_client]]
X_valid.drop(columns=[column_id_target, column_client], inplace=True)
X_test.drop(columns=[column_id_target, column_client], inplace=True)
# filling missing values with default value -1
X_train.fillna(-1, inplace=True)
X_valid.fillna(-1, inplace=True)
X_test.fillna(-1, inplace=True)
# using SMOTE to oversample the minority class and compensate for class imbalance
oversampling = SMOTE(random_state=226)
X_train, y_train = oversampling.fit_resample(X_train, y_train)
print("shape of train data:", X_train.shape, y_train.shape)
# model scoring function - calculating f1 and roc-auc scores
def scoring(model):
results = {}
y_predict = model.predict(X_train)
results["train-f1"] = f1_score(y_predict, y_train)
results["train-roc_auc"] = roc_auc_score(y_predict, y_train)
y_predict = model.predict(X_valid)
results["valid-f1"] = f1_score(y_predict, y_valid)
results["valid-roc_auc"] = roc_auc_score(y_predict, y_valid)
y_predict = model.predict(X_test)
results["test-f1"] = f1_score(y_predict, y_test)
results["test-roc_auc"] = roc_auc_score(y_predict, y_test)
return results
# ## Baseline Model - Logistic Regression
# defining a list to store all model results
model_results = []
# training logistic regression with no hyperparameter tuning as the baseline model
baseline_dt = LogisticRegression(random_state=0)
baseline_dt.fit(X_train, y_train)
baseline_dt_score = scoring(baseline_dt)
print("F1 and ROC-AUC score for train, validation and test data:", baseline_dt_score)
result_df = pd.DataFrame(baseline_dt_score.items()).set_index(0).T.reset_index()
result_df["model"] = "Logistic Regression"
result_df["params"] = "baseline"
model_results.append(result_df)
# ## Decision Tree
# training decision tree with hyperparameter tuning and storing the results for model benchmarking
print(
"calculating F1 and ROC-AUC score for train, validation and test data along with different hyperparameter values"
)
for depth in range(4, 15):
tree_dt = DecisionTreeClassifier(max_depth=depth, random_state=0)
tree_dt.fit(X_train, y_train)
result = scoring(tree_dt)
params = f"depth:{depth}"
result_df = pd.DataFrame(result.items()).set_index(0).T.reset_index()
result_df["model"] = "Decision Tree"
result_df["params"] = params
model_results.append(result_df)
# ## K-Nearest Neighbours
# training KNN with hyperparameter tuning and storing the results for model benchmarking
print(
"calculating F1 and ROC-AUC score for train, validation and test data along with different hyperparameter values"
)
neighbors = [10, 20, 50, 80, 100, 150]
leaf_sizes = [10, 20, 50]
for neighbor in neighbors:
for leaf_size in leaf_sizes:
nei_knn = KNeighborsClassifier(
n_neighbors=neighbor, weights="uniform", leaf_size=leaf_size
)
nei_knn.fit(X_train, y_train)
result = scoring(nei_knn)
params = f"neighbor:{neighbor}, leaf_size:{leaf_size}"
result_df = pd.DataFrame(result.items()).set_index(0).T.reset_index()
result_df["model"] = "KNN"
result_df["params"] = params
model_results.append(result_df)
# ## Gradient Boost
print(
"calculating F1 and ROC-AUC score for train, validation and test data along with different hyperparameter values"
)
learning_rates = [0.01, 0.05, 0.1]
n_estimators = [10, 25, 50, 100, 150]
min_samples_splits = [10, 25, 50]
max_depths = [4, 7, 10, 15]
for learning_rate in learning_rates:
for n_estimator in n_estimators:
for min_samples_split in min_samples_splits:
for max_depth in max_depths:
model_gb = GradientBoostingClassifier(
random_state=0,
n_estimators=n_estimator,
learning_rate=learning_rate,
min_samples_split=min_samples_split,
max_depth=max_depth,
)
model_gb.fit(X_train, y_train)
result = scoring(model_gb)
params = f"learning_rate:{learning_rate}, n-estimators:{n_estimator}, min_samples_split:{min_samples_split}, max_depth:{max_depth}"
result_df = pd.DataFrame(result.items()).set_index(0).T.reset_index()
result_df["model"] = "GBM"
result_df["params"] = params
model_results.append(result_df)
# ## Random Forest
print(
"calculating F1 and ROC-AUC score for train, validation and test data along with different hyperparameter values"
)
ne_list = [10, 20, 50, 80, 100, 150, 200]
for est in ne_list:
for depth in range(4, 10):
tree_rf = RandomForestClassifier(
n_estimators=est, max_depth=depth, random_state=0
)
tree_rf.fit(X_train, y_train)
result = scoring(tree_rf)
params = f"n_estimators:{est}, max_depth:{depth}"
result_df = pd.DataFrame(result.items()).set_index(0).T.reset_index()
result_df["model"] = "Random Forest"
result_df["params"] = params
model_results.append(result_df)
# # Benchmarking Model Results
# concatenating all the model results
model_eval_df = pd.concat(model_results, ignore_index=True)
df_columns = model_eval_df.columns[1:].tolist()
model_eval_df = model_eval_df[df_columns[-2:] + df_columns[:-2]]
# identifying the best model
model_eval_df[
(model_eval_df["valid-roc_auc"] > 0.62) & (model_eval_df["valid-f1"] > 0.42)
]
# ## Observations
# To pick the best two models we look at `valid-roc_auc` greater than 62% (together with `valid-f1` above 42%); from the results above we can conclude that
# * the Random Forest model with 100 estimators and a maximum depth of 5, and
# * the Gradient Boosting model with learning_rate=0.01, n_estimators=10, min_samples_split=25 and max_depth=4
# are the best models while avoiding overfitting
# # Testing and Scoring
# finding leads conversion rate without using machine learning
default_success_percentage = 100 * y_valid.mean()
print("success percentage without machine learning:", default_success_percentage)
def get_prediction_probability(model, x):
return model.predict_proba(x)[:, 1]
# finding leads conversion rate using machine learning
final_models = {}
final_models["Random Forest"] = RandomForestClassifier(
n_estimators=100, max_depth=5, random_state=0
)
final_models["Random Forest"].fit(X_train, y_train)
final_models["Gradient Boosting Model"] = GradientBoostingClassifier(
learning_rate=0.01,
n_estimators=10,
min_samples_split=25,
max_depth=4,
random_state=0,
)
final_models["Gradient Boosting Model"].fit(X_train, y_train)
for model_name, model in final_models.items():
# scoring leads and identifying top picks
scoring_df = pd.DataFrame(
{
column_id_target: X_valid_index[column_id_target],
"lead_success_score": get_prediction_probability(model, X_valid),
"ground_truth": y_valid,
}
).sort_values(by="lead_success_score", ascending=False)
top_picks = [50, 100]
for top_pick in top_picks:
success_percentage = 100 * scoring_df.iloc[:top_pick]["ground_truth"].mean()
print(
f"success percentage for top {top_pick} picks using {model_name}:",
success_percentage,
)
print(
f"we can increase leads conversion rate for top {top_pick} picks by:",
success_percentage - default_success_percentage,
)
print("===================================================")
scoring_df.head(5)
# ## Observation
# From the above results on validation data we conclude that Random Forest Model performs better than Gradient Boosting Model
# Model selected - Random Forest Model
# ## Scoring on test data (using selected model)
test_success_percentage = 100 * y_test.mean()
model_name = "Random Forest"
final_model = final_models[model_name]
# scoring leads using probabilities and identifying top picks
scoring_df = pd.DataFrame(
{
column_id_target: X_test_index[column_id_target],
"lead_success_score": get_prediction_probability(final_model, X_test),
"ground_truth": y_test,
}
).sort_values(by="lead_success_score", ascending=False)
top_pick = 50
success_percentage = 100 * scoring_df.iloc[:top_pick]["ground_truth"].mean()
print(
"success percentage on test set without machine learning:", test_success_percentage
)
print(
f"success percentage for top {top_pick} test picks using {model_name}:",
success_percentage,
)
print(
f"we can increase leads conversion rate for top {top_pick} test picks by:",
success_percentage - test_success_percentage,
)
# ## Identifying Important Features
feature_importance = pd.DataFrame(
    {"column": X_train.columns, "importance": final_model.feature_importances_}
)
feature_importance.sort_values(by="importance", ascending=False, inplace=True)
feature_importance.iloc[:10]
|
# # Introduction
# NLTK is a module for natural language processing (NLP); it can transform words so that they are easier for a machine to work with. In this part, I will cover some basic NLTK skills. The applications of NLTK are quite broad, such as the analysis of questionnaires, social media, or papers.
import nltk  # import the nltk module
# * Collect data
# Basically, you can collect practice data in several ways, but you have to follow the terms of use set by the original authors.
# 1. Use crawlers to grab data.
# 2. Use an API such as tweepy to collect data from social media.
# 3. Use questionnaires from others.
# 4. Use the corpora bundled with NLTK, such as "brown", "gutenberg", or "reuters".
# Here is an example of how to import the brown corpus.
# Before we "import" the relevant NLTK corpus, we have to download it; this is quite different from most other modules.
nltk.download("brown")
from nltk.corpus import brown
# then we can access the corpus words
a = brown.words()
a
nltk.download("gutenberg")
from nltk.corpus import gutenberg
b = gutenberg.words()
b
text = " Hello! Where can I go to the classroom?123" # example texture
# Delete noise
# There are several punctuation marks that are not helpful for the analysis and will affect it, so we have to delete them. We can use the `re` module to remove them.
# remove them ourselves
import re  # re is the regular-expression module, which is handy for dealing with text
text_clean = re.sub(
    r"[~.,%/:;?_&+*=!-]", " ", text
)  # replace any of [~.,%/:;?_&+*=!-] with a space in "text"
print(text_clean)
# OK! Now the punctuation has disappeared; next, there is still some other noise to remove
# remove the numbers
text_clean = re.sub(r"[0-9]", " ", text_clean)
text_clean
# Of course, other information such as URLs, emoji, or other noise may also need to be dealt with; you can find more tutorials on the internet.
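# For example, a simple pattern can strip URLs (a minimal sketch; real-world cleaning
# usually needs more careful rules, e.g. for emoji you could filter non-ASCII characters):
text_with_url = "check this out https://example.com please"
print(re.sub(r"http\S+|www\.\S+", " ", text_with_url))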
# Make the words lower case:
# when we analyze the words we have to unify the text
text_clean = text_clean.lower()  # assign the result so the lower-cased text is actually used
text_clean
# Tokenize the words, which makes the text easier for the machine to process.
from nltk.tokenize import word_tokenize
text_token = nltk.word_tokenize(text_clean)
text_token
# now you can see the text has been split into tokens
nltk.download("wordnet")
# One known problem: the wordnet file is downloaded as a zip archive that is not unzipped; you can use the code above to fetch it here.
# Besides, you may face the same problem when you download this module on your own computer:
# you can simply unzip the file yourself.
# You can find relevant information here: https://www.kaggle.com/discussions/general/395169
# Lemmatization and stemming are both important when cleaning text data:
# lemmatize: it maps inflected forms (e.g. -ing forms, past participles) to the base form, so "is", "are", "am", "being" all become "be"
# stem: it cuts off the head and tail of a word to find its root
from nltk.stem import WordNetLemmatizer
text_practice = "I am playing vedio game now "
lemmatizer = WordNetLemmatizer()
text_practice = lemmatizer.lemmatize(text_practice)
text_practice_token = nltk.word_tokenize(text_practice)
text_practice_token
# Tip: it is fine to tokenize the words first,
# but you may get an "unhashable type: 'list'" error if you pass the whole list to the lemmatizer,
# so you would have to lemmatize token by token: "[lemmatizer.lemmatize(token) for token in text_practice_token]"
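# Stemming was mentioned above but not demonstrated; here is a minimal sketch using
# NLTK's PorterStemmer, which chops word endings instead of looking words up:
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
print([stemmer.stem(word) for word in ["playing", "played", "studies", "better"]])
# note that "studies" becomes "studi" - stems are not always real words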
# **Create stopwords:**
# NLTK has its own stopword list, or we can create our own stopwords to delete unnecessary words from the sentence.
# create our own stopwords and filter them out
stop_words = ["I", "can", "to", "the"]
sentence = [word for word in text_practice_token if word not in stop_words]
sentence
# use the nltk stopwords
nltk.download("stopwords")
from nltk.corpus import stopwords
stopwords = nltk.corpus.stopwords.words("english")
print(stopwords)  # check the stopword list
# **Dealing with a dataframe**: In practice, you will often work with questionnaire data or data pulled from the Twitter API, and it will come as a dataframe, so here I briefly introduce how to transform a dataframe.
# * apply() is very useful for text analysis on a dataframe
#
import pandas as pd
# Example dataframe
data = pd.DataFrame(
{
"student": ["Betty", "Jacky", "Sam", "John"],
"opinion": [
"the course is so boring",
"I want to go home",
"interesting!",
"bad feeling",
],
}
)
data.head()
# for instance, if we want to analyze the students' opinions,
# we first remove the noise as before
def remove_noise(x):
x = re.sub(r"[~.,%/:;?_&+*=!-]", " ", x)
return x
data["opinion"] = data["opinion"].apply(remove_noise)
data["opinion"]
# lower case: the column itself has no "lower" method, so we use the .str accessor to transform it
data["opinion"] = data["opinion"].str.lower()
data
data["opinion_token"] = data["opinion"].apply(lambda x: nltk.word_tokenize(x))
data["opinion_token"] # tokenize
data["opinion_token"] = data["opinion_token"].apply(
lambda x: [lemmatizer.lemmatize(word) for word in x]
)
data["opinion_token"]
# If some words are not successfully transformed by the lemmatizer,
# we have other choices:
# 1. ignore them (if the influence is not significant)
# 2. stemming (however, it only roughly cuts off word beginnings and endings)
# 3. use pos_tag, which can make the result more accurate, although it is also more complex (see the sketch below)
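# A minimal sketch of option 3: pos_tag tells the lemmatizer whether each token is a
# verb, noun, adjective or adverb, so inflected verbs are handled correctly.
nltk.download("averaged_perceptron_tagger")  # newer NLTK versions may name this resource "averaged_perceptron_tagger_eng"
from nltk.corpus import wordnet
def get_wordnet_pos(tag):
    # map the Penn Treebank tag prefix to the corresponding WordNet POS constant
    return {"J": wordnet.ADJ, "V": wordnet.VERB, "R": wordnet.ADV}.get(tag[0], wordnet.NOUN)
tagged = nltk.pos_tag(nltk.word_tokenize("I am playing video games"))
print([lemmatizer.lemmatize(word, get_wordnet_pos(tag)) for word, tag in tagged])
# "playing" now becomes "play" because it is tagged as a verb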
|
w = "I see a pencil" # Slicing Strings
print(w[2:5])
q = "Don't open the door!" # Slice From the Start
print(q[:6])
a = "She doesn't study German on Monday." # Slice From the End
print(a[12:18])
x = "Air is pure!" # Negative Indexing
print(x[-7:-1])
# # How to modify Strings
y = "I like ice cream" # upper case
print(y.upper())
y = "I LIKE ICE CREAM" # lower case
print(y.lower())
t = " I read a lot. " # remove spaces
print(t.strip()) # returns "I read a lot."
h = "Hey, Salma" # replace string
print(h.replace("what_will_change", "we_want_to_fulfill"))
# split separates words
k = "The house , is beautiful."
print(k.split(",")) # returns ['The house', ' is beautiful']
x = k.split()
k[1]
a = "I'm" # String Concatenation
b = "tired."
c = a + b
print(c)
a = "I'm" # To add a space between them, add a " ":
b = "tired."
c = a + " " + b
print(c)
cost = 4 # String Format
txt = " The cost of this jakect is {} $ "
print(txt.format(cost))
cats = 2
dogs = 5
myorder = "I'll adopt {} cats and {} dogs this summer."
print(myorder.format(cats, dogs))
cats = 2
dogs = 5
myorder = "I'll adopt {0} cats and {1} dogs this summer."
print(myorder.format(cats, dogs))
txt = "While I am passionate about \"Basketball\", I prefer football."  # escape characters: \" inserts a double quote inside a double-quoted string
txt
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import feature_extraction, linear_model, model_selection, preprocessing
from sklearn.feature_extraction.text import (
CountVectorizer,
TfidfVectorizer,
HashingVectorizer,
)
from nltk.tokenize.toktok import ToktokTokenizer
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import re
import nltk
from collections import Counter
import gensim
import heapq
from operator import itemgetter
from multiprocessing import Pool
from nltk.tokenize import RegexpTokenizer, word_tokenize
from sklearn.ensemble import GradientBoostingClassifier
from tqdm import tqdm
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train_df.isnull().sum()
df_raw = test_df.copy()
df_raw
len(train_df)
def remove_puncts(x):
pattern = r"[^a-zA-z0-9\s]"
text = re.sub(pattern, "", x)
return text
train_df["text"] = train_df["text"].apply(remove_puncts)
test_df["text"] = test_df["text"].apply(remove_puncts)
train_df.iloc[0]["text"]
stopword_list = nltk.corpus.stopwords.words("english")
def remove_stopwords(text):
tokenizer = ToktokTokenizer()
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
filtered_tokens = [token for token in tokens if token.lower() not in stopword_list]
filtered_text = " ".join(filtered_tokens)
return filtered_text
train_df["text"] = train_df["text"].apply(remove_stopwords)
test_df["text"] = test_df["text"].apply(remove_stopwords)
tokenizer = RegexpTokenizer(r"\w+")
train_df["tokens"] = train_df["text"].apply(tokenizer.tokenize)
all_words = [word for tokens in train_df["tokens"] for word in tokens]
sentence_lengths = [len(tokens) for tokens in train_df["tokens"]]
VOCAB = sorted(list(set(all_words)))
print("%s words total, with a vocabulary size of %s" % (len(all_words), len(VOCAB)))
count_all_words = Counter(all_words)
# get the top 100 most commonly occurring words
count_all_words.most_common(100)
tfidf = TfidfVectorizer(min_df=2, max_df=0.5, ngram_range=(1, 2))
train_tfidf = tfidf.fit_transform(train_df["text"]).toarray()
test_tfidf = tfidf.transform(test_df["text"]).toarray()
y_train = train_df.target.values
gnb = GaussianNB()
y_pred_gnb = gnb.fit(train_tfidf, y_train).predict(test_tfidf)
scores = model_selection.cross_val_score(
gnb, train_tfidf, train_df["target"], cv=5, scoring="f1"
)
scores
df = pd.concat([train_df, test_df])
submission = pd.DataFrame({"id": df_raw["id"], "target": y_pred_gnb})
submission.to_csv("tweet_submission_file.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
data = pd.read_csv("../input/diplomabigdataset/20200120_purchase_base.csv")
data.drop(
[
"Unnamed: 0",
"creation_datetime",
"payment_type",
"tree_level_1",
"tree_level_2",
"tree_level_3",
"buyer_price",
"item_count",
"channel",
"location_name",
"region_name",
"fd_name",
],
axis=1,
inplace=True,
)
data.dropna(inplace=True)
data = data.head(1000000)
diftov = data["mrid"].unique().tolist()
rantov = range(len(pd.unique(data["mrid"])))
TS1 = pd.DataFrame({"num": rantov, "goo": diftov})
data["mrid"] = data["mrid"].map(TS1.set_index("goo")["num"])
aprorders = data.groupby("mrid")["tree_level_4"].apply(list)
aprorders = aprorders.tolist()
del data
del diftov
del rantov
del TS1
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(tokenizer=lambda x: x, lowercase=False)
m = cv.fit_transform(aprorders)
# To get the item name corresponding to each column
goods_name = cv.get_feature_names_out()
# If you need dummy columns, not count
m = m > 0
m = m * 1
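# At this point `m` is a sparse 0/1 indicator matrix: one row per purchase (mrid),
# one column per tree_level_4 item, with 1 meaning that item appears in the purchase.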
del cv
iters = m.shape[0] // 50000 + 1
m1 = m[:50000].todense()
df = pd.DataFrame(data=m1, columns=goods_name)
del m1
for i in range(1, iters):
m2 = m[50000 * i : 50000 * (i + 1)].todense()
dff = pd.DataFrame(data=m2, columns=goods_name)
    df = pd.concat([df, dff])  # DataFrame.append is removed in newer pandas
del dff
del m2
# df = df.drop_duplicates()
del m
df1 = df.T
del df
goods_list = list(df1.index.values)
items = {}
cc = 0
for line in goods_list:
key, value = line, cc
items[key] = str(value)
cc += 1
for key in items:
items[key] = int(items[key])
df2 = df1.T
goods_sum = df1.sum(axis=1).values
del df1
cnt = 0
for i in items.keys():
items[i] = goods_sum[cnt]
cnt += 1
sorted_items = {
k: v for k, v in sorted(items.items(), reverse=True, key=lambda item: item[1])
}
cnt = 0
for i in sorted_items.keys():
sorted_items[i] = cnt
cnt += 1
for i in range(len(aprorders)):
aprorders[i] = list(set(aprorders[i]))
for i in range(len(aprorders)):
for j in range(len(aprorders[i])):
if aprorders[i][j] in sorted_items:
aprorders[i][j] = sorted_items[aprorders[i][j]]
train_valid = []
# for i in range(int(df2.shape[0])):
for i in range(int(0.8 * df2.shape[0])):
train_valid.append(df2.iloc[[i]].squeeze().to_numpy().nonzero()[0].tolist())
sorted_goods_sum_inds = np.flip(np.argsort(goods_sum)).tolist()
import random
from collections import defaultdict, Counter
# cheques = []
# ans_vec = []
# prob_vec = []
answers = []
answers1 = []
answers2 = []
# all_cnt, good_cnt = 0, 0
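# Leave-one-out evaluation on the last 20% of purchases: for every basket with more
# than 3 items, one random item is removed and candidate items are ranked three ways -
# by how often they appear in training baskets sharing an item with this basket
# (answers), by the same counts weighted by inverse item popularity (answers1), and by
# overall item popularity (answers2). The recorded value is the rank of the removed
# item in each list (1000000 if it never appears).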
for j in range(int(0.8 * df2.shape[0]), int(1.0 * df2.shape[0])):
all_ones = df2.iloc[[j]].squeeze().to_numpy().nonzero()[0]
if len(all_ones) > 3:
selection = random.randint(0, len(all_ones) - 1)
kicked_elem = all_ones[selection]
all_ones = np.delete(all_ones, selection)
freq_list = []
# for i in meets_dict.items():
# if i[0][0] in all_ones and i[0][1] not in all_ones:
# freq_list += [i[0][1]] * i[1]
# if i[0][1] in all_ones and i[0][0] not in all_ones:
# freq_list += [i[0][0]] * i[1]
# fq = [iii[0] for iii in Counter(freq_list).most_common()]
        # train_valid is a list of lists for the training split, where each inner
        # list holds the indices of the items bought in one receipt
        for ii in train_valid:
for elem in all_ones:
if elem in ii:
freq_list = freq_list + ii
fq = defaultdict(lambda: 1)
fq1 = defaultdict(lambda: 1)
for w in freq_list:
fq1[w] += 1 / goods_sum[w]
fq[w] += 1
fq = {
k: v for k, v in sorted(fq.items(), key=lambda item: item[1], reverse=True)
}
fq = [e for e in fq if e not in all_ones]
# fq = [e for e in fq if e not in bad_check]
# factor=1.0/sum(fq.values())
# for k in fq:
# fq[k] = fq[k]*factor
# all_ones = [rev_items.get(item,item) for item in all_ones]
if kicked_elem in fq:
answers.append(fq.index(kicked_elem))
else:
answers.append(1000000)
fq1 = {
k: v for k, v in sorted(fq1.items(), key=lambda item: item[1], reverse=True)
}
fq1 = [e for e in fq1 if e not in all_ones]
# fq1 = [e for e in fq1 if e not in bad_check]
if kicked_elem in fq1:
answers1.append(fq1.index(kicked_elem))
else:
answers1.append(1000000)
if kicked_elem in sorted_goods_sum_inds:
answers2.append(sorted_goods_sum_inds.index(kicked_elem))
else:
answers2.append(1000000)
# if j % 1000 == 0:
# print(j)
# for i in range(len(answers)):
# answers[i] = sorted(answers[i])
with open("0_roc_preds.txt", "w") as f:
for item in answers:
f.write("%s\n" % item)
# for i in range(len(answers1)):
# answers1[i] = sorted(answers1[i])
with open("1_roc_preds.txt", "w") as f:
for item in answers1:
f.write("%s\n" % item)
# for i in range(len(answers2)):
# answers2[i] = sorted(answers2[i])
with open("2_roc_preds.txt", "w") as f:
for item in answers2:
f.write("%s\n" % item)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
ROOT_PATH = "/kaggle/input/store-sales-time-series-forecasting"
oil = pd.read_csv(ROOT_PATH + "/oil.csv")
stores = pd.read_csv(ROOT_PATH + "/stores.csv")
train = pd.read_csv(ROOT_PATH + "/train.csv")
test = pd.read_csv(ROOT_PATH + "/test.csv")
transactions = pd.read_csv(ROOT_PATH + "/transactions.csv")
## https://www.kaggle.com/code/arjanso/reducing-dataframe-memory-size-by-65
def reduce_memory_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
col_type = df[col].dtype.name
if (col_type != "datetime64[ns]") & (col_type != "category"):
if col_type != "object":
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif (
c_min > np.iinfo(np.int16).min
and c_max < np.iinfo(np.int16).max
):
df[col] = df[col].astype(np.int16)
elif (
c_min > np.iinfo(np.int32).min
and c_max < np.iinfo(np.int32).max
):
df[col] = df[col].astype(np.int32)
elif (
c_min > np.iinfo(np.int64).min
and c_max < np.iinfo(np.int64).max
):
df[col] = df[col].astype(np.int64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
pass
else:
df[col] = df[col].astype("category")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage became: ", mem_usg, " MB")
return df
train = reduce_memory_usage(train)
test = reduce_memory_usage(test)
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
return summ
summary("oil", oil)
summary("stores", stores)
summary("train", train)
summary("test", test)
summary("transactions", transactions)
stores_transactions = pd.merge(stores, transactions, on=["store_nbr"])
summary("stores_transactions", stores_transactions)
stores_transactions_oil = pd.merge(stores_transactions, oil, on=["date"])
summary("stores_transactions_oil", stores_transactions_oil)
df_full = pd.merge(train, stores_transactions_oil, how="left", on=["store_nbr", "date"])
summary("df_full", df_full)
|
# # IMPORTING LIBRARIES:
# For Data Processing
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from PIL import Image, ImageEnhance
# For ML Models
from tensorflow import keras
from tensorflow.keras.layers import *
from tensorflow.keras.losses import *
from tensorflow.keras.models import *
from tensorflow.keras.metrics import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.applications import *
from tensorflow.keras.preprocessing.image import load_img
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Input, Dense, BatchNormalization
from IPython.core.display import Image
# For Data Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Miscellaneous
from tqdm import tqdm
import random
import os
import keras
from keras.models import Sequential
from keras.layers import (
Conv2D,
MaxPooling2D,
Flatten,
Dense,
Dropout,
BatchNormalization,
)
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("dark_background")
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
# # READING THE DATASET:
# ### Setting the file paths for a training dataset and a testing dataset. It then creates two empty lists, "train_paths" and "train_labels", and iterates through the folders within the training dataset directory to append the file paths and their corresponding labels to the appropriate lists. Then it shuffles the order of the file paths and labels.
train_dir = "/kaggle/input/brain-tumor-mri-dataset/Training/"
test_dir = "/kaggle/input/brain-tumor-mri-dataset/Testing/"
train_paths = []
train_labels = []
for label in os.listdir(train_dir):
for image in os.listdir(train_dir + label):
train_paths.append(train_dir + label + "/" + image)
train_labels.append(label)
train_paths, train_labels = shuffle(train_paths, train_labels)
# ### Creating a pie chart that represents the distribution of labels in a dataset called "train_labels." The chart shows the percentage of each label, "pituitary," "notumor," "meningioma," and "glioma," in the dataset. The colors of the chart are defined as specific RGB values, and the chart is labeled accordingly. The chart also has an "explode" feature which separates each slice of the pie chart by a small margin.
#
plt.figure(figsize=(14, 6))
colors = ["#4285f4", "#ea4335", "#fbbc05", "#34a853"]
plt.rcParams.update({"font.size": 14})
plt.pie(
[
len([x for x in train_labels if x == "pituitary"]),
len([x for x in train_labels if x == "notumor"]),
len([x for x in train_labels if x == "meningioma"]),
len([x for x in train_labels if x == "glioma"]),
],
labels=["pituitary", "notumor", "meningioma", "glioma"],
colors=colors,
autopct="%.1f%%",
explode=(0.025, 0.025, 0.025, 0.025),
startangle=30,
)
# # AS WE CAN SEE, THE DATASET IS REASONABLY BALANCED.
# ### Creating two empty lists, "test_paths" and "test_labels", and iterates through the folders within the testing dataset directory to append the file paths and their corresponding labels to the appropriate lists. It also shuffles the order of the file paths and labels.
test_paths = []
test_labels = []
for label in os.listdir(test_dir):
for image in os.listdir(test_dir + label):
test_paths.append(test_dir + label + "/" + image)
test_labels.append(label)
test_paths, test_labels = shuffle(test_paths, test_labels)
# ### Creating a pie chart that represents the distribution of train and test datasets. The chart shows the percentage of each label, "train" and "test" in the dataset. The colors of the chart are defined as specific RGB values, and the chart is labeled accordingly. The chart also has an "explode" feature which separates each slice of the pie chart by a small margin.
#
plt.figure(figsize=(14, 6))
colors = ["#4285f4", "#ea4335", "#fbbc05", "#34a853"]
plt.rcParams.update({"font.size": 14})
plt.pie(
[len(train_labels), len(test_labels)],
labels=["Train", "Test"],
colors=colors,
autopct="%.1f%%",
explode=(0.05, 0),
startangle=30,
)
# # DATA AUGMENTATION:
# ### Defining a function called "augment_image" that takes in an image as an input. The function uses the Python Imaging Library (PIL) to convert the image to a PIL image object. Then it applies random brightness and contrast enhancements to the image using the ImageEnhance module, which adjusts the brightness and contrast of the image by a random value between 0.8 and 1.2. Finally, the function normalizes the image by dividing it by 255 and returns the augmented image.
def augment_image(image):
image = Image.fromarray(np.uint8(image))
image = ImageEnhance.Brightness(image).enhance(random.uniform(0.8, 1.2))
image = ImageEnhance.Contrast(image).enhance(random.uniform(0.8, 1.2))
image = np.array(image) / 255.0
return image
# Defining a function called "open_images" that takes in a list of paths to images and returns the images as arrays after augmenting them. The function first uses the keras function "load_img" to load the images and resize them to the specified size (128x128 pixels). Then it applies the augment_image function to each image. The function then returns the augmented images as an array.
# The code then calls the open_images function on a subset of the train_paths (train_paths[50:59]) and assigns the result to the variable "images". It also assigns the corresponding subset of train_labels to the variable "labels".
# Then it creates a figure of size (12,6) and plots 8 images with their corresponding labels as titles. It also turns off the axes, updates the title font size, and shows the images in the figure.
IMAGE_SIZE = 128
def open_images(paths):
"""
Given a list of paths to images, this function returns the images as arrays (after augmenting them)
"""
images = []
for path in paths:
image = load_img(path, target_size=(IMAGE_SIZE, IMAGE_SIZE))
image = augment_image(image)
images.append(image)
return np.array(images)
images = open_images(train_paths[50:59])
labels = train_labels[50:59]
fig = plt.figure(figsize=(12, 6))
for x in range(1, 9):
fig.add_subplot(2, 4, x)
plt.axis("off")
plt.title(labels[x])
plt.imshow(images[x])
plt.rcParams.update({"font.size": 12})
plt.show()
# # DATA GENERATOR HELPS AUGMENT IMAGES, NORMALIZES THEM.
# The `datagen` function generates data for training and testing. It takes four arguments: `paths`, `labels`, `batch_size` (default 12) and `epochs` (default 1).
# It uses the `open_images` function to open and augment the images and the encode_label function to convert the labels to numerical values. The function then yields the batch of images and labels, allowing the model to train on them. The `for _ in range(epochs)` loop allows you to specify the number of times the entire dataset should be passed through the model. And `for x in range(0, len(paths), batch_size)` is used for iterating over the dataset in batches of the specified batch_size.
unique_labels = os.listdir(train_dir)
def encode_label(labels):
encoded = []
for x in labels:
encoded.append(unique_labels.index(x))
return np.array(encoded)
def decode_label(labels):
decoded = []
for x in labels:
decoded.append(unique_labels[x])
return np.array(decoded)
def datagen(paths, labels, batch_size=12, epochs=1):
for _ in range(epochs):
for x in range(0, len(paths), batch_size):
batch_paths = paths[x : x + batch_size]
batch_images = open_images(batch_paths)
batch_labels = labels[x : x + batch_size]
batch_labels = encode_label(batch_labels)
yield batch_images, batch_labels
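# A quick hedged sanity check (not part of the original flow): pull a single batch from the
# generator to confirm the image tensor shape and the encoded integer labels.
sample_images, sample_labels = next(datagen(train_paths, train_labels, batch_size=4))
print(sample_images.shape, sample_labels)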
# # MODEL:
# ## WE ARE USING VGG16 FOR TRANSFER LEARNING.
# The model is built on top of VGG16, which is a pre-trained convolutional neural network (CNN) for image classification.
# * First, the VGG16 model is loaded with input_shape=(IMAGE_SIZE,IMAGE_SIZE,3), include_top=False, weights='imagenet'. The input shape is set to match the size of the images in the dataset, which is 128x128 pixels. The include_top parameter is set to False, which means that the final fully-connected layers of VGG16 that perform the classification will not be included. The weights parameter is set to 'imagenet', which means the model comes pre-trained on ImageNet, a dataset of roughly 1.4 million images.
# * Next, the for layer in base_model.layers: loop is used to set all layers of the base_model (VGG16) to non-trainable, so that the weights of these layers will not be updated during training.
# * Then, the last three layers of the VGG16 model are set to trainable by using base_model.layers[-2].trainable = True,base_model.layers[-3].trainable = True and base_model.layers[-4].trainable = True
# * After that, a Sequential model is created and the VGG16 model is added to it with model.add(base_model).
# * Next, a Flatten layer is added to the model with model.add(Flatten()) which reshapes the output of the VGG16 model from a 3D tensor to a 1D tensor, so that it can be processed by the next layers of the model.
# * Then, a Dropout layer is added with model.add(Dropout(0.3)) which is used to prevent overfitting by randomly setting a fraction of input units to 0 at each update during training time.
# * After that, a dense layer is added with 128 neurons and relu activation function is added with model.add(Dense(128, activation='relu')).
# * Next, another Dropout layer is added with model.add(Dropout(0.2))
# * Finally, the output dense layer is added with number of neurons equal to the number of unique labels and 'softmax' activation function is added with model.add(Dense(len(unique_labels), activation='softmax')). The 'softmax' activation function is used to give a probability distribution over the possible classes.
#
base_model = VGG16(
input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, weights="imagenet"
)
# Set all layers to non-trainable
for layer in base_model.layers:
layer.trainable = False
# Set the last vgg block to trainable
base_model.layers[-2].trainable = True
base_model.layers[-3].trainable = True
base_model.layers[-4].trainable = True
model = Sequential()
model.add(Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
model.add(base_model)
model.add(Flatten())
model.add(Dropout(0.3))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(len(unique_labels), activation="softmax"))
# The `model.summary()` method in Keras prints a summary of the architecture of the model. This summary includes the layers in the model, their output shapes, the number of parameters in each layer, and the total number of parameters in the model. This can be useful for understanding the overall structure of the model and for identifying potential issues such as overfitting. In the code provided, `model.summary()` shows the architecture of the image classification model created using a pre-trained VGG16 model and some additional layers.
model.summary()
# `model.compile` is used to configure the learning process before training the model. The optimizer is the algorithm used to update the weights of the model based on the gradients of the loss function. In this case, the `Adam` optimizer is used with a learning rate of 0.0001. The learning rate controls the step size at which the optimizer makes updates to the weights. A smaller learning rate will make the optimization converge slower but potentially with better results.
# The loss function is used to measure how well the model is doing on the training data. The loss is a scalar value that represents the degree of error in the model's predictions. The `sparse_categorical_crossentropy` loss is used in this case, which is a measure of the dissimilarity between the predicted and actual labels.
# The metrics parameter is used to specify the metrics that will be evaluated during training and testing. The `sparse_categorical_accuracy` metric is used in this case, which calculates the mean accuracy rate across all predictions for multiclass classification problems.
#
model.compile(
optimizer=Adam(learning_rate=0.0001),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
# # TRAINING THE MODEL:
# The fit method takes in the following arguments:
# * datagen(train_paths, train_labels, batch_size=batch_size, epochs=epochs): This argument specifies the training data generator to use. The datagen function generates batches of images and labels for training. It takes in the list of paths to the training images, the corresponding labels, the batch size and the number of epochs. The generator is created once and yields a new batch of images and labels each time the model requests one; because it loops over the data `epochs` times, a single generator serves the whole training run.
# * epochs=epochs: This argument specifies the number of times the model should go through the entire training dataset.
# * steps_per_epoch=steps: This argument specifies the number of batches to use per epoch.
# The batch_size variable is set to 20, which means that the model will be trained on 20 images at a time. The steps variable is set to the total number of images divided by the batch size. The epochs variable is set to 5, so the model will go through the entire training dataset 5 times. The training is done by repeatedly drawing new batches of images and labels from the datagen generator and training the model on those batches. The history variable will store the training history, which can be used to plot the training progress or extract training statistics.
#
batch_size = 20
steps = int(len(train_paths) / batch_size)
epochs = 5
history = model.fit(
datagen(train_paths, train_labels, batch_size=batch_size, epochs=epochs),
epochs=epochs,
steps_per_epoch=steps,
)
# Creating a plot of the training history of the model, including the accuracy and loss over the number of epochs. The x-axis represents the number of epochs and the y-axis shows the value of accuracy and loss. The plot is created using matplotlib library, it has two lines one for accuracy and one for loss. The plot helps in visualizing how the model is learning and how well it is performing during the training process. It is useful for identifying overfitting, underfitting, and to decide when to stop training.
plt.figure(figsize=(8, 4))
plt.grid(True)
plt.plot(history.history["sparse_categorical_accuracy"], ".g-", linewidth=2)
plt.plot(history.history["loss"], ".r-", linewidth=2)
plt.title("Model Training History")
plt.xlabel("epoch")
plt.xticks([x for x in range(epochs)])
plt.legend(["Accuracy", "Loss"], loc="upper left", bbox_to_anchor=(1, 1))
plt.show()
# # EVALUATING MODEL WITH TEST SAMPLES:
# Trained model to make predictions on the test set, which consists of the test_paths and test_labels. It uses the datagen() function to generate batches of images and labels, and for each batch it uses the model.predict() method to make predictions on the images. The predicted labels are in encoded form and using decode_label() function they are decoded and stored in y_pred. The actual labels are stored in y_true. The tqdm library is used to display a progress bar for the loop.
#
batch_size = 32
steps = int(len(test_paths) / batch_size)
y_pred = []
y_true = []
for x, y in tqdm(
datagen(test_paths, test_labels, batch_size=batch_size, epochs=1), total=steps
):
pred = model.predict(x)
pred = np.argmax(pred, axis=-1)
for i in decode_label(pred):
y_pred.append(i)
for i in decode_label(y):
y_true.append(i)
# Generating a classification report which evaluates the performance of the model on the test dataset. The report contains various metrics such as precision, recall, f1-score and support for each class in the dataset. It also calculates a weighted average of these metrics across all classes. This report helps in understanding the overall performance of the model and identifying any specific classes where the model is performing well or poorly.
print(classification_report(y_true, y_pred))
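# A minimal hedged sketch: a confusion matrix complements the report above by showing exactly
# which classes get confused with each other (assumes y_true and y_pred hold the decoded string labels).
from sklearn.metrics import confusion_matrix
import pandas as pd
cm = confusion_matrix(y_true, y_pred, labels=unique_labels)
print(pd.DataFrame(cm, index=unique_labels, columns=unique_labels))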
# # Let's check how the model works:
def names(number):
    # NOTE: the model predicts 4 classes (ordered as in unique_labels); this helper simply
    # collapses the predicted index into a tumor / no-tumor message, assuming index 0 is the
    # 'notumor' class.
    if number == 0:
        return "No, It's not a tumor"
    else:
        return "It's a Tumor"
from PIL import Image
from matplotlib.pyplot import imshow
img = Image.open(
r"/kaggle/input/brain-tumor-mri-dataset/Training/glioma/Tr-glTr_0002.jpg"
)
x = np.array(img.resize((128, 128)))
x = x.reshape(1, 128, 128, 3)
res = model.predict_on_batch(x)
classification = np.where(res == np.amax(res))[1][0]
imshow(img)
print(str(res[0][classification] * 100) + "% Conclusion: " + names(classification))
from matplotlib.pyplot import imshow
img = Image.open(
r"/kaggle/input/brain-tumor-mri-dataset/Testing/pituitary/Te-piTr_0004.jpg"
)
x = np.array(img.resize((128, 128)))
x = x.reshape(1, 128, 128, 3)
res = model.predict_on_batch(x)
classification = np.where(res == np.amax(res))[1][0]
imshow(img)
print(str(res[0][classification] * 100) + "% Conclusion: " + names(classification))
from matplotlib.pyplot import imshow
img = Image.open(
r"/kaggle/input/brain-tumor-mri-dataset/Training/meningioma/Tr-meTr_0001.jpg"
)
x = np.array(img.resize((128, 128)))
x = x.reshape(1, 128, 128, 3)
res = model.predict_on_batch(x)
classification = np.where(res == np.amax(res))[1][0]
imshow(img)
print(str(res[0][classification] * 100) + "% Conclusion: " + names(classification))
def names(number):
    # Redefined here with the mapping flipped for the 'notumor' example below.
    if number == 0:
        return "It's a Tumor"
    else:
        return "No, It's not a tumor"
from matplotlib.pyplot import imshow
img = Image.open(
r"/kaggle/input/brain-tumor-mri-dataset/Training/notumor/Tr-noTr_0004.jpg"
)
x = np.array(img.resize((128, 128)))
x = x.reshape(1, 128, 128, 3)
res = model.predict_on_batch(x)
classification = np.where(res == np.amax(res))[1][0]
imshow(img)
print(str(res[0][classification] * 100) + "% Conclusion: " + names(classification))
|
from fastai import *
from fastai.vision import *
from tqdm import tqdm_notebook as tqdm
import os
import cv2
import random
import numpy as np
import keras
from random import shuffle
from keras.utils import np_utils
from shutil import unpack_archive
import matplotlib.pyplot as plt
data = (
ImageList.from_folder(
"/kaggle/input/naruto-hand-sign-dataset/Pure Naruto Hand Sign Data/"
)
.split_by_rand_pct()
.label_from_folder()
.transform(get_transforms(), size=128)
.databunch()
.normalize(imagenet_stats)
)
# data = (ImageDataBunch.from_folder(mainPath) .random_split_by_pct() .label_from_folder() .transform(tfms, size=224) .databunch())
# data = (ImageList.from_folder(mainPath) .split_by_rand_pct() .label_from_folder() .databunch())
data
data.classes
data.show_batch(rows=3, figsize=(10, 10))
from fastai.metrics import error_rate
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
# learn.fit_one_cycle(6,1e-2)
# learn.model_dir="/kaggle/working/models"
# learn.save('mini_train')
# learn.fit_one_cycle(6, max_lr=slice(1e-05, 1e-04))
learn.fit_one_cycle(8, 1e-2)
learn.model_dir = "/kaggle/working/models"
learn.save("Hand-Sign-detection-stage-1")
learn.validate()
learn.show_results(ds_type=DatasetType.Train, rows=3, figsize=(20, 20))
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
interp.plot_top_losses(9, figsize=(15, 15))
learn.unfreeze() # must be done before calling lr_find
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(6, max_lr=slice(1e-04, 1e-03))
learn.save("Hand-Sign-detection-stage-2")
|
# # SURVIVAL AFTER TITANIC ACCIDENT
# 
import pandas as pd
import numpy as np
from math import *
import seaborn as sns
from tensorflow import keras
from keras.callbacks import Callback
from tensorflow.keras.optimizers import Adam
from keras.layers import Dense
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, OrdinalEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
df = pd.read_csv("/kaggle/input/titanic/train.csv")
df_test = pd.read_csv("/kaggle/input/titanic/test.csv")
df
df.info()
df.isna().sum()
df["Embarked"] = df["Embarked"].fillna("S")
df["Age"] = df["Age"].fillna(float(df["Age"].mean()))
df
df.isna().sum()
df_new = df.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
df_new
encode = OrdinalEncoder()
df_new[["Sex", "Embarked"]] = encode.fit_transform(df_new[["Sex", "Embarked"]])
df_new["Sex"].replace(["male", "female"], [[0, 1], [1, 0]], inplace=True)
df_new.info()
df_new.Sex.value_counts()
x_train = df_new.drop("Survived", axis=1).values
y_train = df_new["Survived"].values
x_train.shape, y_train.shape
model = Sequential(
    [
        Dense(64, input_shape=[7], activation="relu"),
        Dense(64, activation="relu"),
        Dense(32, activation="relu"),
        Dense(32, activation="relu"),
        # A single-unit softmax would always output 1, so sigmoid is used for binary classification
        Dense(1, activation="sigmoid"),
    ]
)
num_epoch = 10
model.compile(
    optimizer=Adam(learning_rate=0.001),
    # binary_crossentropy matches the single sigmoid output and the 0/1 Survived labels
    loss="binary_crossentropy",
    metrics=["accuracy"],
)
hist = model.fit(x_train, y_train, epochs=num_epoch, batch_size=16)
df_test.isna().sum()
df_test2 = df_test.drop(["Name", "Ticket", "Cabin"], axis=1)
df_test2 = df_test2.interpolate()
df_test2.isna().sum()
# Apply the same ordinal encoding to the test set that was fitted on the training data
df_test2[["Sex", "Embarked"]] = encode.transform(df_test2[["Sex", "Embarked"]])
df_test2
df_test2.Embarked.value_counts()
x_test = df_test2.drop("PassengerId", axis=1).values
ID = df_test2["PassengerId"].values
predictions = model.predict(x_test)
# Threshold the sigmoid probabilities at 0.5 to get 0/1 survival labels for the submission
predict = (predictions.reshape(-1) > 0.5).astype(int)
predict
submit = pd.DataFrame({"PassengerId": ID, "Survived": predict})
submit
submit.to_csv("submission.csv", index=False)
|
# ### Vaex performance test (GPU instance)
# The purpose of this Notebook is to evaluate the performance of the [vaex](https://github.com/vaexio/vaex) DataFrame library on a Kaggle instance with an active GPU.
# The process is simple:
# - Install vaex
# - Generate a "big-ish" dataset (100_000_000 rows) by replicating the Iris flower dataset
# - Create a couple of computationally expensive virtual columns
# - Evaluate them on the fly via `numpy`, `jit_numba`, `jit_pythran` and `jit_cuda` and compare performance.
# Install vaex & pythran from pip
# Import packages
import vaex
import vaex.ml
import numpy as np
import pylab as plt
import seaborn as sns
from tqdm.notebook import tqdm
import time
# The following method replicates the Iris flower dataset many times, and creates an hdf5 file on disk comprising ~1e8 samples. The purpose is to create "big-ish" data for various types of performance testing.
df = vaex.ml.datasets.load_iris_1e8()
# Get a preview of the data
df
# Let us define a simple function that will measure the execution time of other functions (vaex methods).
def benchmark(func, reps=5):
times = []
for i in tqdm(range(reps), leave=False, desc="Benchmark in progress..."):
start_time = time.time()
res = func()
times.append(time.time() - start_time)
return np.mean(times), res
# Now let's do some performance testing. I have defined a function, just off the top of my head, that is a bit computationally expensive to evaluate on the fly. The idea is to see how fast vaex performs when the computations are done with numpy, numba, pythran and cuda.
def some_heavy_function(x1, x2, x3, x4):
a = x1**2 + np.sin(x2 / x1) + (np.tan(x2**2) - x4 ** (2 / 3))
b = (x1 / x3) ** (0.3) + np.cos(x1) - np.sqrt(x2) - x4**3
return a ** (2 / 3) / np.tan(b)
# Numpy
df["func_numpy"] = some_heavy_function(
df.sepal_length, df.sepal_width, df.petal_length, df.petal_width
)
# Numba
df["func_numba"] = df.func_numpy.jit_numba()
# Pythran
df["func_pythran"] = df.func_numpy.jit_pythran()
# CUDA
df["func_cuda"] = df.func_numpy.jit_cuda()
# Calculation of the sum of the virtual columns - this forces their evaluation
duration_numpy, res_numpy = benchmark(df.func_numpy.sum)
duration_numba, res_numba = benchmark(df.func_numba.sum)
duration_pythran, res_pythran = benchmark(df.func_pythran.sum)
duration_cuda, res_cuda = benchmark(df.func_cuda.sum)
print(f"Result from the numpy sum {res_numpy:.5f}")
print(f"Result from the numba sum {res_numba:.5f}")
print(f"Result from the pythran sum {res_pythran:.5f}")
print(f"Result from the cuda sum {res_cuda:.5f}")
# Calculate the speed-up compared to the (base) numpy computation
durations = np.array([duration_numpy, duration_numba, duration_pythran, duration_cuda])
speed_up = duration_numpy / durations
# Compute
compute = ["numpy", "numba", "pythran", "cuda"]
# Let's visualise it
plt.figure(figsize=(16, 6))
plt.subplot(121)
plt.bar(compute, speed_up)
plt.tick_params(labelsize=14)
for i, (comp, speed) in enumerate(zip(compute, speed_up)):
plt.annotate(s=f"x {speed:.1f}", xy=(i - 0.1, speed + 0.3), fontsize=14)
plt.annotate(s="(higher is better)", xy=(0, speed + 2), fontsize=16)
plt.title("Evaluation of complex virtual columns with Vaex", fontsize=16)
plt.xlabel("Accelerators", fontsize=14)
plt.ylabel("Speed-up wrt numpy", fontsize=14)
plt.ylim(0, speed_up[-1] + 5)
plt.subplot(122)
plt.bar(compute, durations)
plt.tick_params(labelsize=14)
for i, (comp, duration) in enumerate(zip(compute, durations)):
plt.annotate(s=f"{duration:.1f}s", xy=(i - 0.1, duration + 0.3), fontsize=14)
plt.annotate(s="(lower is better)", xy=(2, durations[0] + 3), fontsize=16)
plt.title("Evaluation of complex virtual columns with Vaex", fontsize=16)
plt.xlabel("Accelerators", fontsize=14)
plt.ylabel("Duration [s]", fontsize=14)
plt.ylim(0, durations[0] + 5)
plt.tight_layout()
plt.show()
# Let us try another involved function, this time one calculating the arc-distance between two points on a sphere. We don't have such data here, but let's use this anyway in order to test the speed of the computations.
def arc_distance(theta_1, phi_1, theta_2, phi_2):
temp = (
np.sin((theta_2 - theta_1) / 2 * np.pi / 180) ** 2
+ np.cos(theta_1 * np.pi / 180)
* np.cos(theta_2 * np.pi / 180)
* np.sin((phi_2 - phi_1) / 2 * np.pi / 180) ** 2
)
distance = 2 * np.arctan2(np.sqrt(temp), np.sqrt(1 - temp))
return distance * 3958.8
# Numpy
df["arc_distance_numpy"] = arc_distance(
df.sepal_length, df.sepal_width, df.petal_length, df.petal_width
)
# Numba
df["arc_distance_numba"] = df.arc_distance_numpy.jit_numba()
# Pythran
df["arc_distance_pythran"] = df.arc_distance_numpy.jit_pythran()
# CUDA
df["arc_distance_cuda"] = df.arc_distance_numpy.jit_cuda()
# Calculation of the sum of the virtual columns - this forces their evaluation
duration_numpy, res_numpy = benchmark(df.arc_distance_numpy.sum)
duration_numba, res_numba = benchmark(df.arc_distance_numba.sum)
duration_pythran, res_pythran = benchmark(df.arc_distance_pythran.sum)
duration_cuda, res_cuda = benchmark(df.arc_distance_cuda.sum)
print(f"Result from the numpy sum {res_numpy:.5f}")
print(f"Result from the numba sum {res_numba:.5f}")
print(f"Result from the pythran sum {res_pythran:.5f}")
print(f"Result from the cuda sum {res_cuda:.5f}")
# Calculate the speed-up compared to the (base) numpy computation
durations = np.array([duration_numpy, duration_numba, duration_pythran, duration_cuda])
speed_up = duration_numpy / durations
# Let's visualise it
plt.figure(figsize=(16, 6))
plt.subplot(121)
plt.bar(compute, speed_up)
plt.tick_params(labelsize=14)
for i, (comp, speed) in enumerate(zip(compute, speed_up)):
plt.annotate(s=f"x {speed:.1f}", xy=(i - 0.1, speed + 0.3), fontsize=14)
plt.annotate(s="(higher is better)", xy=(0, speed + 2), fontsize=16)
plt.title("Evaluation of complex virtual columns with Vaex", fontsize=16)
plt.xlabel("Accelerators", fontsize=14)
plt.ylabel("Speed-up wrt numpy", fontsize=14)
plt.ylim(0, speed_up[-1] + 5)
plt.subplot(122)
plt.bar(compute, durations)
plt.tick_params(labelsize=14)
for i, (comp, duration) in enumerate(zip(compute, durations)):
plt.annotate(s=f"{duration:.1f}s", xy=(i - 0.1, duration + 0.3), fontsize=14)
plt.annotate(s="(lower is better)", xy=(2, durations[0] + 3), fontsize=16)
plt.title("Evaluation of complex virtual columns with Vaex", fontsize=16)
plt.xlabel("Accelerators", fontsize=14)
plt.ylabel("Duration [s]", fontsize=14)
plt.ylim(0, durations[0] + 5)
plt.tight_layout()
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
df = pd.read_csv("/kaggle/input/student-performance-in-mathematics/exams.csv")
df
df.head(50)
df.tail(50)
df.shape
df.isnull()
df.columns
df["race/ethnicity"].unique()
df["race/ethnicity"].value_counts()
df["test preparation course"].unique()
df["test preparation course"].value_counts()
df[df["math score"].between(80, 100)].value_counts()
df[df["reading score"].between(80, 100)].value_counts()
df[df["writing score"].between(80, 100)].value_counts()
|
# # Special Thanks To @dlibenzi (github) for all his help;
import os
os.environ["XRT_TPU_CONFIG"] = "tpu_worker;0;10.0.0.2:8470"
import collections
from datetime import datetime, timedelta
import os
import tensorflow as tf
import numpy as np
import requests, threading
_VersionConfig = collections.namedtuple("_VersionConfig", "wheels,server")
VERSION = "torch_xla==nightly"
CONFIG = {
"torch_xla==nightly": _VersionConfig(
"nightly",
"XRT-dev{}".format((datetime.today() - timedelta(1)).strftime("%Y%m%d")),
),
}[VERSION]
DIST_BUCKET = "gs://tpu-pytorch/wheels"
TORCH_WHEEL = "torch-{}-cp36-cp36m-linux_x86_64.whl".format(CONFIG.wheels)
TORCH_XLA_WHEEL = "torch_xla-{}-cp36-cp36m-linux_x86_64.whl".format(CONFIG.wheels)
TORCHVISION_WHEEL = "torchvision-{}-cp36-cp36m-linux_x86_64.whl".format(CONFIG.wheels)
CONFIG.wheels
# Install COLAB TPU compat PyTorch/TPU wheels and dependencies
import torch_xla.core.xla_model as xm
import torch_xla.distributed.data_parallel as dp # http://pytorch.org/xla/index.html#running-on-multiple-xla-devices-with-multithreading
import torch_xla.distributed.xla_multiprocessing as xmp # http://pytorch.org/xla/index.html#running-on-multiple-xla-devices-with-multiprocessing
import torch_xla.distributed.parallel_loader as pl
from kaggle_datasets import KaggleDatasets
# Data access
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
# Configuration
IMAGE_SIZE = [512, 512]
EPOCHS = 20
BATCH_SIZE = 16 * 1
GCS_PATH_SELECT = { # available image sizes
192: GCS_DS_PATH + "/tfrecords-jpeg-192x192",
224: GCS_DS_PATH + "/tfrecords-jpeg-224x224",
331: GCS_DS_PATH + "/tfrecords-jpeg-331x331",
512: GCS_DS_PATH + "/tfrecords-jpeg-512x512",
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + "/train/*.tfrec")
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + "/val/*.tfrec")
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + "/test/*.tfrec")
TRAINING_FILENAMES
# In the 100 flowers dataset, the format of each TFRecord of labeled data is:
# - "image": list of bytestrings containing 1 bytestring (the JPEG-ecoded image bytes)
# - "label": list of int64 containing 1 int64
# REFERENCE https://gist.githubusercontent.com/dlibenzi/c9868a1090f6f8ef9d79d2cfcbadd8ab/raw/947fbec325cbdeda91bd53acb5e126caa4115348/more_tf_stuff.py
# Thanks A Lot For Your Help!!!
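# A minimal hedged sketch (assumption: the feature keys are exactly "image" and "label" as
# described above) showing how one labeled record could be parsed with tf.data, independently
# of the torch_xla reader used below.
def _parse_labeled(example_proto):
    features = {
        "image": tf.io.FixedLenFeature([], tf.string),  # one JPEG-encoded bytestring
        "label": tf.io.FixedLenFeature([], tf.int64),  # one int64 class id
    }
    parsed = tf.io.parse_single_example(example_proto, features)
    image = tf.image.decode_jpeg(parsed["image"], channels=3)
    return image, parsed["label"]
for img, lbl in tf.data.TFRecordDataset(TRAINING_FILENAMES[:1]).map(_parse_labeled).take(1):
    print(img.shape, int(lbl))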
from PIL import Image
import numpy as np
import hashlib
import os
import sys
import torch
import torch_xla.utils.tf_record_reader as tfrr
a = """
image/class/label tensor([82])
image/class/synset n01796340
image/channels tensor([3])
image/object/bbox/label tensor([], dtype=torch.int64)
image/width tensor([900])
image/format JPEG
image/height tensor([600])
image/class/text ptarmigan
image/object/bbox/ymin tensor([])
image/encoded tensor([ -1, -40, -1, ..., -30, -1, -39], dtype=torch.int8)
image/object/bbox/ymax tensor([])
image/object/bbox/xmin tensor([])
image/filename n01796340_812.JPEG
image/object/bbox/xmax tensor([])
image/colorspace RGB
"""
def decode(ex):
w = 512 # ex['image/width'].item()
h = 512 # ex['image/height'].item()
imgb = ex["image"].numpy().tobytes()
# m = hashlib.md5()
# m.update(imgb)
# print('HASH = {}'.format(m.hexdigest()))
image = Image.frombytes("RGB", (w, h), imgb, "JPEG".lower(), "RGB", None)
npa = np.asarray(image)
return torch.from_numpy(npa), image
def readem(path, img_path=None):
count = 0
transforms = {}
r = tfrr.TfRecordReader(path, compression="", transforms=transforms)
while True:
ex = r.read_example()
if not ex:
break
# print('\n')
# for lbl, data in ex.items():
# print('{}\t{}'.format(lbl, data))
img_tensor, image = decode(ex)
if img_path:
image.save(os.path.join(img_path, str(count) + ".jpg"))
count += 1
print("\n\nDecoded {} samples".format(count))
import os
for idx, file in enumerate(TRAINING_FILENAMES):
img_path = f"/kaggle/working/flower_images_{idx}"
os.makedirs(img_path, exist_ok=True)
print(file)
readem(path=file, img_path=img_path)
# https://stackoverflow.com/questions/11159436/multiple-figures-in-a-single-window
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def plot_figures(figures, nrows=1, ncols=1):
"""
Plot a dictionary of figures.
Parameters
----------
figures : <title, figure> dictionary
ncols : number of columns of subplots wanted in the display
nrows : number of rows of subplots wanted in the figure
"""
fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=(20, 20))
for ind, title in zip(range(len(figures)), figures):
axeslist.ravel()[ind].imshow(figures[title], cmap=plt.jet())
# axeslist.ravel()[ind].set_title(title)
axeslist.ravel()[ind].set_axis_off()
# generation of a dictionary of (title, images)
w, h = 10, 10
number_of_im = w * h
figures = {
"im" + str(i): Image.open(f"./flower_images_0/{i}.jpg") for i in range(number_of_im)
}
# plot of the images in a figure, with 5 rows and 4 columns
plot_figures(figures, w, h)
plt.show()
# generation of a dictionary of (title, images)
w, h = 10, 10
number_of_im = w * h
figures = {
"im" + str(i): Image.open(f"./flower_images_1/{i}.jpg") for i in range(number_of_im)
}
plot_figures(figures, w, h)
plt.show()
# generation of a dictionary of (title, images)
w, h = 10, 10
number_of_im = w * h
figures = {
"im" + str(i): Image.open(f"./flower_images_2/{i}.jpg") for i in range(number_of_im)
}
plot_figures(figures, w, h)
plt.show()
# generation of a dictionary of (title, images)
w, h = 10, 10
number_of_im = w * h
figures = {
"im" + str(i): Image.open(f"./flower_images_3/{i}.jpg") for i in range(number_of_im)
}
plot_figures(figures, w, h)
plt.show()
# generation of a dictionary of (title, images)
w, h = 10, 10
number_of_im = w * h
figures = {
"im" + str(i): Image.open(f"./flower_images_10/{i}.jpg")
for i in range(number_of_im)
}
plot_figures(figures, w, h)
plt.show()
|
# # **Introduction**
# The protein-expression profile of Parkinson's disease is made up of many proteins acting together. In the model built here, I will not consider the effect of peptides on proteins, because that would require a multi-level model.
# In this notebook, I will only discuss the relationship between protein abundance and disease severity across patients.
# I will use a random forest model and a support vector machine model for prediction; readers can try other models on their own.
# # **Import**
#
import pandas as pd
import random
# ***Read the csv data to get the dataframe data set***
# **Import protein data**
# Protein data
train_proteins = pd.read_csv(r"/kaggle/input/parkinson-data-set/train_proteins.csv")
# Get the null value information
train_proteins.info()
# Therefore, the protein data does not contain null values
# **Read clinical data**
# Clinical Data
train_clinical_data = pd.read_csv(
r"/kaggle/input/parkinson-data-set/train_clinical_data.csv"
)
# Supplement clinical data
supplemental_clinical_data = pd.read_csv(
r"/kaggle/input/parkinson-data-set/supplemental_clinical_data.csv"
)
# Clearly, we need to combine the two to get overall clinical information
# Combined data set
sum_clinical_data = pd.concat([train_clinical_data, supplemental_clinical_data], axis=0)
# Check for missing data in the combined data set
sum_clinical_data.info()
# We observe missing data in updrs_1, updrs_2, updrs_3, updrs_4, and upd23b_clinical_state_on_medication
# Now let's deal with the missing data
# # Missing data processing
# We first plot the bar distribution of the missing data to determine what method to use for completion. Then draw the completed figure
# **Example Processing updrs_1**
import seaborn as sns
import matplotlib.pyplot as plt
# Paint canvas
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
# Observe the data in updrs_1
sns.countplot(x="updrs_1", data=sum_clinical_data, ax=axes)
# **Looking at the data, we used the median to fill in the missing values**
# fill with median
sum_clinical_data["updrs_1"] = sum_clinical_data["updrs_1"].fillna(
sum_clinical_data["updrs_1"].median()
)
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
sns.countplot(x="updrs_1", data=sum_clinical_data, ax=axes)
# **Example Processing updrs_2**
# Paint canvas
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
# Observe the data in updrs_1
sns.countplot(x="updrs_2", data=sum_clinical_data, ax=axes)
# **Looking at the data, we used the median to fill in the missing values**
# fill with median
sum_clinical_data["updrs_2"] = sum_clinical_data["updrs_2"].fillna(
sum_clinical_data["updrs_2"].median()
)
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
sns.countplot(x="updrs_2", data=sum_clinical_data, ax=axes)
# **Example Processing updrs_3**
# Paint canvas
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
# Observe the data in updrs_1
sns.countplot(x="updrs_3", data=sum_clinical_data, ax=axes)
# **Here, filling with the median may introduce a somewhat larger error, but because updrs_3 has relatively few missing values, median filling is still acceptable**
# fill with median
sum_clinical_data["updrs_3"] = sum_clinical_data["updrs_3"].fillna(
sum_clinical_data["updrs_3"].median()
)
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
sns.countplot(x="updrs_3", data=sum_clinical_data, ax=axes)
# **Example Processing updrs_4**
# Paint canvas
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
# Observe the data in updrs_1
sns.countplot(x="updrs_4", data=sum_clinical_data, ax=axes)
# **Looking at the data, we used the median to fill in the missing values**
sum_clinical_data["updrs_4"] = sum_clinical_data["updrs_4"].fillna(
sum_clinical_data["updrs_4"].median()
)
fig, axes = plt.subplots(1, 1, figsize=(8, 6))
sns.countplot(x="updrs_4", data=sum_clinical_data, ax=axes)
# **Medication data processing**
sum_clinical_data.info()
# Recheck the clinical data
# **The data has been cleaned**
# # We need to get each person's protein count
# Get the abundance (NPX) of each protein type for every visit
# Use the pivot_table function to implement the requirements
train_proteins_pivot = pd.pivot_table(
train_proteins, index="visit_id", columns="UniProt", values="NPX"
)
# **Merge each visit's protein abundances with the disease severity recorded at the same visit**
# sum_clinical_data contains visits with no protein measurements, so we need to delete those rows
# Likewise, drop rows of train_proteins_pivot that have no clinical match
# In other words, keep only the intersection of the two data sets (visits present in both)
train_proteins_pivot = train_proteins_pivot.reset_index()
train_proteins_pivot = train_proteins_pivot[
train_proteins_pivot["visit_id"].isin(sum_clinical_data["visit_id"])
]
mask = sum_clinical_data["visit_id"].isin(train_proteins_pivot["visit_id"])
sum_clinical_data = sum_clinical_data[mask]
# **Extract the needed columns from the clinical data and append them to train_proteins_pivot**
value1 = sum_clinical_data["updrs_1"]
value1 = value1.reset_index(drop=True)
value2 = sum_clinical_data["updrs_2"]
value2 = value2.reset_index(drop=True)
value3 = sum_clinical_data["updrs_3"]
value3 = value3.reset_index(drop=True)
value4 = sum_clinical_data["updrs_4"]
value4 = value4.reset_index(drop=True)
train_proteins_pivot["updrs_1"] = value1
train_proteins_pivot["updrs_2"] = value2
train_proteins_pivot["updrs_3"] = value3
train_proteins_pivot["updrs_4"] = value4
# **Since the visit_id contains the patient number and inspection time, we need to split this column of data into two columns, one is the patient number and the other is the inspection time**
train_proteins_pivot[["id", "time"]] = train_proteins_pivot["visit_id"].str.split(
"_", expand=True
)
train_proteins_pivot.drop(columns=["visit_id"], inplace=True)
# **Since some proteins are not present in everyone at all times, we use zeros to fill proteins that are not present in the body**
train_proteins_pivot.fillna("0", inplace=True)
# Then we move the time to the first column, along with the feature column
train_proteins_pivot = train_proteins_pivot[
[train_proteins_pivot.columns[-1]] + list(train_proteins_pivot.columns[:-1])
]
# Converts all numbers in the dataframe to integers
train_proteins_pivot = train_proteins_pivot.astype(int)
# # Target processing
# **Since the target value is composed of many values, we divided the target set into four equal fractions and assigned them the values of 1,2,3,4 to represent the severity of the disease**
bins = pd.qcut(train_proteins_pivot["updrs_1"], 4, labels=False, duplicates="drop")
four_quarters = ["1", "2", "3", "4"]
four_quarters_data = [four_quarters[x] for x in bins]
train_proteins_pivot["updrs_1"] = four_quarters_data
bins = pd.qcut(train_proteins_pivot["updrs_2"], 4, labels=False, duplicates="drop")
four_quarters = ["1", "2", "3", "4"]
four_quarters_data = [four_quarters[x] for x in bins]
train_proteins_pivot["updrs_2"] = four_quarters_data
bins = pd.qcut(train_proteins_pivot["updrs_3"], 4, labels=False, duplicates="drop")
four_quarters = ["1", "2", "3", "4"]
four_quarters_data = [four_quarters[x] for x in bins]
train_proteins_pivot["updrs_3"] = four_quarters_data
# **The reader may wonder why we did not process updrs_4: its distribution is severely imbalanced, so splitting it into quartiles would give inaccurate results**
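# A quick hedged check of that claim: inspect the raw distribution of updrs_4 to see how
# imbalanced it is before deciding against quartile binning.
print(train_proteins_pivot["updrs_4"].value_counts().head())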
# # Model construction
# **Random forest model**
from sklearn.ensemble import RandomForestClassifier
def RandomForest(n):
x = RandomForestClassifier(
criterion="gini",
max_depth=20,
n_estimators=100,
min_samples_split=100,
min_samples_leaf=5,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
)
x.fit(train_proteins_pivot.iloc[:, :228], train_proteins_pivot.iloc[:, n])
    # Print the out-of-bag (OOB) accuracy of the model
    print("oob_score: ", end="")
print("%.4f" % x.oob_score_)
    # Print the model accuracy on the training data
accuracy = x.score(
train_proteins_pivot.iloc[:, :228], train_proteins_pivot.iloc[:, n]
)
return accuracy
# Conduct a test
print("---------------Random Forest--------------")
ans = RandomForest(228)
print(ans)
ans = RandomForest(229)
print(ans)
ans = RandomForest(230)
print(ans)
ans = RandomForest(231)
print(ans)
# **Support vector machine model**
from sklearn.svm import SVC
def SVM(n):
x = SVC(kernel="rbf", C=1, gamma="scale", random_state=1)
x.fit(train_proteins_pivot.iloc[:, :228], train_proteins_pivot.iloc[:, n])
accuracy = x.score(
train_proteins_pivot.iloc[:, :228], train_proteins_pivot.iloc[:, n]
)
print("accuracy")
return accuracy
print("--------------------SVC-------------------")
ans = SVM(228)
print(ans)
ans = SVM(229)
print(ans)
ans = SVM(230)
print(ans)
ans = SVM(231)
print(ans)
|
# # Natural Language Processing with Disaster Tweets: Model Training Script
#
# > Seong Chan Cho, Ael Lee, Yeo Myeong Yoon
# ## 1. Setup
# install the latest version of transformers
# run this cell if logging in by explicitly providing the login token
# log into huggingface for saving the model
import huggingface_hub
huggingface_hub.login(token=..., add_to_git_credential=True)
import re
import string
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import torch
import transformers
from transformers import AutoTokenizer, DataCollatorWithPadding
from transformers import AutoModelForSequenceClassification
from transformers import TrainingArguments, Trainer
from datasets import Dataset, DatasetDict
# ## 2. Data Setup
# ### 2a. Reading in the data
# We will first start by reading in the data from the competition:
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
sample_sub = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
train.head()
test.head()
sample_sub.head()
# Since the `test` dataset is a dataset without the `target` variable, we will split the `train` dataset (80-20 split) and use the smaller subset as a validation set. To keep the split reproducible, we will also provide a seed number for our specific split.
train, valid = train_test_split(train, test_size=0.2, random_state=7)
train.head()
valid.head(-2)
# Before moving on to cleaning the data, let's check the balance between labels:
train_bal = list(train.target.value_counts())
valid_bal = list(valid.target.value_counts())
index = ["Non-Disaster (0)", "Disaster (1)"]
df = pd.DataFrame({"Train": train_bal, "Validation": valid_bal}, index=index)
axes = df.plot.bar(
rot=0, subplots=True, color={"Train": "#ff6361", "Validation": "#ffa600"}
)
axes[1].legend(loc=2)
# Although both the training and validation datasets have more Non-Disaster data, the balance is not too extreme to a concerning point. Therefore, we will move on to cleaning up the data.
# ### 2b. Data Clean-up
# The majority of the clean-up code was borrowed from [this notebook](https://www.kaggle.com/code/faressayah/sentiment-model-with-tensorflow-transformers)
# Use regex to clean the data
def remove_url(text):
url = re.compile(r"https?://\S+|www\.\S+")
return url.sub(r"", text)
def remove_punct(text):
# remove all punctuations except for @
punc = string.punctuation.replace("@", "")
table = str.maketrans("", "", punc)
return text.translate(table)
def remove_html(text):
html = re.compile(r"<.*?>")
return html.sub(r"", text)
def remove_emoji(text):
emoji_pattern = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"]+",
flags=re.UNICODE,
)
return emoji_pattern.sub(r"", text)
def cont_rep_char(text):
tchr = text.group(0)
if len(tchr) > 1:
return tchr[0:2]
def unique_char(rep, text):
text = re.sub(r"(\w)\1+", rep, text)
return text
# In addition to the above processing functions, I added an extra function `replace_mentions(text)` that replaces mentions (e.g. `@aellxx`) with `@user`.
def replace_mentions(text):
mention = re.compile(r"@([A-Za-z0-9_\.]+)")
text = mention.sub(r"@user", text)
return text
replace_mentions("@aellxx is my twitter username... maybe")
# This function was made to accommodate the training set of the [pretrained model](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment-latest) we will be using for training. The model was trained on a [dataset](https://huggingface.co/datasets/tweet_eval) of tweets, in which all mentions of users were replaced with `@user`.
train["text"] = train["text"].apply(lambda x: remove_url(x))
train["text"] = train["text"].apply(lambda x: remove_punct(x))
train["text"] = train["text"].apply(lambda x: remove_emoji(x))
train["text"] = train["text"].apply(lambda x: unique_char(cont_rep_char, x))
train["text"] = train["text"].apply(lambda x: replace_mentions(x))
valid["text"] = valid["text"].apply(lambda x: remove_url(x))
valid["text"] = valid["text"].apply(lambda x: remove_punct(x))
valid["text"] = valid["text"].apply(lambda x: remove_emoji(x))
valid["text"] = valid["text"].apply(lambda x: unique_char(cont_rep_char, x))
valid["text"] = valid["text"].apply(lambda x: replace_mentions(x))
test["text"] = test["text"].apply(lambda x: remove_url(x))
test["text"] = test["text"].apply(lambda x: remove_punct(x))
test["text"] = test["text"].apply(lambda x: remove_emoji(x))
test["text"] = test["text"].apply(lambda x: unique_char(cont_rep_char, x))
test["text"] = test["text"].apply(lambda x: replace_mentions(x))
train.head(-2)
# Now, let's aggregate the training and validation dataframes to a HuggingFace `DatasetDict` object for fine-tuning.
ds = DatasetDict(
{"train": Dataset.from_pandas(train), "validation": Dataset.from_pandas(valid)}
)
# The `DataCollatorWithPadding` used for batched-padding requires the targets to explicitly be named `labels`. We'll do that here:
ds = ds.rename_column("target", "labels")
ds
# ## 3. Tokenization
# For tokenizing the text data, we will use the [`twitter-roberta-base-sentiment`](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment?text=I+hate+you+%F0%9F%A4%AE) model and tokenizer, which was trained on the [`tweet-eval`](https://huggingface.co/datasets/tweet_eval) dataset. Since the original model is trained to predict 3 categories of sentiment (positive, neutral, negative), we will have to fine-tune this model to predict 2 categories to fit our task of predicting disasters.
checkpoint = "cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# Let's try out the tokenizer with one example from the training set:
sample_text = ds["train"][100]["text"]
sample_text
tokenized_text = tokenizer(sample_text)
input_ids = tokenized_text["input_ids"]
input_ids
tokenizer.convert_ids_to_tokens(input_ids)
# The `tokenizer` returns a dictionary, which means that if we use the tokenizer directly on our `ds` (DatasetDict object), it will not be a DatasetDict object anymore. To keep the Dataset as a Dataset, we will instead use the `.map()` function and provide a tokenizer function instead.
def tokenize_function(example):
return tokenizer(example["text"], truncation=True)
tokenized_ds = ds.map(tokenize_function, batched=True)
tokenized_ds
# As we can see from above, using `map()` retained the data as of type `DatasetDict`.
# ### Data Collation
# At this point, we will define a data collator, which dynamically pads the text data depending on the longest sequence in a batch of data. This will give us a performance boost while training because we won't need to pad every input according to the longest sequence in the entire dataset.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
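# A small hedged usage check: collate a handful of tokenized examples and confirm they are padded
# only to the longest sequence in that batch (only the tensor-friendly columns are passed in).
sample = [
    {k: tokenized_ds["train"][i][k] for k in ("input_ids", "attention_mask", "labels")}
    for i in range(4)
]
batch = data_collator(sample)
print(batch["input_ids"].shape)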
# ## 4. The Model
# We will load the pretrained model with the same checkpoint as the tokenizer above. Because we will fine-tune the initial model to output 2 categories instead of 3, we will explicitly provide the number of target labels (`num_labels=2`).
model = AutoModelForSequenceClassification.from_pretrained(
checkpoint, num_labels=2, ignore_mismatched_sizes=True
)
# We need not worry about the warning above, since we will fine-tune the model to output 2 categories.
# specify some hyperparameters
EPOCHS = 4
LR = 3e-5
# For fine-tuning this pre-trained model, we will use the HuggingFace [Trainer API](https://huggingface.co/docs/transformers/main_classes/trainer). We have already defined most of the parameters that the API needs in the above steps, but we will define a few more that are needed to start the fine-tuning process.
# ### Training Arguments
# The full documentation on the `TrainingArguments` class can be found [here](https://huggingface.co/docs/transformers/v4.27.2/en/main_classes/trainer#transformers.TrainingArguments).
training_args = TrainingArguments(
output_dir="disaster-tweet-5",
evaluation_strategy="steps",
logging_steps=12,
per_device_train_batch_size=64,
per_device_eval_batch_size=64,
num_train_epochs=EPOCHS,
push_to_hub=True,
)
# * `output_dir`: The directory to push the new model
# * `evaluation_strategy`: Run evaluation every `logging_steps` steps (the other option is per epoch)
# * `logging_steps`: We will log and evaluate the training status every 12 steps
# * `per_device_train_batch_size`: 64 rows of data will be trained in the same batch
# * `per_device_eval_batch_size`: 64 rows of data will be evaluated in the same batch
# * `num_train_epochs`: The number of training epochs
# * `push_to_hub`: Setting this to `True` will automatically push the fine-tuned model to the HuggingFace Hub
# ### Optimizers
# For training the model, we will use the [`AdamW`](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html) optimizer, which helps us to prevent overfitting to the training data through weight decay. We will also specify the learning rate with the value that we defined above.
optimizer = torch.optim.AdamW(model.parameters(), lr=LR)
optimizer
# We also need to define a learning rate scheduler using [`get_scheduler`](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules#transformers.get_scheduler), which will gradually reduce the learning rate, helping the model converge more smoothly to the optimal solution.
# The API requires us to specify `num_training_steps`, so we will simply define one according to the length of the training dataset.
train_len = tokenized_ds["train"].num_rows
from transformers import get_scheduler
num_epochs = EPOCHS
num_training_steps = EPOCHS * train_len
lr_scheduler = get_scheduler(
"linear",
optimizer=optimizer,
num_warmup_steps=num_training_steps * 0.1,
num_training_steps=num_training_steps,
)
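# For reference (a hedged aside, not the notebook's setting): the Trainer counts optimizer steps,
# so the conventional value would be epochs * ceil(dataset_size / batch_size); the larger
# EPOCHS * train_len used above simply stretches the warmup and linear decay.
import math
conventional_steps = EPOCHS * math.ceil(train_len / training_args.per_device_train_batch_size)
print(conventional_steps)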
# ### Defining the `Trainer`
# The [`Trainer` API](https://huggingface.co/docs/transformers/main_classes/trainer) provides a convenient way of fine-tuning models.
trainer = Trainer(
model,
training_args,
train_dataset=tokenized_ds["train"],
eval_dataset=tokenized_ds["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
optimizers=[optimizer, lr_scheduler],
)
trainer.train()
trainer.push_to_hub()
|
import pandas as pd
import numpy as np
import sklearn  # scikit-learn library
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv(
"https://raw.githubusercontent.com/anvarnarz/praktikum_datasets/main/housing_data_08-02-2021.csv"
)
df.head()
# Creating a model that predicts the prices of houses in Tashkent
# Defining an analytical approach
# **Supervised learning** - because we have a label in the data: the house price. We want to predict exactly this price.
# **Regression** - because we are predicting a continuous value (the price), which is solved using regression algorithms
# **Offline** - because the data has already been given to us once. There is no constant data flow.
# Data analysis
# * **location** - the address of the house for sale
# * **district** - the district where the house is located
# * **rooms** - the number of rooms
# * **size** - house area (sq.m)
# * **level** - the floor where the house is located
# * **max_levels** - the total number of floors in the building
# * **price** - house price
df.info()
# Conclusions from the above result:
# * The dataset consists of 7565 rows (each row is a separate house).
# * Although the *size* and *price* columns are numerical, the data type of these columns is object
# We will analyze the *size* and *price* columns.
# we try to convert the size column to float64 data type.
# df['size'].astype("float64")
# As we can see, we have found that the wrong information named **"Площадьземли:1сот"** has been entered in the size column.
# Let's find this row
df[df["size"] == "Площадьземли:1сот"]
# Since 1 sotix is 100 m2, we change this value to 100
df1 = df.replace("Площадьземли:1сот", 100)
df1.iloc[[5347]]
# Now we try to convert the **"price"** column to *float64* data type.
# df1['price'].astype("float64")
# As we can see, we have found that the information named **"Договорная"** has been entered in the **price** column.
# Let's find these rows
df1[df1["price"] == "Договорная"]
# Since we don't have enough data, we replace the **"Договорная"** value in these rows without discarding them.
# To do this, we find a column that has a strong correlation with the **"price"** column.
df2 = df1[df1["price"] != "Договорная"]
df3 = df2.astype({"price": "int64"})
df4 = df3.astype({"size": "float64"})
df4.corr()
# As we can see, there is almost no **correlation** between the **"price"** column and any of the other columns.
# This cannot be right.
# Let's find out why. For this, we use the *pair plot* graph of the **seaborn** library.
# cols = ['price','size','rooms','level', 'max_levels']
# sns.pairplot(df4[cols], height=5)
# plt.show()
# As we can see, the values in the *size* column are entered by mistake. We analyze them.
df4[df4["size"] > 300]
# Since the values at index 1827, 2268, 2547, 3089, 4089, 4411, 4615, 5456, 7421 are entered incorrectly, we discard these rows.
df4.shape
df5 = df4.drop([1827, 2268, 2547, 3089, 4089, 4411, 4615, 5456, 7421, 1831])
df5.shape
# Since the values at index 981, 2662, 4784 and 7043 are entered as multiples of 100 (index 1831 was already dropped above), we divide these values by 100.
df5["size"][[981, 2662, 4784, 7043]] = df5["size"][[981, 2662, 4784, 7043]].map(
lambda x: x / 100
)
# cols = ['price','size','rooms','level', 'max_levels']
# sns.pairplot(df5[cols], height=5)
# plt.show()
df5[df5["price"] > 1000_000]
# If we analyze the price column, we see that it also contains erroneous values in excess of **1,000,000** US dollars.
# We throw them away.
df6 = df5[df5["price"] < 1000_000]
# Now let's see the **correlation**.
df6.corr()
# cols = ['price','size','rooms','level', 'max_levels']
# sns.pairplot(df6[cols], height=5)
# plt.show()
# As we can see, the **strongest correlation** with the *price column* is in the *size column*.
df9 = df6.drop(df6[(df6["size"] < 20) & (df6["rooms"] > 1)].index)
df9.plot(kind="scatter", x="size", y="price", alpha=0.5, figsize=(6, 4))
plt.show()
# Now we replace the **"Договорная"** value in the df1 dataframe.
df7 = df1.replace("Договорная", np.NaN)
df8 = df7.astype({"size": "float64"})
df91 = df8.sort_values(by=["size"])
df10 = df91.fillna(method="ffill")
df11 = df10.astype({"price": "int64"})
df11.info()
# Drop duplicates
df12 = df11.drop_duplicates()
df13 = df12.drop([1827, 2268, 2547, 3089, 4089, 4411, 4615, 5456, 7421, 1831])
df13["size"][[981, 2662, 4784, 7043]] = df13["size"][[981, 2662, 4784, 7043]].map(
lambda x: x / 100
)
df14 = df13[(df13["price"] < 600_000)]
df15 = df14[df14["size"] < 300]
df15
df15.plot(kind="scatter", x="size", y="price", alpha=0.5, figsize=(6, 4))
plt.show()
# We continue the visualization
plt.figure(figsize=(12, 9))
sns.barplot(x=df15.district, y=df15["price"])
plt.title("Price of houses of the Tashkent city")
plt.xlabel("Disctrict")
plt.ylabel("Price")
plt.grid()  # Add horizontal and vertical grid lines to the plot
plt.xticks(rotation=30)
plt.show()
# We analyze the balance in the Size column.
df15["size_cat"] = pd.cut(
df15["size"], bins=[0.0, 25.0, 50.0, 75.0, 100.0, np.inf], labels=[1, 2, 3, 4, 5]
)
df15["size_cat"].hist(figsize=(9, 6))
plt.show()
# We divide the data into a **balanced** *train* and a *test* set.
from sklearn.model_selection import StratifiedShuffleSplit
stratified_split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
# the stratified_split.split function returns indices
for train_index, test_index in stratified_split.split(df15, df15["size_cat"]):
strat_train_set = df15.iloc[train_index]
strat_test_set = df15.iloc[test_index]
strat_train_set.drop("size_cat", axis=1, inplace=True)
strat_test_set.drop("size_cat", axis=1, inplace=True)
strat_train_set.drop("location", axis=1, inplace=True)
strat_test_set.drop("location", axis=1, inplace=True)
strat_test_set
# We choose *balanced sets* for work.
# This time we also separate the *price* column (the target we want to predict). This is because the transformations applied to the feature columns should not be applied to the label.
housing = strat_train_set.drop("price", axis=1)
housing_labels = strat_train_set["price"].copy()
# Encoding text columns
# The district column of our dataset is a text column.
# Let's separate this column to get started.
housing_cat = housing[["district"]]
housing_num = housing.drop("district", axis=1)
# Using the OneHotEncoder method, each unique value becomes a separate column and we put 1 in the corresponding column and 0 in the rest.
from sklearn.preprocessing import OneHotEncoder
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot.toarray()
housing
# We can make a **transformer**
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, size_ix, level_ix, max_levels_ix = 0, 1, 2, 3
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_size_of_per_room=True):
self.add_size_of_per_room = add_size_of_per_room
def fit(self, X, y=None):
        return self  # this class is only a transformer, not an estimator
def transform(self, X):
percent_of_levels = X[:, level_ix] * 100 / X[:, max_levels_ix]
        if self.add_size_of_per_room:  # the size_of_per_room column is optional
size_of_per_room = X[:, size_ix] / X[:, rooms_ix]
return np.c_[X, percent_of_levels, size_of_per_room]
else:
return np.c_[X, percent_of_levels]
attr_adder = CombinedAttributesAdder(add_size_of_per_room=True)
housing_extra_attribs = attr_adder.transform(housing_num.values)
housing_extra_attribs[0, :]
attr_adder = CombinedAttributesAdder(add_size_of_per_room=False)
housing_extra_attribs = attr_adder.transform(housing_num.values)
housing_extra_attribs[0, :]
# Standard Scaler
#
from sklearn.preprocessing import StandardScaler
standart_scaler = StandardScaler()
standart_scaler.fit_transform(housing_num)
housing
housing_onehot = pd.get_dummies(housing["district"])
housing_onehot
housing_num
# Pipeline
#
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
num_pipeline = Pipeline([("std_scaler", StandardScaler())])
num_pipeline.fit_transform(housing_num)
from sklearn.compose import ColumnTransformer
num_attribs = list(housing_num)
cat_attribs = ["district"]
full_pipeline = ColumnTransformer(
[("num", num_pipeline, num_attribs), ("cat", OneHotEncoder(), cat_attribs)]
)
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared[0:5, :]
# Now our data is ready.
# Machine learning
# Linear Regression
from sklearn.linear_model import LinearRegression
LR_model = LinearRegression()
LR_model.fit(housing_prepared, housing_labels)
# Let's check how well the model works
test_data = housing.sample(10)
test_data
test_label = housing_labels.loc[test_data.index]
test_label
test_data_prepared = full_pipeline.transform(test_data)
test_data_prepared
predicted_data = LR_model.predict(test_data_prepared)
predicted_data
pd.DataFrame({"Predict": predicted_data, "Real price": test_label})
# Let's evaluate the model.
#
test_set = strat_test_set
test_set
# First let's extract the predictor columns (all columns except `price`):
X_test = test_set.drop("price", axis=1)
X_test
y_test = test_set["price"].copy()
y_test
# We also pass **test_set** through the *pipeline:*
X_test_prepared = full_pipeline.transform(X_test)
# Predict
y_predicted = LR_model.predict(X_test_prepared)
# We use the Root Mean Squared Error (RMSE) we saw in the previous section to compare the predictions with the real values:
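# For reference, the metric computed below is
# $\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n} (y_i - \hat{y}_i)^2}$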
from sklearn.metrics import mean_squared_error
lin_mse = mean_squared_error(y_test, y_predicted)
# compute the RMSE
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse)
# DecisionTree
from sklearn.tree import DecisionTreeRegressor
Tree_model = DecisionTreeRegressor()
Tree_model.fit(housing_prepared, housing_labels)
y_predicted = Tree_model.predict(X_test_prepared)
lin_mse = mean_squared_error(y_test, y_predicted)
# compute the RMSE
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse)
# RandomForest
from sklearn.ensemble import RandomForestRegressor
RF_model = RandomForestRegressor()
RF_model.fit(housing_prepared, housing_labels)
y_predicted = RF_model.predict(X_test_prepared)
lin_mse = mean_squared_error(y_test, y_predicted)
# compute the RMSE
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse)
# Saving model
import pickle
filename = "RF_model.pkl" # faylga istalgan nom beramiz
with open(filename, "wb") as file:
pickle.dump(RF_model, file)
import joblib
filename = "RF_model.jbl" # faylga istalgan nom beramiz
joblib.dump(RF_model, filename)
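# To reuse the saved model later we can load it back; a minimal sketch, assuming the files saved above are in the working directory:
loaded_model = joblib.load("RF_model.jbl")  # or: pickle.load(open("RF_model.pkl", "rb"))
print(loaded_model.predict(X_test_prepared[:5]))  # quick sanity check on a few prepared test rows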
|
from typing import Dict, List, Union
from pathlib import Path, PosixPath
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import (
classification_report,
confusion_matrix,
ConfusionMatrixDisplay,
)
from sklearn.model_selection import train_test_split
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import (
Conv1D,
Dense,
Dropout,
Embedding,
Flatten,
MaxPooling1D,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
human_data = pd.read_table("/kaggle/input/dna-sequence-dataset/human.txt")
chimpanzee_data = pd.read_table("/kaggle/input/dna-sequence-dataset/chimpanzee.txt")
dog_data = pd.read_table("/kaggle/input/dna-sequence-dataset/dog.txt")
seed = 12345
train_val_split = 0.8
n_filters = 512
hidden_layer = 1024
pool_size = 2
dropout = 0.5
learning_rate = 0.001
batch_size = 16
epochs = 100
loss = "categorical_crossentropy"
n_classes = human_data["class"].nunique()
human_data.head()
ctoi = {"A": 0, "C": 1, "G": 2, "N": 4, "T": 3}
human_data_int = [[ctoi[i] for i in seq] for seq in human_data["sequence"]]
max_len = max(len(i) for i in human_data_int)
training_int_data = pad_sequences(human_data_int, maxlen=max_len, padding="post")
training_int_data = np.array(training_int_data, dtype="float32")
training_classes = to_categorical(human_data["class"])
n_characters = len(ctoi)
embedding_weights = []
embedding_weights.append(np.zeros(n_characters))
for i in range(len(ctoi)):
one_hot_encoded_char = np.zeros(n_characters)
one_hot_encoded_char[i] = 1
embedding_weights.append(one_hot_encoded_char)
embedding_weights = np.array(embedding_weights)
model = Sequential()
model.add(
Embedding(
n_characters + 1,
n_characters,
input_length=max_len,
weights=[embedding_weights],
)
)
model.add(Conv1D(n_filters, 7, activation="relu"))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Conv1D(n_filters, 7, activation="relu"))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Conv1D(n_filters, 3, activation="relu"))
model.add(Conv1D(n_filters, 3, activation="relu"))
model.add(Conv1D(n_filters, 3, activation="relu"))
model.add(Conv1D(n_filters, 3, activation="relu"))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Flatten())
model.add(
Dense(
hidden_layer,
activation="relu",
)
)
model.add(Dropout(dropout))
model.add(Dense(hidden_layer, activation="relu"))
model.add(Dropout(dropout))
model.add(Dense(n_classes, activation="softmax"))
model.compile(
optimizer=Adam(learning_rate=learning_rate),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.build(input_shape=(None, max_len))  # full input shape, including the batch dimension
model.summary()
X_train, X_val, y_train, y_val = train_test_split(
training_int_data,
training_classes,
test_size=1 - train_val_split,
train_size=train_val_split,
random_state=seed,
shuffle=True,
stratify=human_data["class"],
)
print(X_train.shape)
print(y_train.shape)
early_stopping = EarlyStopping(
monitor="val_loss", verbose=1, patience=5, mode="min", restore_best_weights=True
)
model_checkpoint = ModelCheckpoint("sap_homework_epoch_{epoch}.h5")
history = model.fit(
X_train,
y_train,
validation_data=(X_val, y_val),
batch_size=batch_size,
epochs=epochs,
callbacks=[early_stopping, model_checkpoint],
)
def plot_metrics(history):
    metrics = ["loss", "accuracy"]
    # use matplotlib's default color cycle for the train/validation curves
    colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    for n, metric in enumerate(metrics):
        name = metric.replace("_", " ").capitalize()
        plt.subplot(2, 2, n + 1)
        plt.plot(history.epoch, history.history[metric], color=colors[0], label="Train")
plt.plot(
history.epoch,
history.history["val_" + metric],
color=colors[0],
linestyle="--",
label="Val",
)
plt.xlabel("Epoch")
plt.ylabel(name)
if metric == "loss":
plt.ylim([0, plt.ylim()[1]])
elif metric == "auc":
plt.ylim([0.8, 1])
else:
plt.ylim([0, 1])
plt.legend()
plot_metrics(history)
scores = model.predict(X_val)
y_hat = np.argmax(scores, axis=1)
y_val = np.argmax(y_val, axis=1)
print(classification_report(y_val, y_hat))
cm = confusion_matrix(y_val, y_hat)
disp = ConfusionMatrixDisplay(
    confusion_matrix=cm, display_labels=list(range(n_classes))
)
disp.plot(xticks_rotation="vertical")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import ipywidgets as widgets
from ipywidgets import interact
df = pd.read_excel("/kaggle/input/nutricients/ABBREV.xlsx")
df.head()
# # Basic Exploring
df.shape
df.dtypes
df.duplicated().value_counts()
# # Select Your Nutrient
cols = [column for column in df.columns if df[column].dtype != "object"]
plt.rcParams["figure.figsize"] = (15, 15)
palette = sns.color_palette("Blues", n_colors=30)
palette.reverse()
##### interactive plot #####
dd = widgets.Dropdown(options=cols, description="Select Nutrient")
def draw_barplot(column):
p = sns.barplot(
data=df.nlargest(20, column), x=column, y="Shrt_Desc", palette=palette
)
p.set_title("20 Foods High in " + column, fontsize=30)
plt.yticks(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel(column, fontsize=25)
plt.ylabel("")
interact(draw_barplot, column=dd)
# # 50 High Cholesterol Foods List
high_cholest = df.nlargest(50, "Cholestrl_(mg)")[
["Shrt_Desc", "Cholestrl_(mg)"]
].reset_index()
high_cholest.index += 1
high_cholest
# # 50 Foods High in Sugar
high_sugar_foods = df.nlargest(50, "Sugar_Tot_(g)")[
["Shrt_Desc", "Sugar_Tot_(g)"]
].reset_index()
high_sugar_foods.index += 1
high_sugar_foods
# # 50 Foods High in Total Lipids
high_lipid = df.nlargest(50, "Lipid_Tot_(g)")[
["Shrt_Desc", "Lipid_Tot_(g)"]
].reset_index()
high_lipid.index += 1
high_lipid
# # 50 Foods High in Calories(Energ_Kcal)
high_cal = df.nlargest(50, "Energ_Kcal")[["Shrt_Desc", "Energ_Kcal"]].reset_index()
high_cal.index += 1
high_cal
# # 50 Foods with High Protein
high_Protein = df.nlargest(50, "Protein_(g)")[
["Shrt_Desc", "Protein_(g)"]
].reset_index()
high_Protein.index += 1
high_Protein
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("/kaggle/input/ufcdata/data.csv")
data.shape
data.head()
def get_missing_values(df):
"""Gets DataFrame of variables and number of values missing per variable"""
rename_dict = {
"index": "variable",
0: "num_missing_values",
"0": "num_missing_values",
}
df_missing_values = df.isnull().sum().to_frame().reset_index()
df_missing_values.rename(mapper=rename_dict, axis=1, inplace=True)
df_missing_values.sort_values(
by="num_missing_values", ascending=False, inplace=True
)
df_missing_values = df_missing_values[
df_missing_values["num_missing_values"] > 0
].reset_index(drop=True)
return df_missing_values
def sort_by_date_descending(df):
df["date"] = pd.to_datetime(df["date"])
    df = df.sort_values(by="date", ascending=False).reset_index(drop=True)
return df
def get_fighters(df):
"""Get list of all fighters"""
red_corner_fighters = df["R_fighter"].unique().tolist()
blue_corner_fighters = df["B_fighter"].unique().tolist()
fighters = sorted(list(set(red_corner_fighters + blue_corner_fighters)))
return fighters
def get_fight_count_by_division(df):
"""Get Pandas DataFrame of fight count by division"""
df_fight_count_by_division = (
df["weight_class"].value_counts().to_frame().reset_index()
)
df_fight_count_by_division.rename(
{"weight_class": "count", "index": "weight_class"}, axis=1, inplace=True
)
df_fight_count_by_division["sex"] = df_fight_count_by_division[
"weight_class"
].apply(get_gender_from_weight_class)
return df_fight_count_by_division
def data_by_fighters(df, fighter):
"""Helper function to get filtered Pandas DataFrame by fighter"""
df_by_fighter = df[(df["R_fighter"] == fighter) | (df["B_fighter"] == fighter)]
return df_by_fighter
def filter_by_fighters_of_interest(df, fighters_of_interest):
"""
Definition: Filter entire DataFrame to get just the fights of fighters you're interested in
Parameters:
- df (Pandas DataFrame): Raw Pandas DataFrame of UFC data
- fighters_of_interest (list): List of fighters of interest
Returns:
Filtered Pandas DataFrame containing just the fights of fighters you're interested in
"""
df_by_fighters_of_interest = pd.DataFrame()
for fighter in fighters_of_interest:
df_by_fighter = df[(df["R_fighter"] == fighter) | (df["B_fighter"] == fighter)]
df_by_fighters_of_interest = pd.concat(
objs=[df_by_fighters_of_interest, df_by_fighter],
ignore_index=True,
sort=False,
)
return df_by_fighters_of_interest
def get_distributions(df):
"""Get distributions of age, weight, height, reach. Returns dictionary of all variables' distributions"""
distribution_dict = dict()
df_temp = df.copy()
age = df_temp["R_age"].dropna().to_list() + df_temp["B_age"].dropna().to_list()
height = (
df_temp["R_Height_cms"].dropna().to_list()
+ df_temp["B_Height_cms"].dropna().to_list()
)
weight = (
df_temp["R_Weight_lbs"].dropna().to_list()
+ df_temp["B_Weight_lbs"].dropna().to_list()
)
reach = (
df_temp["R_Reach_cms"].dropna().to_list()
+ df_temp["B_Reach_cms"].dropna().to_list()
)
distribution_dict["age"] = age
distribution_dict["height"] = height
distribution_dict["weight"] = weight
distribution_dict["reach"] = reach
return distribution_dict
def get_stance(df, fighter):
df_stances = data_by_fighters(df=df, fighter=fighter)
df_stances_red = df_stances[df_stances["R_fighter"] == fighter]
df_stances_blue = df_stances[df_stances["B_fighter"] == fighter]
stances = (
df_stances_red["R_Stance"].value_counts()
+ df_stances_blue["B_Stance"].value_counts()
)
try:
return stances.index[0]
except IndexError as e:
print("IndexError in stance for fighter: {}".format(fighter))
return "unknown"
#
def get_record(df, fighter):
"""Get record by fighter. Returns dictionary of fights, wins, losses, no-contests, and percentages"""
df_fighter = data_by_fighters(df=df, fighter=fighter)
fights = len(df_fighter)
win_as_red = (df_fighter["R_fighter"] == fighter) & (df_fighter["Winner"] == "Red")
win_as_blue = (df_fighter["B_fighter"] == fighter) & (
df_fighter["Winner"] == "Blue"
)
lose_as_red = (df_fighter["R_fighter"] == fighter) & (
df_fighter["Winner"] == "Blue"
)
lose_as_blue = (df_fighter["B_fighter"] == fighter) & (
df_fighter["Winner"] == "Red"
)
df_wins = df_fighter[win_as_red | win_as_blue]
df_losses = df_fighter[lose_as_red | lose_as_blue]
no_contest = 0
for result in df_fighter["Winner"].tolist():
if result.strip().lower() not in ["red", "blue"]:
no_contest += 1
wins = len(df_wins)
losses = len(df_losses)
nc_percent = round((no_contest * 100 / fights), 2)
win_percent = round((wins * 100 / fights), 2)
loss_percent = round(100 - win_percent - nc_percent, 2)
record_dict = dict()
record_dict["fighter"] = fighter
record_dict["fights"] = fights
record_dict["wins"] = wins
record_dict["losses"] = losses
record_dict["no_contest"] = no_contest
record_dict["win_percent"] = win_percent
record_dict["loss_percent"] = loss_percent
record_dict["nc_percent"] = nc_percent
return record_dict
def get_fight_frequency(df, fighter, by="days"):
"""
Definition: Get fight frequency by fighter (in days or months).
Parameters:
- df (Pandas DataFrame): Raw Pandas DataFrame of UFC data
- fighter (string): Name of fighter
- by (string): Options are ['days', 'months']. Default: 'days'
Returns:
Average number of days/months between consecutive fights (int or float)
"""
df_fighter = data_by_fighters(df=df, fighter=fighter)
df_fighter["date"] = pd.to_datetime(df_fighter["date"])
fights = len(df_fighter)
first = df_fighter["date"].iloc[0]
last = df_fighter["date"].iloc[-1]
date_difference = abs((first - last).days)
fight_frequency = round(date_difference / fights, 2)
if by == "days":
return fight_frequency
elif by == "months":
fight_frequency = round(fight_frequency / 30, 2)
return fight_frequency
#
def get_location_counts(df):
df_locations = (
df.drop_duplicates(subset=["date"], keep="first")["location"]
.value_counts()
.to_frame()
.reset_index()
)
df_locations.rename(
{"location": "count", "index": "location"}, axis=1, inplace=True
)
return df_locations
def get_gender_from_weight_class(weight_class_series):
"""Helper function that can be used with pd.apply() on the 'weight_class' column - to get gender of fighters"""
division = weight_class_series.strip().lower()
if "women" in division:
return "Female"
return "Male"
def get_percent_of_title_bouts_by_division(df):
df_percent_title_bouts = (
df.groupby(by="weight_class")["title_bout"].mean() * 100
).to_frame()
df_percent_title_bouts.rename(
{"title_bout": "percent_title_bouts"}, axis=1, inplace=True
)
df_percent_title_bouts["percent_title_bouts"] = df_percent_title_bouts[
"percent_title_bouts"
].apply(round, args=[2])
df_percent_title_bouts.sort_values(
by="percent_title_bouts", ascending=False, inplace=True
)
df_percent_title_bouts = df_percent_title_bouts.reset_index()
df_percent_title_bouts["sex"] = df_percent_title_bouts["weight_class"].apply(
get_gender_from_weight_class
)
return df_percent_title_bouts
def get_win_method(df, fighter):
"""Returns dictionary of method of wins in all UFC fights for said fighter"""
rename_red = {
"R_fighter": "fighter",
"R_win_by_Decision_Majority": "win_by_Decision_Majority",
"R_win_by_Decision_Split": "win_by_Decision_Split",
"R_win_by_Decision_Unanimous": "win_by_Decision_Unanimous",
"R_win_by_KO/TKO": "win_by_KO/TKO",
"R_win_by_Submission": "win_by_Submission",
"R_win_by_TKO_Doctor_Stoppage": "win_by_TKO_Doctor_Stoppage",
"R_wins": "wins",
"R_losses": "losses",
}
rename_blue = {
"B_fighter": "fighter",
"B_win_by_Decision_Majority": "win_by_Decision_Majority",
"B_win_by_Decision_Split": "win_by_Decision_Split",
"B_win_by_Decision_Unanimous": "win_by_Decision_Unanimous",
"B_win_by_KO/TKO": "win_by_KO/TKO",
"B_win_by_Submission": "win_by_Submission",
"B_win_by_TKO_Doctor_Stoppage": "win_by_TKO_Doctor_Stoppage",
"B_wins": "wins",
"B_losses": "losses",
}
df_fighter = data_by_fighters(df=df, fighter=fighter)
df_fighter_latest_fight = df_fighter.head(1)
if df_fighter_latest_fight["R_fighter"].iloc[0] == fighter:
df_fighter_latest_fight.rename(rename_red, axis=1, inplace=True)
df_fighter_latest_stats = df_fighter_latest_fight.loc[:, rename_red.values()]
return df_fighter_latest_stats.iloc[0].to_dict()
elif df_fighter_latest_fight["B_fighter"].iloc[0] == fighter:
df_fighter_latest_fight.rename(rename_blue, axis=1, inplace=True)
df_fighter_latest_stats = df_fighter_latest_fight.loc[:, rename_blue.values()]
return df_fighter_latest_stats.iloc[0].to_dict()
#
def get_referee_appearances(df, normalize=False):
"""Get count/percentage of referee appearances"""
if normalize:
df_referee_appearances = (
df["Referee"].value_counts(normalize=True) * 100
).reset_index()
variable_name = "percent_appearances"
else:
df_referee_appearances = df["Referee"].value_counts().reset_index()
variable_name = "count_appearances"
df_referee_appearances.rename(
{"Referee": variable_name, "index": "referee"}, axis=1, inplace=True
)
return df_referee_appearances
def get_location_details(location):
location_dict = dict()
location = str(location).strip()
location_list = location.split(", ")
city = location_list[0]
country = location_list[-1]
location_dict["city"] = city
location_dict["country"] = country
if len(location_list) == 3:
state = location_list[1] # The state isn't always mentioned
location_dict["state"] = state
return location_dict["country"]
def get_location_counts(df):
df_locations = (
df.drop_duplicates(subset=["date"], keep="first")["location"]
.value_counts()
.to_frame()
.reset_index()
)
df_locations.rename(
{"location": "count", "index": "location"}, axis=1, inplace=True
)
df_locations["country"] = df_locations["location"].apply(get_location_details)
return df_locations
def get_country_located(df_location):
df_location_by_country = (
df_location["country"].value_counts().to_frame().reset_index()
)
df_location_by_country.rename(
{"country": "count", "index": "country"}, axis=1, inplace=True
)
return df_location_by_country
def plot_distributions(df):
"""Plot distributions of age, height, weight, reach"""
distributions = get_distributions(df=df)
color_dict = {
"age": "#D6442D",
"height": "#2DD69F",
"weight": "#3A2B73",
"reach": "#29922A",
}
label_dict = {"age": "years", "height": "cms", "weight": "lbs", "reach": "cms"}
for feature in distributions.keys():
distribution = distributions[feature]
title = "UFC - {} distribution".format(feature.capitalize())
plt.figure(figsize=(25, 14))
plt.hist(x=distribution, color=color_dict[feature])
plt.title(title, fontsize=40)
plt.xlabel("{} (in {})".format(feature, label_dict[feature]), fontsize=30)
plt.ylabel("count", fontsize=30)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
if feature == "weight":
plt.xlim(min(distribution) - 10, 300)
plt.show()
#
def plot_bar_chart(data, title, x, y, hue=None, color=None):
plt.figure(figsize=(25, 14))
sns.barplot(x=x, y=y, hue=hue, data=data, color=color)
plt.title(title, fontsize=40)
plt.xlabel(x, fontsize=30)
plt.ylabel(y, fontsize=30)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
for i, v in enumerate(data[x]):
plt.text(
x=v + 0.4, y=i, s=str(v), fontweight="bold", fontsize=24, color="black"
)
if hue:
plt.legend(loc="best", fontsize=20)
plt.show()
# # Pull various overall stats
data = sort_by_date_descending(df=data)
df_locations = get_location_counts(df=data)
df_locations_by_country = get_country_located(df_location=df_locations)
df_referee_appearances = get_referee_appearances(df=data, normalize=False)
df_fight_count_by_division = get_fight_count_by_division(df=data)
df_title_bouts_by_division = get_percent_of_title_bouts_by_division(df=data)
# # Pull stats for fighters of interest
fighters_of_interest = [
"Conor McGregor",
"Khabib Nurmagomedov",
"Tony Ferguson",
"Dustin Poirier",
"Jon Jones",
"Henry Cejudo",
"Stipe Miocic",
"Daniel Cormier",
"Israel Adesanya",
"Kamaru Usman",
"Alexander Volkanovski",
"Max Holloway",
"Tyron Woodley",
"Robert Whittaker",
"Jorge Masvidal",
"Joseph Benavidez",
"Marlon Moraes",
"Petr Yan",
"Jose Aldo",
"Brian Ortega",
"Zabit Magomedsharipov",
"Chan Sung Jung",
"Frankie Edgar",
"Donald Cerrone",
"Kevin Lee",
"Islam Makhachev",
"Colby Covington",
"Nate Diaz",
"Paulo Costa",
"Yoel Romero",
"Kelvin Gastelum",
"Thiago Santos",
"Dominick Reyes",
"Alexander Gustafsson",
"Francis Ngannou",
"Curtis Blaydes",
"Junior Dos Santos",
"Alistair Overeem",
"Amanda Nunes",
"Valentina Shevchenko",
"Zhang Weili",
"Jessica Andrade",
"Rose Namajunas",
"Joanna Jedrzejczyk",
"Holly Holm",
]
print("Number of fighters of interest: {}".format(len(fighters_of_interest)))
data_fighters_of_interest = filter_by_fighters_of_interest(
df=data, fighters_of_interest=fighters_of_interest
)
df_stats = pd.DataFrame()
for fighter in fighters_of_interest:
try:
dict_record = get_record(df=data, fighter=fighter)
dict_win_method = get_win_method(df=data, fighter=fighter)
stance = get_stance(df=data, fighter=fighter)
fight_frequency = get_fight_frequency(df=data, fighter=fighter, by="months")
# Put the stats into a single DataFrame
df_stat_record = pd.DataFrame(data=dict_record, index=[0])
df_stat_win_method = pd.DataFrame(data=dict_win_method, index=[0])
df_stat = pd.merge(
left=df_stat_record, right=df_stat_win_method, on="fighter", how="outer"
)
df_stat["stance"] = stance
df_stat["fight_frequency"] = fight_frequency
df_stats = pd.concat(objs=[df_stats, df_stat], ignore_index=True, sort=False)
except ZeroDivisionError:
print("ZeroDivisionError for fighter: {}".format(fighter))
df_stats.sort_values(
by=["win_percent", "fights"], ascending=[False, False], inplace=True
)
df_stats.reset_index(drop=True, inplace=True)
# # Plotting
# ### Distribution for all data
plot_distributions(df=data)
# ### Distribution for data of just fighters of interest
plot_distributions(df=data_fighters_of_interest)
plot_bar_chart(
data=df_fight_count_by_division,
title="Fight count by division",
x="count",
y="weight_class",
hue="sex",
)
plot_bar_chart(
data=df_title_bouts_by_division,
title="Title bouts by division (total bouts/title bouts)",
x="percent_title_bouts",
y="weight_class",
hue="sex",
)
plot_bar_chart(
data=df_referee_appearances.head(10),
title="Total referee appearances",
x="count_appearances",
y="referee",
color="purple",
)
plot_bar_chart(
data=df_locations_by_country.head(10),
title="UFC event locations by country",
x="count",
y="country",
color=None,
)
# # Insights
df_locations.head(8)
df_locations_by_country.head(8)
df_referee_appearances.head(10)
df_fight_count_by_division
df_title_bouts_by_division
# # Stats of fighters of interest
df_stats
# ## Average rest period of selected fighters
plt.figure(figsize=(25, 20))
df_stats.set_index(keys="fighter").sort_values(by="fight_frequency", ascending=False)[
"fight_frequency"
].plot(kind="barh", color="purple")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.title("Number of months of rest before a fight", fontsize=40)
plt.xlabel("Number of months of rest before a fight", fontsize=30)
plt.ylabel("Fighter", fontsize=30)
plt.show()
|
#
# # 🥝**Fruit Classification**
# ## Introduction
# The technique used for the fruit image classification here is **Transfer Learning**: we leverage the weights another model has already learnt on a very similar problem instead of learning all the patterns from scratch.
# With the help of transfer learning we can achieve higher accuracy than building a **Convolutional Neural Network** from scratch.
# In this notebook we take advantage of the patterns and weights that **ResNet50V2** has learnt for the image classification task.
# We also tried EfficientNet, but the results were not as good.
# ### * Import all the libraries required to work with the data and to build the model
#
# import the libraries
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
import matplotlib.image as mpimg
import seaborn as sns
import random
import zipfile
import pathlib
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Activation
from tensorflow.keras import Sequential
import datetime
from tensorflow.keras.applications import EfficientNetB0, ResNet50V2
from tensorflow.keras.preprocessing import image
from sklearn.metrics import confusion_matrix
# ### * To download the dataset from Kaggle directly into **Google Colab**, the following commands are needed
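# A sketch of the typical Colab setup (shown as comments since shell magics don't run in a plain script; the dataset owner below is a placeholder, not taken from the original):
# !pip install -q kaggle
# !mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/ && chmod 600 ~/.kaggle/kaggle.json
# !kaggle datasets download -d <owner>/fruits-classification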
# ### * To extract the files from **Zip Folder**
zip_ref = zipfile.ZipFile("fruits-classification.zip", "r")
zip_ref.extractall()
zip_ref.close()
# ### * Inspect the data to find out how many directories are in the folder and how many files are in each directory.
# * There are three folders 📁, named train, valid and test.
# * The train folder contains 1940 images for each class.
# * The valid folder contains 40 images for each class.
# * The test folder contains 20 images for each class.
for dirpath, dirname, filename in os.walk("Fruits_Classification"):
print(
f"There are {len(dirname)} directories and {len(filename)} images in {dirpath}"
)
# ### * We are dealing with a **multi-class classification** problem with 5 classes of fruits 🍎🥭🍇🍓🍌. The code below stores the name of each class.
data_dir = pathlib.Path("Fruits_Classification/train")
class_names = [i.name for i in data_dir.glob("*")]
class_names
# ## Data Visualization
# ### * Define a function to visualize the data: pick one random sample image from the target class directory and display it with its label.
def view_random_image(target_dir, target_class):
# setup target directory
target_folder = target_dir + "/" + target_class
# print(target_folder)
# get the random image from the target dir and targetclass
random_image = random.sample(os.listdir(target_folder), 1)
print(random_image[0])
# read the image and plot
img = mpimg.imread(target_folder + "/" + random_image[0])
plt.imshow(img)
plt.title(target_class)
plt.axis("off")
print(f"Image shape:{img.shape}")
return img
img = view_random_image(target_dir="Fruits_Classification/train", target_class="Apple")
# ## Data Preprocessing
# * With the help of **ImageDataGenerator** we can apply random transformations to the training images while the model is still training.
# * Here we only rescale the images, which brings the pixel values into the range 0 to 1 (**normalization**).
# * 🔑 If we want to apply more **data augmentation** techniques such as rotation, width/height shifts, zoom, flipping etc., they should be applied to the training images only (a sketch is shown after this list).
# * Here we apply the ImageDataGenerator class together with **flow_from_directory**
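# A sketch of a training-only augmented generator, as described above (the specific augmentation values are illustrative choices, not the settings used in this notebook):
augmented_train_gen = ImageDataGenerator(
    rescale=1 / 255,
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
)
# The validation generator keeps only the rescaling, exactly as in the code below.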
Image_shape = (224, 224)
Batch_size = 32
train_dir = "/content/Fruits_Classification/train/"
val_dir = "/content/Fruits_Classification/valid/"
train_data_gen = ImageDataGenerator(rescale=1 / 255)
val_data_gen = ImageDataGenerator(rescale=1 / 255)
train_data = train_data_gen.flow_from_directory(
train_dir, target_size=Image_shape, batch_size=Batch_size, class_mode="categorical"
)
valid_data = val_data_gen.flow_from_directory(
val_dir, target_size=Image_shape, batch_size=Batch_size, class_mode="categorical"
)
# ##### **Callbacks** add extra functionality during and after training, e.g.
# * tracking experiments
# * model checkpointing
# * early stopping before overfitting
# Some popular callbacks are (a short sketch of the latter two follows this list):
# * TensorBoard
# * ModelCheckpoint
# * EarlyStopping
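# A minimal sketch of the EarlyStopping and ModelCheckpoint callbacks mentioned above (the patience value and file name are arbitrary illustrative choices):
example_early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=3, restore_best_weights=True
)
example_checkpoint = tf.keras.callbacks.ModelCheckpoint(
    "best_fruit_model.h5", monitor="val_accuracy", save_best_only=True
)
# Both would be passed to model.fit(..., callbacks=[example_early_stopping, example_checkpoint]).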
def create_tensorboard_callback(dir_name, experiment_name):
log_dir = (
dir_name
+ "/"
+ experiment_name
+ "/"
+ datetime.datetime.now().strftime("%Y%m%d-%H")
)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
print(f"saving tensorboardcallback:{log_dir}")
return tensorboard_callback
# ## Defining the Model
# * The steps are: create the model, compile the model and fit the model.
# * With the help of the **Functional API** we build our first TensorFlow transfer-learning model on top of **ResNet50V2**.
# * We use the **feature-extraction** style of transfer learning here: we leverage the pretrained weights and only adapt the parts that suit our classification problem.
#
# * 🗒 We freeze all the learned patterns in the bottom layers and adjust only the top 2-3 pretrained layers of the model to our custom data.
#
# * This is our **baseline** model, which reaches an accuracy of about 84% on the validation data.
#
base_model = ResNet50V2(include_top=False)
base_model.trainable = False
inputs = tf.keras.layers.Input(shape=(224, 224, 3), name="input_layer")
# NOTE: the training generator already rescales images to [0, 1] via ImageDataGenerator(rescale=1/255),
# so no extra Rescaling layer is added here and the base model is applied to the inputs directly
x = base_model(inputs)
print(f"Shape after base_model:{x.shape}")
x = tf.keras.layers.GlobalAveragePooling2D(name="gloabl_average2D")(x)
print(f"Global average 2D pooling shape:{x.shape}")
# x = Dense(128,activation='relu')(x)
outputs = tf.keras.layers.Dense(5, activation="softmax", name="output_layer")(x)
model_0 = tf.keras.Model(inputs, outputs)
model_0.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"])
history_transferlearning_0 = model_0.fit(
train_data,
epochs=10,
steps_per_epoch=len(train_data),
validation_data=valid_data,
validation_steps=int(0.25 * len(valid_data)),
callbacks=[
create_tensorboard_callback(
"transfer_learning", "transfer_learning_featureextraction"
)
],
)
# #### Tweaking the base model
# * In the second model we add a Dropout layer to see its impact on accuracy compared with the baseline model created above.
# * We notice that the accuracy decreased slightly.
# * We can run further experiments to increase the accuracy, e.g. by using **data augmentation** or another pre-trained model such as **MobileNetV2**.
base_model = ResNet50V2(include_top=False)
base_model.trainable = False
inputs = tf.keras.layers.Input(shape=(224, 224, 3), name="input_layer")
# as above, the generator already rescales the images, so the base model is applied to the inputs directly
x = base_model(inputs)
print(f"Shape after base_model:{x.shape}")
x = tf.keras.layers.GlobalAveragePooling2D(name="gloabl_average2D")(x)
print(f"Global average 2D pooling shape:{x.shape}")
# x = Dense(128,activation='relu')(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(5, activation="softmax", name="output_layer")(x)
model_1 = tf.keras.Model(inputs, outputs)
model_1.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"])
history_transferlearning_1 = model_1.fit(
train_data,
epochs=10,
steps_per_epoch=len(train_data),
validation_data=valid_data,
validation_steps=int(0.25 * len(valid_data)),
callbacks=[
create_tensorboard_callback(
"transfer_learning", "transfer_learning_featureextraction2"
)
],
)
# ##### Plotting Loss and Accuracy curves
# **Visualization**: plotting the loss and accuracy on the training and validation data.
def plot_loss_curves(history):
loss = history.history["loss"]
val_loss = history.history["val_loss"]
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
epochs = range(len(history.history["loss"]))
# plotloss
plt.plot(epochs, loss, label="training_loss")
plt.plot(epochs, val_loss, label="val_loss")
plt.title("loss")
plt.xlabel("epochs")
plt.legend()
# plotaccuracy
plt.figure()
plt.plot(epochs, accuracy, label="training_accuracy")
plt.plot(epochs, val_accuracy, label="val_accuracy")
plt.title("accuracy")
plt.xlabel("epochs")
plt.legend()
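# A minimal usage sketch of the helper above for the two training runs:
plot_loss_curves(history_transferlearning_0)
plot_loss_curves(history_transferlearning_1)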
# find indices of train_data
train_data.class_indices
# ## Testing the Model
# * Get **predictions** on images from the model.
# * Here we evaluate the model one image at a time.
# * The code below can be converted into a function so we can reuse it without rewriting the whole block (a sketch of such a helper is added after these examples).
category = {0: "Apple", 1: "Banana", 2: "Grape", 3: "Mango", 4: "Strawberry"}
img_ = image.load_img(
"/content/Fruits_Classification/valid/Mango/Mango (1306).jpeg",
target_size=(224, 224),
)
img_array = image.img_to_array(img_)
print(img_array)
img_processed = np.expand_dims(img_array, axis=0)
img_processed /= 255
prediction = model_0.predict(img_processed)
print(prediction)
index = np.argmax(prediction)
print(index)
plt.title("Prediction - {}".format(category[index]))
plt.imshow(img_array)
img_ = image.load_img(
"//content/Fruits_Classification/valid/Banana/Banana (2955).jpeg",
target_size=(224, 224),
)
img_array = image.img_to_array(img_)
print(img_array)
img_processed = np.expand_dims(img_array, axis=0)
img_processed /= 255
prediction = model_0.predict(img_processed)
print(prediction)
index = np.argmax(prediction)
print(index)
plt.title("Prediction - {}".format(category[index]))
plt.imshow(img_array)
img_ = image.load_img(
"/content/Fruits_Classification/test/Banana/Banana (3652).jpeg",
target_size=(224, 224),
)
img_array = image.img_to_array(img_)
print(img_array)
img_processed = np.expand_dims(img_array, axis=0)
img_processed /= 255
prediction = model_0.predict(img_processed)
print(prediction)
index = np.argmax(prediction)
print(index)
plt.title("Prediction - {}".format(category[index]))
plt.imshow(img_array)
img_ = image.load_img(
"/content/Fruits_Classification/test/Mango/Mango (1862).jpeg",
target_size=(224, 224),
)
img_array = image.img_to_array(img_)
print(img_array)
img_processed = np.expand_dims(img_array, axis=0)
img_processed /= 255
prediction = model_0.predict(img_processed)
print(prediction)
index = np.argmax(prediction)
print(index)
plt.title("Prediction - {}".format(category[index]))
plt.imshow(img_array)
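# As noted above, the repeated single-image prediction code can be wrapped in a small helper; a minimal sketch (the function name is our own, assuming model_0 and category from above):
def predict_single_image(img_path, model=model_0):
    # load and preprocess one image exactly as in the cells above
    img_ = image.load_img(img_path, target_size=(224, 224))
    img_array = image.img_to_array(img_)
    img_processed = np.expand_dims(img_array, axis=0) / 255
    prediction = model.predict(img_processed)
    index = np.argmax(prediction)
    plt.title("Prediction - {}".format(category[index]))
    plt.imshow(img_array.astype("uint8"))
    return category[index]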
# ## Model Evaluation
# **Evaluation Metrics** Let's find the other metrics values and those are:-
# * Precision
# * Recall
# * F1 score
# this we'll find with the help of **classification report** and also try to find the **confusion matrix** for all the classes.
len(os.listdir("/content/Fruits_Classification/test/Banana/"))
filedir = "/content/Fruits_Classification/test/Banana"
filedir.split("/")[-1]
# #### Defining the function for Accuracy
# * The function computes the accuracy for each folder in the test directory.
# * Each image in the test data is preprocessed into a tensor with exactly the input shape the model was trained on before being passed for prediction.
def predict_dir(filedir, model):
cols = 3
pos = 0
images = []
total_images = len(os.listdir(filedir))
rows = total_images // cols + 1
true = filedir.split("/")[-1]
for i in sorted(os.listdir(filedir)):
images.append(os.path.join(filedir, i))
for subplot, imgg in enumerate(images):
img_ = image.load_img(imgg, target_size=(224, 224))
img_array = image.img_to_array(img_)
img_processed = np.expand_dims(img_array, axis=0)
img_processed /= 255
prediction = model.predict(img_processed)
index = np.argmax(prediction)
pred = category.get(index)
if pred == true:
pos += 1
accu = pos / total_images
    print(
        "Accuracy for {original}: {:.2f} ({pos}/{total})".format(
            accu, pos=pos, total=total_images, original=true
        )
    )
# ##### **Accuracy** for each class in the test directory: how many of the images have been correctly classified
for i in os.listdir("/content/Fruits_Classification/test"):
# print(i)
predict_dir(os.path.join("/content/Fruits_Classification/test", i), model_0)
# ## Confusion Matrix
# * Check the accuracy for each label in the test dataset using **confusion_matrix** heat map **Visualization**
from tensorflow.keras.preprocessing import image
def labels_confusion_matrix(folder):
mapping = {}
for i, j in enumerate(sorted(os.listdir(folder))):
# print(i)
# print(j)
mapping[j] = i
files = []
real = []
predicted = []
for i in os.listdir(folder):
true = os.path.join(folder, i)
true = true.split("/")[-1]
# print(true)
true = mapping[true]
for j in os.listdir(os.path.join(folder, i)):
img_ = image.load_img(os.path.join(folder, i, j), target_size=(224, 224))
img_array = image.img_to_array(img_)
img_processed = np.expand_dims(img_array, axis=0)
img_processed /= 255
prediction = model_0.predict(img_processed)
index = np.argmax(prediction)
predicted.append(index)
real.append(true)
return real, predicted
def print_confusion_matrix(real, predicted):
total_output_labels = 5
cmap = "turbo"
cm_plot_labels = [i for i in range(5)]
cm = confusion_matrix(y_true=real, y_pred=predicted)
df_cm = pd.DataFrame(cm, cm_plot_labels, cm_plot_labels)
sns.set(font_scale=1.2)
plt.figure(figsize=(15, 10))
s = sns.heatmap(df_cm, fmt="d", annot=True, cmap=cmap)
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.savefig("confusion_matrix.png")
plt.show()
y_true, y_pred = labels_confusion_matrix("/content/Fruits_Classification/test")
print_confusion_matrix(y_true, y_pred)
|
# ## Notebook - Table of Contents
# 1. [**Basic Data Analysis**](#1.-Basic-Data-Analysis)
# 1.1 [**Importing the necessary libraries & loading the data**](#1.1-Importing-the-necessary-libraries-&-loading-the-data)
# 1.2 [**Basic statistics - Number of products, subcategories & gender**](#1.2-Basic-statistics---Number-of-products,-subcategories-&-gender)
# 1.3 [**Frequency of each gender**](#1.3-Frequency-of-each-gender)
# 1.4 [**Distribution of products gender-wise**](#1.4-Distribution-of-products-gender-wise)
# 2. [**Data Preparation**](#2.-Data-Preparation)
# 3. [**Feature extraction using ResNet**](#3.-Feature-extraction-using-ResNet)
# 4. [**Computing the Euclidean distance and recommending similar products**](#4.-Computing-the-Euclidean-distance-and-recommending-similar-products)
# 4.1 [**Loading the extracted features**](#4.1-Loading-the-extracted-features)
# 4.2 [**Distance computation and Recommendation**](#4.2-Distance-computation-and-Recommendation)
# 5. [**Deploying the solution**](#5.-Deploying-the-solution)
# ### 1. Basic Data Analysis
# #### 1.1 Importing the necessary libraries & loading the data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from sklearn.metrics import pairwise_distances
import requests
from PIL import Image
import pickle
from datetime import datetime
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import plotly.figure_factory as ff
import plotly.graph_objects as go
import plotly.express as px
# import streamlit as st
# use the below library while displaying the images in jupyter notebook
from IPython.display import display, Image
fashion_df = pd.read_csv("/kaggle/input/fashion-images/data/fashion.csv")
fashion_df
# #### 1.2 Basic statistics - Number of products, subcategories & gender
print("Total number of products : ", fashion_df.shape[0])
print("Total number of unique subcategories : ", fashion_df["SubCategory"].nunique())
print("Total number of unique gender types : ", fashion_df["Gender"].nunique())
# #### 1.3 Frequency of each gender
fashion_df["Gender"].value_counts()
# #### 1.4 Distribution of products gender-wise
plot = sns.countplot(fashion_df["Gender"])
plt.title("Distribution of articles gender-wise")
plt.xlabel("Gender type")
plt.ylabel("Number of products")
plot.set_xticklabels(plot.get_xticklabels())
plt.show()
# ### 2. Data Preparation
apparel_boys = fashion_df[fashion_df["Gender"] == "Boys"]
apparel_girls = fashion_df[fashion_df["Gender"] == "Girls"]
footwear_men = fashion_df[fashion_df["Gender"] == "Men"]
footwear_women = fashion_df[fashion_df["Gender"] == "Women"]
# ### 3. Feature extraction using ResNet
# **For Gender - Men**
img_width, img_height = 224, 224
# top_model_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
train_data_dir = "/kaggle/input/fashion-images/data/Footwear/Men/Images/"
nb_train_samples = 811
epochs = 50
batch_size = 1
def extract_features():
Itemcodes = []
datagen = ImageDataGenerator(rescale=1.0 / 255)
model = applications.ResNet50(include_top=False, weights="imagenet")
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False,
)
for i in generator.filenames:
Itemcodes.append(i[(i.find("/") + 1) : i.find(".")])
extracted_features = model.predict_generator(
generator, nb_train_samples // batch_size
)
extracted_features = extracted_features.reshape((811, 100352))
np.save(open("./Men_ResNet_features.npy", "wb"), extracted_features)
np.save(open("./Men_ResNet_feature_product_ids.npy", "wb"), np.array(Itemcodes))
a = datetime.now()
extract_features()
print("Time taken in feature extraction", datetime.now() - a)
# ### 4. Computing the Euclidean distance and recommending similar products
# #### 4.1 Loading the extracted features
extracted_features = np.load("/kaggle/working/Men_ResNet_features.npy")
Productids = np.load("/kaggle/working/Men_ResNet_feature_product_ids.npy")
men = footwear_men.copy()
# men = pd.read_csv('./footwear_men.csv')
df_Productids = list(men["ProductId"])
Productids = list(Productids)
# #### 4.2 Distance computation and Recommendation
def get_similar_products_cnn(product_id, num_results):
doc_id = Productids.index(product_id)
pairwise_dist = pairwise_distances(
extracted_features, extracted_features[doc_id].reshape(1, -1)
)
indices = np.argsort(pairwise_dist.flatten())[0:num_results]
pdists = np.sort(pairwise_dist.flatten())[0:num_results]
print("=" * 20, "input product image", "=" * 20)
ip_row = men[["ImageURL", "ProductTitle"]].loc[
men["ProductId"] == int(Productids[indices[0]])
]
# print(ip_row.head())
for indx, row in ip_row.iterrows():
display(Image(url=row["ImageURL"], width=224, height=224, embed=True))
print("Product Title: ", row["ProductTitle"])
print("\n", "=" * 20, "Recommended products", "=" * 20)
for i in range(1, len(indices)):
rows = men[["ImageURL", "ProductTitle"]].loc[
men["ProductId"] == int(Productids[indices[i]])
]
for indx, row in rows.iterrows():
display(Image(url=row["ImageURL"], width=224, height=224, embed=True))
print("Product Title: ", row["ProductTitle"])
print("Euclidean Distance from input image:", pdists[i])
get_similar_products_cnn("13683", 5)
# **NOTE** - The above feature extraction process can be repeated for the other genders (Women, Boys and Girls) as well, so let's extract features for each of them one by one.
# **For Gender - Women**
img_width, img_height = 224, 224
# top_model_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
train_data_dir = "/kaggle/input/fashion-images/data/Footwear/Women/Images/"
nb_train_samples = 769
epochs = 50
batch_size = 1
def extract_features():
Itemcodes = []
datagen = ImageDataGenerator(rescale=1.0 / 255)
model = applications.ResNet50(include_top=False, weights="imagenet")
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False,
)
for i in generator.filenames:
Itemcodes.append(i[(i.find("/") + 1) : i.find(".")])
extracted_features = model.predict_generator(
generator, nb_train_samples // batch_size
)
extracted_features = extracted_features.reshape((769, 100352))
np.save(open("./Women_ResNet_features.npy", "wb"), extracted_features)
np.save(open("./Women_ResNet_feature_product_ids.npy", "wb"), np.array(Itemcodes))
a = datetime.now()
extract_features()
print("Time taken in feature extraction", datetime.now() - a)
# **For Gender - Boys**
img_width, img_height = 224, 224
# top_model_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
train_data_dir = "/kaggle/input/fashion-images/data/Apparel/Boys/Images"
nb_train_samples = 759
epochs = 50
batch_size = 1
def extract_features():
Itemcodes = []
datagen = ImageDataGenerator(rescale=1.0 / 255)
model = applications.ResNet50(include_top=False, weights="imagenet")
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False,
)
for i in generator.filenames:
Itemcodes.append(i[(i.find("/") + 1) : i.find(".")])
extracted_features = model.predict_generator(
generator, nb_train_samples // batch_size
)
extracted_features = extracted_features.reshape((759, 100352))
np.save(open("./Boys_ResNet_features.npy", "wb"), extracted_features)
np.save(open("./Boys_ResNet_feature_product_ids.npy", "wb"), np.array(Itemcodes))
a = datetime.now()
extract_features()
print("Time taken in feature extraction", datetime.now() - a)
# **For Gender - Girls**
img_width, img_height = 224, 224
# top_model_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
train_data_dir = "/kaggle/input/fashion-images/data/Apparel/Girls/Images"
nb_train_samples = 567
epochs = 50
batch_size = 1
def extract_features():
Itemcodes = []
datagen = ImageDataGenerator(rescale=1.0 / 255)
model = applications.ResNet50(include_top=False, weights="imagenet")
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False,
)
for i in generator.filenames:
Itemcodes.append(i[(i.find("/") + 1) : i.find(".")])
extracted_features = model.predict_generator(
generator, nb_train_samples // batch_size
)
extracted_features = extracted_features.reshape((567, 100352))
np.save(open("./Girls_ResNet_features.npy", "wb"), extracted_features)
np.save(open("./Girls_ResNet_feature_product_ids.npy", "wb"), np.array(Itemcodes))
a = datetime.now()
extract_features()
print("Time taken in feature extraction", datetime.now() - a)
# ### 5. Deploying the solution
boys_extracted_features = np.load("/kaggle/working/Boys_ResNet_features.npy")
boys_Productids = np.load("/kaggle/working/Boys_ResNet_feature_product_ids.npy")
girls_extracted_features = np.load("/kaggle/working/Girls_ResNet_features.npy")
girls_Productids = np.load("/kaggle/working/Girls_ResNet_feature_product_ids.npy")
men_extracted_features = np.load("/kaggle/working/Men_ResNet_features.npy")
men_Productids = np.load("/kaggle/working/Men_ResNet_feature_product_ids.npy")
women_extracted_features = np.load("/kaggle/working/Women_ResNet_features.npy")
women_Productids = np.load("/kaggle/working/Women_ResNet_feature_product_ids.npy")
fashion_df["ProductId"] = fashion_df["ProductId"].astype(str)
def get_similar_products_cnn(product_id, num_results):
if fashion_df[fashion_df["ProductId"] == product_id]["Gender"].values[0] == "Boys":
extracted_features = boys_extracted_features
Productids = boys_Productids
elif (
fashion_df[fashion_df["ProductId"] == product_id]["Gender"].values[0] == "Girls"
):
extracted_features = girls_extracted_features
Productids = girls_Productids
elif fashion_df[fashion_df["ProductId"] == product_id]["Gender"].values[0] == "Men":
extracted_features = men_extracted_features
Productids = men_Productids
elif (
fashion_df[fashion_df["ProductId"] == product_id]["Gender"].values[0] == "Women"
):
extracted_features = women_extracted_features
Productids = women_Productids
Productids = list(Productids)
doc_id = Productids.index(product_id)
pairwise_dist = pairwise_distances(
extracted_features, extracted_features[doc_id].reshape(1, -1)
)
indices = np.argsort(pairwise_dist.flatten())[0:num_results]
pdists = np.sort(pairwise_dist.flatten())[0:num_results]
print("=" * 20, "input product details", "=" * 20)
ip_row = fashion_df[["ImageURL", "ProductTitle"]].loc[
fashion_df["ProductId"] == Productids[indices[0]]
]
for indx, row in ip_row.iterrows():
display(Image(url=row["ImageURL"], width=224, height=224, embed=True))
print("Product Title: ", row["ProductTitle"])
print("\n", "=" * 20, "Recommended products", "=" * 20)
for i in range(1, len(indices)):
rows = fashion_df[["ImageURL", "ProductTitle"]].loc[
fashion_df["ProductId"] == Productids[indices[i]]
]
for indx, row in rows.iterrows():
display(Image(url=row["ImageURL"], width=224, height=224, embed=True))
print("Product Title: ", row["ProductTitle"])
print("Euclidean Distance from input image:", pdists[i])
get_similar_products_cnn("21030", 5)
get_similar_products_cnn("18181", 5)
get_similar_products_cnn("37633", 5)
|
# In this notebook, I will show the usage of the post-processing discussed here: https://www.kaggle.com/c/google-quest-challenge/discussion/130083
# I also show how I analyze the performance of the post-processing (PP) on every column, which gives us the `ban_list`
import pandas as pd
import numpy as np
from scipy.stats.mstats import hmean
from scipy.stats import spearmanr
from functools import partial
# suppress scientific notation in numpy and pandas
np.set_printoptions(suppress=True)
pd.options.display.float_format = "{:.6f}".format
pd.set_option("display.max_columns", None)
# # Postprocess
target_columns = (
pd.read_csv(f"../input/ensemble-data/fold_0_labels.csv")
.iloc[:, 1:]
.columns.tolist()
)
target_columns
# labels.npy stores the frequency of every label for every column
classes = np.load("../input/labels/labels.npy", allow_pickle=True)
prior_freqs_list = [
np.array([classes[i][key] for key in sorted(classes[i])])
for i in range(len(classes))
]
prior_probs_list = [freqs / sum(freqs) for freqs in prior_freqs_list]
prior_probs_list
def deal_column(s: np.ndarray, freq):
"""
the idea is illustrated here: https://www.kaggle.com/c/google-quest-challenge/discussion/130083
    s is the original predictions, and freq is the count of each label, ordered from the smallest label to the largest.
    Example:
    If a column only has 3 labels: 0, 1/3, 2/3 and the distribution is [0.5, 0.2, 0.3],
    assume the original prediction s for this column is [0.01,0.03,0.05,0.02,0.07,0.04,0.09,0.0,0.08,0.06].
    This method will map the lowest 5 predictions to the lowest value (0.0 here) because theoretically this test set has 10*0.5=5 examples labeled 0.
The processing for labels 1/3 and 2/3 is similar, and the output will be:
[0.0,0.0,0.05,0.0,0.07,0.0,0.07,0.0,0.07,0.05]
"""
res = s.copy() # use a copy to return
d = {i: v for i, v in enumerate(s)} # <index, original_value>
d = sorted(d.items(), key=lambda item: item[1])
j = 0
for i in range(len(freq)):
if freq[i] > 0 and j < len(d):
fixed_value = d[j][1]
while freq[i] > 0:
res[d[j][0]] = fixed_value
freq[i] -= 1
j += 1
return res
# prob is the distribution of the column in the training set, n is the number of examples in the test set
def estimate_frequency(prob: np.ndarray, n):
tmp = prob * n
freq = [int(round(t)) for t in tmp]
    # multiplying prob by the number of examples and rounding does not guarantee that the frequencies sum to the number of examples
    # here we consider the rounding error, e.g. round(1.9)=2 gives an error of 0.1, while round(1.5)=2 gives an error of 0.5
confidence = {
i: np.abs(0.5 - (x - int(x))) for i, x in enumerate(tmp)
} # the smaller the error, the higher the confidence we have in round
confidence = sorted(confidence.items(), key=lambda item: item[1])
# fix frequency according to confidence of 'round' operation
fix_order = [idx for idx, _ in confidence]
idx = 0
s = np.sum(freq)
# fix the frequency of every label, until the sum is #example
while s != n:
if s > n:
freq[fix_order[idx]] -= 1
else:
freq[fix_order[idx]] += 1
s = np.sum(freq)
# theoretically we can fix the freq in one round, but here we use a loop
idx = (idx + 1) % len(fix_order)
# if the resulting freq only has 1 label/class, we change it to 2 labels: one has n-1 examples and the other has 1 example
if np.sum(np.array(freq) > 0) < 2: # in case there is only one class
freq[0], freq[len(freq) - 1] = n - 1, 1
return freq
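# A quick sanity check of estimate_frequency on the toy distribution from the docstring above (our own example, not from the original notebook):
print(estimate_frequency(np.array([0.5, 0.2, 0.3]), 10))  # expected output: [5, 2, 3]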
def align(predictions: np.ndarray, ban_list=None) -> np.ndarray:
num_samples = predictions.shape[0] # number of examples of test set
predictions_new = predictions.copy()
for i in range(30):
# deal with every column but skip the columns that post-processing won't improve the score
if ban_list is not None and i in ban_list:
continue
frequency = estimate_frequency(prior_probs_list[i], num_samples)
predictions_new[:, i] = deal_column(predictions[:, i], frequency)
return predictions_new
def compute_spearmanr(trues, preds):
rhos = []
for col_trues, col_pred in zip(trues.T, preds.T):
rhos.append(spearmanr(col_trues, col_pred).correlation)
return np.mean(rhos).item()
def cal(arr1, arr2): # calculate column-wise scores
return np.array(
[
compute_spearmanr(arr1[:, i].reshape(-1, 1), arr2[:, i].reshape(-1, 1))
for i in range(30)
]
)
diffs = pd.DataFrame(columns=target_columns)
for FOLD in range(5):
# Read csv files
labels = pd.read_csv(f"../input/ensemble-data/fold_{FOLD}_labels.csv").iloc[:, 1:]
base = pd.read_csv(
f"../input/ensemble-data/bert_base_uncased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
wwm_uncased = pd.read_csv(
f"../input/ensemble-data/wwm_uncased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
wwm_cased = pd.read_csv(
f"../input/ensemble-data/wwm_cased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
large_uncased = pd.read_csv(
f"../input/ensemble-data/large_uncased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
roberta = pd.read_csv(
f"../input/ensemble-data/roberta_large_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
ps = [
base.values,
wwm_uncased.values,
wwm_cased.values,
large_uncased.values,
roberta.values,
]
mv = np.average(ps, axis=0)
original_scores = cal(labels.values, mv)
# post-processing
mv_1 = mv.copy()
mv_1 = align(mv_1)
relative_scores = cal(labels.values, mv_1) - original_scores
row = pd.DataFrame(relative_scores).T
row.columns = target_columns
    diffs = pd.concat([diffs, row], ignore_index=True)
diffs.index = [f"fold-{n}" for n in range(5)]
diffs
# applying post-processing to the following columns would lower the scores; the numbers are the indices of the columns in target_columns
ban_list = [0, 1, 3, 4, 6, 10, 16, 17, 18] + list(range(20, 30))
scores, post_scores, post_ban_scores = [], [], []
# test the performance of PP
for FOLD in range(5):
# Read csv files
labels = pd.read_csv(f"../input/ensemble-data/fold_{FOLD}_labels.csv").iloc[:, 1:]
base = pd.read_csv(
f"../input/ensemble-data/bert_base_uncased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
wwm_uncased = pd.read_csv(
f"../input/ensemble-data/wwm_uncased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
wwm_cased = pd.read_csv(
f"../input/ensemble-data/wwm_cased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
large_uncased = pd.read_csv(
f"../input/ensemble-data/large_uncased_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
roberta = pd.read_csv(
f"../input/ensemble-data/roberta_large_fold_{FOLD}_preds.csv"
).iloc[:, 1:]
ps = [
base.values,
wwm_uncased.values,
wwm_cased.values,
large_uncased.values,
roberta.values,
]
mv = np.average(ps, axis=0)
scores.append(compute_spearmanr(labels.values, mv))
# post-processing
mv_1 = mv.copy()
mv_2 = mv.copy()
mv_1 = align(mv_1)
mv_2 = align(mv_2, ban_list)
post_scores.append(compute_spearmanr(labels.values, mv_1))
post_ban_scores.append(compute_spearmanr(labels.values, mv_2))
print(
f"original score: {np.mean(scores)}\npost without ban: {np.mean(post_scores)}\npost with ban: {np.mean(post_ban_scores)}"
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
url = "https://en.wikipedia.org/wiki/List_of_Indians_by_net_worth"
df = pd.read_html(url)
df
df[0]
df[0].head()
df[0].tail()
df[0].info()
df[0].isnull().sum()
df[0].drop("Wealth Change", axis=1, inplace=True)
df[0].columns
df[0].describe()
df[0].to_csv("Indian by Net_Worth.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
dataset = pd.read_csv("/kaggle/input/co2-emissions/CO2 Emissions.csv")
dataset.head()
print(dataset.columns)
print(dataset.isna().sum())
print()
print(dataset.isnull().sum())
datasetChoosen = dataset[
[
"Engine Size(L)",
"Cylinders",
"Fuel Consumption City (L/100 km)",
"Fuel Consumption Hwy (L/100 km)",
"Fuel Consumption Comb (L/100 km)",
"Fuel Consumption Comb (mpg)",
]
]
datasetChoosen.head()
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
dataset = pd.read_csv("/kaggle/input/co2-emissions/CO2 Emissions.csv")
datasetChoosen = dataset[
[
"Engine Size(L)",
"Cylinders",
"Fuel Consumption City (L/100 km)",
"Fuel Consumption Hwy (L/100 km)",
"Fuel Consumption Comb (L/100 km)",
"Fuel Consumption Comb (mpg)",
]
]
X = datasetChoosen.values
y = dataset["CO2 Emissions(g/km)"].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(64, activation="relu", input_shape=(6,)),
tf.keras.layers.Dense(64, activation="relu"),
tf.keras.layers.Dense(1),
]
)
model.compile(loss="mse", optimizer="adam")
model.fit(X_train, y_train, epochs=2000, batch_size=32, verbose=0)
y_pred = model.predict(X_test)
score = r2_score(y_test, y_pred)
print("The score with Keras:", score)
forPrediction = np.array([[5.6, 8, 17.5, 12, 15, 19]])
forPrediction = scaler.transform(forPrediction)
prediction = model.predict(forPrediction)[0][0]
print("Predicted CO2 emissions:", prediction)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
data = pd.read_csv("/kaggle/input/tipping/tips.csv")
# to check top five rows
data.head()
data.info()
# to make a linear model plot
sns.lmplot(x="total_bill", y="tip", hue="sex", data=data, height=6, aspect=1.5)
# to add labels
plt.xlabel("total_bill")
plt.ylabel("tip")
plt.title("Multiple regression plot of Tips received Gender wise")
plt.show()
# The regression lines show a linear upward trend, which means the tip increases
# as the total bill increases, and males tend to give larger tips
# to make a linear model plot
sns.lmplot(x="total_bill", y="tip", hue="smoker", data=data, height=6, aspect=1.5)
# to add labels
plt.xlabel("total_bill")
plt.ylabel("tip")
plt.title("Multilinear Regression plot")
plt.show()
# Non-smokers tend to give larger tips
# For linear regression import important libraries
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
# Linear regression works on numeric and binary data, not on categorical or string data,
# so we first convert the smoker column to numeric values. This process is called label
# encoding (map 'Yes' to 1 and 'No' to 0).
# Encoding the categorical data:
# In this waiter tips dataset, the smoker variable is categorical with two possible values,
# 'Yes' and 'No', so we encode it as numeric data using a lambda function.
data["smoker_bin"] = data["smoker"].apply(lambda x: 1 if x == "Yes" else 0)
data.head()
# Define independent variables X(predictor) and Dependent variable y(response)
X = data[["total_bill", "smoker_bin", "size"]]
y = data["tip"]
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# create linear regression model object or call linear model
model = LinearRegression()
# fit the model to training data
model.fit(X_train, y_train)
# predict the response variable for the test data
y_predict = model.predict(X_test)
# to evaluate the performance of model
print("R score:", r2_score(y_test, y_predict))
print("Mean Absolute Error:", mean_absolute_error(y_test, y_predict))
print("Mean Squared Error:", mean_squared_error(y_test, y_predict))
# Create a Linear Regression Plot
sns.lmplot(
x="total_bill",
y="tip",
hue="smoker_bin",
data=data,
height=6,
aspect=1.6,
markers=["o", "x"],
)
# plot labels
plt.xlabel("total bill")
plt.ylabel("tip")
plt.title("Multiple Linear Regression model")
plt.show()
data.day.unique()
# scikit-learn provides a LabelEncoder for this
from sklearn.preprocessing import LabelEncoder
# create a label encoder object
le = LabelEncoder()
# Encode day column
data["day_encoded"] = le.fit_transform(data["day"])
# to print unique value of encoded day column
print(data["day_encoded"].unique())
# To run multiple linear regression
# Define the predictor/regressor and response/regressed variables
X = data[["total_bill", "size", "day_encoded"]]
y = data["tip"]
# Now split data into train test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# create a linear regression model object
model = LinearRegression()
# now fit the model to training data
model.fit(X_train, y_train)
# predict response variable on test data
y_predict = model.predict(X_test)
# R2 and mean absolute error
print("R2:", r2_score(y_test, y_predict))
print("Mean_Absolute_Error:", mean_absolute_error(y_test, y_predict))
# Create a Multiple Regression Plot
sns.lmplot(x="total_bill", y="tip", hue="day_encoded", data=data, height=6, aspect=1.6)
# plot labels
plt.xlabel("total_bill")
plt.ylabel("tip")
plt.title("Multiple Regression Model Plot")
plt.show()
# Inverse label encoding to figure out which code was assigned to which day
# import the LabelEncoder class from the sklearn library
from sklearn.preprocessing import LabelEncoder
# create label encoder object
le = LabelEncoder()
# Encode day column through le.fit_transform
data["day_encoded"] = le.fit_transform(data["day"])
# now do inverse label encoding through le.inverse_transform
data["day_name"] = le.inverse_transform(data["day_encoded"])
# to print out values
print(data[["day", "day_encoded", "day_name"]].head(10))
# However, this only maps the codes back to labels row by row; it does not directly show which code corresponds to which day (see the sketch below)
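# A minimal sketch of recovering the full mapping, assuming the fitted LabelEncoder `le`
# from above: le.classes_ lists the original labels in the order of their integer codes.
day_code_map = dict(zip(le.classes_, le.transform(le.classes_)))
print(day_code_map)  # e.g. {'Fri': 0, 'Sat': 1, 'Sun': 2, 'Thur': 3}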
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
import itertools
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# **Loading the data**
df = pd.read_csv(
"/kaggle/input/a-fake-news-dataset-around-the-syrian-war/FA-KES-Dataset.csv",
encoding="latin1",
)
df.head()
# **Let's do some checks before starting to work on our dataset**
# Count NaN or missing values in DataFrame
df.isnull().sum().sum()
print("There are {} rows and {} columns in train".format(df.shape[0], df.shape[1]))
print(df.article_content.describe())
# * We have repeated rows in our dataset
# Find Duplicate Rows based on all columns
ddf = df[df.duplicated()]
print(ddf)
# Duplicated rows might affect our results, so we should remove them.
df.drop_duplicates(keep=False, inplace=True)
ddf = df[df.duplicated()]
print(ddf)
# Now we can move forward in our task!
# It's better to start with understanding how our dataset is distributed according to the label (0/1)
# Show Labels distribution
df["labels"].value_counts(normalize=True)
# Our dataset is a bit unbalanced towards real news (1)
sns.countplot(x="labels", data=df)
# **Exploratory Data Analysis of News**
df["source"].value_counts().plot(kind="barh")
# Here we can see the news sources ordered by count
df.groupby(["source", "labels"]).size().unstack().plot(kind="bar", stacked=False)
plt.figure(figsize=(20, 10))
plt.show()
# This shows how each source contributes to real and fake news
# We will do some very basic analysis: character-level, word-level, and sentence-level analysis.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
true_len = df[df["labels"] == 1]["article_content"].str.len()
ax1.hist(true_len, color="green")
ax1.set_title("Real News")
fake_len = df[df["labels"] == 0]["article_content"].str.len()
ax2.hist(fake_len, color="red")
ax2.set_title("Fake News")
fig.suptitle("Characters in article")
plt.show()
# Number of words in an article
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
true_len = df[df["labels"] == 1]["article_content"].str.split().map(lambda x: len(x))
ax1.hist(true_len, color="green")
ax1.set_title("Real News")
fake_len = df[df["labels"] == 0]["article_content"].str.split().map(lambda x: len(x))
ax2.hist(fake_len, color="red")
ax2.set_title("Fake News")
fig.suptitle("Words in an article")
plt.show()
# Average word length in an article
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
word = (
df[df["labels"] == 1]["article_content"]
.str.split()
.apply(lambda x: [len(i) for i in x])
)
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax1, color="green")
ax1.set_title("Real")
word = (
df[df["labels"] == 0]["article_content"]
.str.split()
.apply(lambda x: [len(i) for i in x])
)
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax2, color="red")
ax2.set_title("Fake")
fig.suptitle("Average word length in each article")
# The Most common words in Real news
mfreq = pd.Series(
" ".join(df[df["labels"] == 1]["article_content"]).split()
).value_counts()[:25]
mfreq
vect = TfidfVectorizer(use_idf=True, max_df=0.40, min_df=0.1, stop_words="english").fit(
df[df["labels"] == 1]["article_content"]
)
len(vect.get_feature_names())
list(vect.vocabulary_.keys())[:10]
# Wordcloud for words in real news after some cleaning and deleting stop words using TfidfVectorizer
true_tfidf = list(vect.vocabulary_.keys())
wordcloud = WordCloud(width=1600, height=800).generate(str(true_tfidf))
# plot word cloud image.
plt.figure(figsize=(20, 10), facecolor="k")
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# And let's see the most common words in fake news
mfreq = pd.Series(
" ".join(df[df["labels"] == 0]["article_content"]).split()
).value_counts()[:25]
mfreq
vect = TfidfVectorizer(use_idf=True, max_df=0.40, min_df=0.1, stop_words="english").fit(
df[df["labels"] == 0]["article_content"]
)
len(vect.get_feature_names())
# Wordcloud for words in fake news after some cleaning and deleting stop words using TfidfVectorizer
fake_tfidf = list(vect.vocabulary_.keys())
wordcloud = WordCloud(width=1600, height=800).generate(str(fake_tfidf))
# plot word cloud image.
plt.figure(figsize=(20, 10), facecolor="k")
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# Let's see the wordcloud for the whole dataset's article_content
# Initialize TfidfVectorizer
tfidf_vect = TfidfVectorizer(stop_words="english", max_df=0.4, min_df=0.1).fit(
df["article_content"]
)
len(tfidf_vect.get_feature_names())
txt_tfidf = list(tfidf_vect.vocabulary_.keys())
wordcloud = WordCloud(width=1600, height=800).generate(str(txt_tfidf))
# plot word cloud image.
plt.figure(figsize=(20, 10), facecolor="k")
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# **Classifier: Features and Design**
# * To train supervised classifiers, we first transformed the “article_content” into a vector of numbers. We explored vector representations such as TF-IDF weighted vectors.
# * After having these vector representations of the text, we can train supervised classifiers and predict the “labels” (0/1) for unseen “article_content”.
# After all the above data transformation, now that we have all the features and labels, it is time to train the classifiers. There are a number of algorithms we can use for this type of problem.
# Naive Bayes Classifier: the one most suitable for word counts is the multinomial variant:
tfidf = TfidfVectorizer(
sublinear_tf=True,
min_df=5,
encoding="latin-1",
ngram_range=(1, 2),
stop_words="english",
)
features = tfidf.fit_transform(df.article_content).toarray()
labels = df.labels
features.shape
# Naive Bayes Classifier: the one most suitable for word counts is the multinomial variant:
X_train, X_test, y_train, y_test = train_test_split(
df["article_content"], df["labels"], random_state=0
)
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(X_train)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
clf = MultinomialNB().fit(X_train_tfidf, y_train)
# Let's try predicting on recent news ???
print(
clf.predict(
count_vect.transform(
[
"The Syrian army has taken control of a strategic northwestern crossroads town, its latest gain in a weeks-long offensive against the country's last major rebel bastion."
]
)
)
)
# Awesome! That's right.
# **Model Selection**
# We are now ready to experiment with different machine learning models, evaluate their accuracy and find the source of any potential issues.
# We will benchmark the following four models:
# * Logistic Regression
# * (Multinomial) Naive Bayes
# * Linear Support Vector Machine
# * Random Forest
models = [
RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
LinearSVC(),
MultinomialNB(),
LogisticRegression(random_state=0),
]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring="accuracy", cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=["model_name", "fold_idx", "accuracy"])
sns.boxplot(x="model_name", y="accuracy", data=cv_df)
sns.stripplot(
x="model_name",
y="accuracy",
data=cv_df,
size=8,
jitter=True,
edgecolor="gray",
linewidth=2,
)
plt.show()
cv_df.groupby("model_name").accuracy.mean()
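# As a quick follow-up (a minimal sketch, not part of the original benchmark): hold out a
# test split of the TF-IDF features built above and inspect the accuracy and confusion
# matrix of one of the stronger models. LinearSVC is chosen here only as an illustration;
# any of the four models could be substituted.
X_tr, X_te, y_tr, y_te = train_test_split(features, labels, test_size=0.25, random_state=0)
svc_model = LinearSVC()
svc_model.fit(X_tr, y_tr)
svc_preds = svc_model.predict(X_te)
print("Hold-out accuracy:", accuracy_score(y_te, svc_preds))
print("Confusion matrix:\n", confusion_matrix(y_te, svc_preds))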
|
# # CNNs - Basics
# The task now is to classify which type of land use we can see from satellite images. We will later use this image dataset to learn about some more advanced techniques for CNNs (and NN models in general), such as transfer learning and data augmentation.
# 
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from glob import glob
# Before we take a look at our new dataset, let's briefly talk about classification.
# ### Probabilistic classification and softmax functions
# Before building neural network models, we briefly discuss our task and necessary steps to adjust the model's output to match the task.
# Our aim is to predict the probability of a given sample belonging to each of our 10 classes. A standard neural network doesn't know anything about probability, so it will just predict some random numbers. Let's simulate this.
p = np.random.rand(10) * 10
plt.bar(np.arange(10), p)
# Probabilities need to sum to one. Ours don't.
p.sum()
# A common way of normalizing outputs for neural networks is the use of a softmax function in what is called a softmax layer. This layer is typically the last layer in a classification network. A particular advantage compared to just normalizing by `p / p.sum()` is the ability to react to low stimulation (e.g. blurry images) with a rather uniform distribution and to high stimulation (i.e. large numbers, e.g. crisp images) with probabilities close to 0 and 1.
def softmax(a):
return np.exp(a) / np.exp(a).sum()
p = softmax(p)
p.sum()
plt.bar(np.arange(10), p)
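# A quick illustration of the point above (a small sketch, not from the original notebook):
# plain normalization is insensitive to the overall scale of the activations, while softmax
# pushes the distribution towards uniform for small activations and towards a one-hot-like
# distribution for large ones.
logits = np.random.rand(10)
for scale in [0.1, 1.0, 10.0]:  # low, medium and high "stimulation"
    scaled = logits * scale
    print(
        f"scale={scale}: softmax max={softmax(scaled).max():.2f}, "
        f"plain normalization max={(scaled / scaled.sum()).max():.2f}"
    )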
# So now we have a prediction given by our softmax layer. Assume that we also have a one-hot encoded target vector `y`, for an observed instance of the 7-th class.
y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0])
plt.bar(np.arange(10), p)
plt.bar(np.arange(10), y, zorder=0.5)
# To train a neural network we need a loss function that tells us how good our prediction is. For this we will use the log-loss or the cross-entropy. Let's illustrate this for a single class for a true label.
a = np.linspace(0, 1, 100)
true = 1
loss = -true * np.log(a)
plt.plot(a, loss)
# So for a true label, the best prediction (lowest error) is predicting 1. Predicting zero gives the largest error. For a zero label the error would always be zero.
# For several classes we can just sum up the loss over all classes. But of course, the only non-zero values come from the classes where the target is True.
(-y * np.log(p)).sum()
# Now we have a loss function that is differentiable, which means we can use it to train a neural network.
# ## The landuse dataset
# Now, let's finally look at our new dataset.
# The images are split into a train and test directory.
# Inside the train directory we have a randomly ordered list of images.
test_images = sorted(
[
fn.split("/")[-1]
for fn in glob(
"/kaggle/input/kit-predictive-data-analytics-2023-part-2/test/test/*.jpg"
)
]
)
test_images[:5]
len(test_images)
# The `train.csv` file assigns a land use class to each training image.
train_df = pd.read_csv(
"/kaggle/input/kit-predictive-data-analytics-2023-part-2/train.csv"
)
train_df.head()
# Let's take a look at some images to get a feel for the data.
# To do so we can use pyplot's `imread` function.
train_dir = "/kaggle/input/kit-predictive-data-analytics-2023-part-2/train/train/"
test_dir = "/kaggle/input/kit-predictive-data-analytics-2023-part-2/test/test/"
img = plt.imread(f"{train_dir}1.jpg")
img.shape
# So an image is nothing else but a 2D array of numbers, specifically three 2D arrays, one for each color, RGB.
img
# The numbers typically go from 0 to 255. Let's plot one of the images and print its land use class.
plt.imshow(img)
train_df["class"][0]
# The images are pretty low resolution (64x64), so it can be quite hard to see what's going on sometimes.
# To get a better feel for the data, let's plot a bunch of images.
classes = sorted(train_df["class"].unique())
classes
# The cell below plots a random image from each class.
fig, axs = plt.subplots(3, 4, figsize=(15, 12))
rand_idxs = np.random.randint(1, len(train_df) + 1, 9)
for c, ax in zip(classes, axs.flat):
fn = train_df[train_df["class"] == c].sample().iloc[0]["fn"]
img = plt.imread(f"{train_dir}/{fn}")
ax.imshow(img)
ax.set_title(c)
for ax in axs.flat:
ax.set_axis_off()
plt.tight_layout()
# ## Generator classes for reading images
# To use the images in a neural network, we will use Keras' `ImageDataGenerator` class. This has several advantages over just reading all the images and putting them into a numpy array.
# We will randomly split our data into training (80%) and validation (20%) and rescale the images by dividing by 255.
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
validation_split=0.2, rescale=1.0 / 255
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
# Next we create two generators, one for training, one for validation.
train_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
batch_size=32,
x_col="fn",
y_col="class",
target_size=(64, 64),
subset="training",
)
valid_generator = train_datagen.flow_from_dataframe(
train_df,
directory=train_dir,
batch_size=32,
x_col="fn",
y_col="class",
target_size=(64, 64),
subset="validation",
)
test_generator = test_datagen.flow_from_directory(
directory="/kaggle/input/kit-predictive-data-analytics-2023-part-2/",
classes=["test"],
batch_size=32,
target_size=(64, 64),
shuffle=False,
)
# A Python generator can be called similar to a list by just indexing into it. Let's pick the first batch of data.
X, y = train_generator[0]
X.shape, y.shape
# For X we see what we would expect: an array with shape `[batch_size, img_size1, img_size2, channels]`. For `y`, we have a one-hot-encoded vector with the 10 class labels.
train_generator.class_indices
y[:3]
# ## Fully connected model
# As before, let's start with a simple fully connected NN model.
model = tf.keras.Sequential(
[
Input(shape=(64, 64, 3)),
Flatten(),
Dense(512, activation="relu"),
Dense(256, activation="relu"),
Dense(128, activation="relu"),
Dense(10, activation="softmax"),
]
)
model.summary()
# Even though it is a relatively simple model, we end up with more than 6 million parameters to estimate.
model.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
model.fit(train_generator, epochs=3, validation_data=valid_generator)
# We can continue training by executing the same cell again. The `model` instance is not reset by `.fit`. (This is an important behavior to note, for example when training models in a `for`-loop!)
model.fit(train_generator, epochs=3, validation_data=valid_generator)
# With a validation accuracy of around 50%, we definitely do better than random guessing which would give around 10% accuracy.
# But the accuracy is not great for a model with 6 million parameters.
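# A small sketch of the earlier remark about `.fit` not resetting the model (illustrative
# only, with a hypothetical helper): when training several models in a loop, build a fresh
# model inside the loop so that runs do not share weights.
def build_fc_model():
    # returns a new, untrained model each time it is called
    return tf.keras.Sequential(
        [
            Input(shape=(64, 64, 3)),
            Flatten(),
            Dense(128, activation="relu"),
            Dense(10, activation="softmax"),
        ]
    )


# for run in range(3):
#     m = build_fc_model()  # fresh weights for every run
#     m.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
#     m.fit(train_generator, epochs=1, validation_data=valid_generator)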
# ## A first CNN model
# Let's start with a (relatively) simple CNN model with `Conv2D` layers with 32 channels.
model = tf.keras.Sequential(
[
Input(shape=(64, 64, 3)),
Conv2D(32, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Conv2D(32, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Conv2D(32, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Conv2D(32, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Flatten(),
Dense(10, activation="softmax"),
]
)
model.summary()
# This network has a lot fewer parameters compared to our fully-connected network above.
model.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
model.fit(train_generator, epochs=3, validation_data=valid_generator)
# Even after only a few epochs we are already doing a lot better than before.
# ## A bigger CNN model
# Let's train a bigger network by increasing the number of channels, the number of convolutions between each max-pooling and another fully-connected layer at the end.
model = tf.keras.Sequential(
[
Input(shape=(64, 64, 3)),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
Conv2D(128, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(),
Flatten(),
Dense(64, activation="relu"),
Dense(10, activation="softmax"),
]
)
model.summary()
model.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
# Next, let's train the bigger model. **Now is probably a good time to talk about GPUs**.
# Training takes around 5 minutes for a single epoch on my laptop (and around 10 minutes here). On Kaggle, we can activate GPUs in the `Notebook options` area in the sidebar on the right. Note that this will restart the session, which means that you will have to re-execute the relevant cells from above (or activate GPU acceleration before running the notebook).
# With GPU acceleration activated, a single epoch takes around 15-20 seconds here
# **Note**: Using GPUs may require to authorize your Kaggle account via your phone. If you see "Requires phone authorization" in the sidebar, you need to follow that process to enable the GPU (or continue to use CPUs only). You can read more about using GPUs on Kaggle here: https://www.kaggle.com/docs/efficient-gpu-usage.
# You will typically have to restart the notebook from the beginning after activating GPUs, so it might be a good idea to do so before you start to work on the notebook.
h = model.fit(train_generator, epochs=10, validation_data=valid_generator)
plt.plot(h.history["accuracy"][1:])
plt.plot(h.history["val_accuracy"][1:])
plt.plot(h.history["loss"][1:])
plt.plot(h.history["val_loss"][1:])
# Looks like we are getting some overfitting (looking at the loss in particular). One thing to check is whether our learning rate is too high. Let's see what happens if we reduce our learning rate by an order of magnitude.
model.optimizer.lr = 1e-4
h = model.fit(train_generator, epochs=10, validation_data=valid_generator)
plt.plot(h.history["accuracy"][1:])
plt.plot(h.history["val_accuracy"][1:])
plt.plot(h.history["loss"][1:])
plt.plot(h.history["val_loss"][1:])
# We immediately jump to a higher accuracy. At the beginning of training, high learning rates are good for getting close to a minimum, but after that, lower learning rates may be required to settle into it. Eventually we don't want to change our learning rate manually, but rather use a learning rate schedule. For examples, you can take a look at https://keras.io/api/optimizers/learning_rate_schedules/ and https://keras.io/api/callbacks/reduce_lr_on_plateau/.
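# A minimal sketch of the ReduceLROnPlateau callback mentioned above (not wired into the
# training here; the parameter values are just an illustration):
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    monitor="val_loss", factor=0.1, patience=2, min_lr=1e-6
)
# model.fit(train_generator, epochs=10, validation_data=valid_generator, callbacks=[reduce_lr])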
# Still we have encountered overfitting. We already know how to use early stopping to prevent overfitting. In the next notebook, we will take a look at some additional techniques that are sometimes helpful: L2 regularization (weight decay) and Dropout.
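# Likewise, a minimal sketch of the early stopping we already know, plus a Dropout layer as
# a teaser for the next notebook (placement and rates here are illustrative, not tuned):
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=3, restore_best_weights=True
)
# Dropout randomly zeroes a fraction of activations during training, e.g.:
# ..., Flatten(), Dropout(0.5), Dense(64, activation="relu"), ...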
# However, now is a good opportunity to build your first CNN models. To do so, we will next discuss how to create a submission for the landuse dataset.
probs = model.predict(test_generator, verbose=1)
probs.shape
probs[0]
# These are the probabilities for each test image. Now we need to pick a single class (since the predictions will be ranked based on the accuracy on the test set) by selecting the class with the highest predicted probability for each sample.
preds = np.argmax(probs, 1)
preds.shape
preds[:3]
# Next, we need to convert the numbers to the actual class.
pred_classes = [classes[i] for i in preds]
pred_classes[:3]
# Finally we need to put the image name and the classes in a Pandas `dataframe`.
len(test_images)
# Here is a handy function to copy-paste to your notebooks.
def create_submission(model, test_generator, classes, test_images):
probs = model.predict(test_generator)
preds = np.argmax(probs, 1)
pred_classes = [classes[i] for i in preds]
sub = pd.DataFrame({"fn": test_images, "class": pred_classes})
return sub
sub = create_submission(model, test_generator, classes, test_images)
sub.head()
sub.to_csv("submission1.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.metrics import mean_squared_error, mean_absolute_error
plt.style.use("fivethirtyeight")
# take out PJM East which has data from 2002-2018 for the entire east region.
pjme = pd.read_csv(
"/kaggle/input/hourly-energy-consumption/PJME_hourly.csv",
index_col=[0],
parse_dates=[0],
)
pjme.head(3)
color_pal = [
"#F8766D",
"#D39200",
"#93AA00",
"#00BA38",
"#00C17F",
"#00B9E3",
"#619CFF",
"#DB72FB",
]
_ = pjme.plot(style=".", figsize=(15, 5), color=color_pal[0], title="PJM East")
# **Splitting into train and test sets. Let's hold out the data after 2015 (roughly the last four years) for testing.**
split_date = "01-Jan-2015"
pjme_train = pjme.loc[pjme.index <= split_date].copy()
pjme_test = pjme.loc[pjme.index > split_date].copy()
_ = (
pjme_test.rename(columns={"PJME_MW": "TEST SET"})
.join(pjme_train.rename(columns={"PJME_MW": "TRAINING SET"}), how="outer")
.plot(figsize=(15, 5), title="PJM East", style=".")
)
# pjme_test.head(2)
# Create some feature columns for Time series:
def create_features(df, label=None):
"""
    Creates time series features from the dataframe's datetime index.
"""
df["date"] = df.index
df["hour"] = df["date"].dt.hour
df["dayofweek"] = df["date"].dt.dayofweek
df["quarter"] = df["date"].dt.quarter
df["month"] = df["date"].dt.month
df["year"] = df["date"].dt.year
df["dayofyear"] = df["date"].dt.dayofyear
df["dayofmonth"] = df["date"].dt.day
df["weekofyear"] = df["date"].dt.weekofyear
X = df[
[
"hour",
"dayofweek",
"quarter",
"month",
"year",
"dayofyear",
"dayofmonth",
"weekofyear",
]
]
if label:
y = df[label]
return X, y
return X
X_train, y_train = create_features(pjme_train, label="PJME_MW")
X_test, y_test = create_features(pjme_test, label="PJME_MW")
# Modelling data using XGBoost
reg = xgb.XGBRegressor(n_estimators=1000)
reg.fit(
X_train,
y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
early_stopping_rounds=50,
verbose=True,
)
# Looking into feature importance
_ = plot_importance(reg, height=0.9)
pjme_test["MW_Prediction"] = reg.predict(X_test)
pjme_all = pd.concat([pjme_test, pjme_train], sort=False)
#
pjme_test.head(3)
pjme_all.head()
_ = pjme_all[["PJME_MW", "MW_Prediction"]].plot(figsize=(15, 5))
mse = mean_squared_error(y_true=pjme_test["PJME_MW"], y_pred=pjme_test["MW_Prediction"])
print("Mean Squared Error: ", mse)
mAbsErr = mean_absolute_error(
y_true=pjme_test["PJME_MW"], y_pred=pjme_test["MW_Prediction"]
)
print("Mean Absolute Error: ", mAbsErr)
# **Mean absolute percentage error (MAPE):**
# It gives an easy-to-interpret percentage that shows how far off the predictions are.
# MAPE isn't included in the sklearn metrics package, hence a custom function.
def mean_absolute_percentage_error(y_true, y_pred):
"""Calculates MAPE given y_true and y_pred"""
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
mean_absolute_percentage_error(
y_true=pjme_test["PJME_MW"], y_pred=pjme_test["MW_Prediction"]
)
# **Let's look at the first few months of predictions**
# Prediction along with actual
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(15)
_ = pjme_all[["MW_Prediction", "PJME_MW"]].plot(ax=ax, style=["-", "."])
ax.set_xbound(lower="01-01-2015", upper="02-01-2015")
ax.set_ylim(0, 60000)
plot = plt.suptitle("January 2015 prediction vs Actual")
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(15)
_ = pjme_all[["MW_Prediction", "PJME_MW"]].plot(ax=ax, style=["-", "."])
ax.set_xbound(lower="02-01-2015", upper="03-01-2015")
ax.set_ylim(0, 60000)
plot = plt.suptitle("Feb 2015 prediction vs Actual")
# **Let's plot the first week of January and of July (a six-month gap)**
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(15)
_ = pjme_all[["MW_Prediction", "PJME_MW"]].plot(ax=ax, style=["-", "."])
ax.set_xbound(lower="01-01-2015", upper="01-08-2015")
ax.set_ylim(0, 60000)
plot = plt.suptitle("First Week of January Prediction vs Actual")
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(15)
_ = pjme_all[["MW_Prediction", "PJME_MW"]].plot(ax=ax, style=["-", "."])
ax.set_xbound(lower="07-01-2015", upper="07-08-2015")
ax.set_ylim(0, 60000)
plot = plt.suptitle("First Week of July Prediction vs Actual")
# **Worst predicted days and best predicted days**
pjme_test["error"] = pjme_test["PJME_MW"] - pjme_test["MW_Prediction"]
pjme_test["abs_error"] = pjme_test["error"].apply(np.abs)
error_by_day = pjme_test.groupby(["year", "month", "dayofmonth"]).mean()[
["PJME_MW", "MW_Prediction", "error", "abs_error"]
]
error_by_day.sort_values("error", ascending=True).head(
10
)  # days when the forecast was furthest above the actual load
# Worst absolute predicted days
error_by_day.sort_values("abs_error", ascending=False).head(10)
# * Clearly, most of the worst-predicted days fall in months when the season changes
# * Another perspective: several of the worst days fall on or around holidays, when population behavior changes (a sketch of a holiday-indicator feature follows below)
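# A hedged sketch of turning the holiday observation above into a feature (not part of the
# original model); USFederalHolidayCalendar is assumed to cover the relevant holidays for
# the PJM East region.
from pandas.tseries.holiday import USFederalHolidayCalendar

holiday_dates = USFederalHolidayCalendar().holidays(
    start=pjme.index.min(), end=pjme.index.max()
)
is_holiday = pjme_test.index.normalize().isin(holiday_dates)
print("Test-set hours falling on a federal holiday:", int(is_holiday.sum()))
# Such a flag could be added as an extra column in create_features().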
# Best predicted days
error_by_day.sort_values("abs_error", ascending=True).head(10)
# * Six of the best-predicted days are from October, when there are no public holidays. The Northeast also has fairly mild, pleasant weather then in terms of both cold and heat.
# Plot the worst predicted day
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(10)
_ = pjme_all[["MW_Prediction", "PJME_MW"]].plot(ax=ax, style=["-", "."])
ax.set_ylim(0, 60000)
ax.set_xbound(lower="08-13-2016", upper="08-14-2016")
plot = plt.suptitle("Aug 13, 2016 - Worst Predicted Day")
# Best Predicted
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(10)
_ = pjme_all[["MW_Prediction", "PJME_MW"]].plot(ax=ax, style=["-", "."])
ax.set_ylim(0, 60000)
ax.set_xbound(lower="10-03-2016", upper="10-04-2016")
plot = plt.suptitle("Oct 3, 2016 - Best Predicted Day")
|
import darts
import pandas as pd
# # **Loading Dataset**
from darts.datasets import AirPassengersDataset
# Visualizing the Data by converting to dataframe
AirPassengersDataset().load().pd_dataframe()
series_air = AirPassengersDataset().load()
series_air.plot(label="AirPassengers")
# # **Splitting the dataset for training and backtesting**
train, test = series_air.split_before(0.75)
train.plot()
test.plot()
# Clearly this time series is not stationary: the mean is not constant and we can detect an upward trend in the data.
from darts.models.forecasting.arima import ARIMA
from darts.utils.statistics import (
check_seasonality,
plot_acf,
plot_pacf,
remove_seasonality,
remove_trend,
stationarity_test_adf,
)
from darts.utils.statistics import extract_trend_and_seasonality
# **Checks whether the TimeSeries ts is seasonal with period m or not.**
# # **Checking for seasonality and Trend**
check_seasonality(train)
noseasonal = remove_seasonality(train)
stationary_ts = remove_trend(noseasonal)
stationary_ts.plot()
from darts.utils.statistics import stationarity_test_adf
stationarity_test_adf(stationary_ts)
def preprocess(s):
    print("Checking for seasonality")
    print(check_seasonality(s))
    if check_seasonality(s)[0]:
        print("Removing seasonality")
        s = remove_seasonality(s)
    print("An upward trend is spotted, so removing it")
    detrended = remove_trend(s)
    return detrended.plot()
preprocess(train)
# The Autocorrelation Function (ACF) and Partial Autocorrelation Function (PACF) are important tools in time series analysis for understanding the properties of the data and determining the appropriate models to use for forecasting.
# The ACF measures the correlation between a time series and its lagged values. It helps to identify the presence of any repeating patterns or cycles in the data. A strong positive correlation at a specific lag indicates that the data is highly correlated with its past values at that lag, while a strong negative correlation indicates that the data is negatively correlated with its past values at that lag. The ACF can help to determine the order of a Moving Average (MA) model.
# # **Plotting ACF and PACF**
plot_acf(stationary_ts)
# Plots the ACF of ts, highlighting it at lag m, with corresponding significance interval.
# The PACF, on the other hand, measures the correlation between a time series and its lagged values after removing the effect of the intervening lags. It helps to identify the presence of any direct or immediate relationships between the data and its past values. A strong positive correlation at a specific lag indicates that the data is highly correlated with its past values at that lag after removing the effect of the intervening lags, while a strong negative correlation indicates that the data is negatively correlated with its past values at that lag after removing the effect of the intervening lags. The PACF can help to determine the order of an Autoregressive (AR) model.
plot_pacf(stationary_ts)
# Plots the Partial ACF of ts, highlighting it at lag m, with corresponding significance interval.
# # **Building Models**
# # **AutoARIMA**
from darts.models.forecasting.auto_arima import AutoARIMA
model = AutoARIMA()
model.fit(train)
pred = model.predict(n=36)
train.plot(label="actual ")
pred.plot(label="Forecast")
# print("MAPE = {:.2f}%".format(mape(train,pred)))
from darts.models import NBEATSModel
# Neural Basis Expansion Analysis Time Series Forecasting (N-BEATS).
# input_chunk_length (int) – The length of the input sequence fed to the model.
# output_chunk_length (int) – The length of the forecast of the model.
# N-BEATS is a deep neural network built entirely from fully connected layers (no convolutions or recurrence), designed to model time series data in a way that is both accurate and interpretable. The model consists of stacks of fully connected blocks, each of which applies a non-linear transformation to the input time series data.
# Another important feature of N-BEATS is its interpretability. The model can be visualized as a series of "trend" and "seasonality" components, which makes it easy to understand how the model is making its predictions
# # **NBEATSModel**
model = NBEATSModel(
input_chunk_length=24, output_chunk_length=12, n_epochs=100, random_state=0
)
model.fit(train)
pred = model.predict(n=36, series=train)
train.plot(label="actual ")
pred.plot(label="Forecast")
|
import numpy as np
import pandas as pd
import os
import plotly.graph_objs as go
import plotly.express as px
import matplotlib.pyplot as plt
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
df = pd.DataFrame(
{
"Year": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
"Sales": [
283,
288,
336,
388,
406,
412,
416,
435,
428,
435,
462,
452,
474,
476,
497,
487,
523,
528,
532,
552,
],
}
)
fig = go.Figure()
fig.add_trace(go.Scatter(x=df["Year"], y=df["Sales"], mode="lines", name="Sales"))
fig.update_layout(title="Sales over time", xaxis_title="Year", yaxis_title="Sales")
fig.show()
# # Non-stationary. The data appears to be increasing over time, suggesting a positive trend.
df["MA-2"] = df["Sales"].rolling(window=2).mean()
df["MA-4"] = df["Sales"].rolling(window=4).mean()
fig = go.Figure()
trace1 = go.Scatter(x=df["Year"], y=df["Sales"], mode="lines", name="Sales")
trace2 = go.Scatter(x=df["Year"], y=df["MA-2"], mode="lines", name="MA-2")
trace3 = go.Scatter(x=df["Year"], y=df["MA-4"], mode="lines", name="MA-4")
fig = go.Figure(data=[trace1, trace2, trace3])
fig.show()
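# Two-step-ahead forecasts from the rolling means (descriptive note): the one-step forecast
# for year 21 is the latest MA value; for year 22, that forecast stands in for the unknown
# year-21 actual when the window is averaged.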
MA2_forecast_1 = df["MA-2"].iloc[-1]
MA4_forecast_1 = df["MA-4"].iloc[-1]
MA2_forecast_2 = (MA2_forecast_1 + df["Sales"].iloc[-1]) / 2
MA4_forecast_2 = (
MA4_forecast_1 + df["Sales"].iloc[-1] + df["Sales"].iloc[-2] + df["Sales"].iloc[-3]
) / 4
MA2_forecast_2, MA4_forecast_2
train_df, test_df = train_test_split(df["Sales"], test_size=0.2, shuffle=False)
model = ExponentialSmoothing(train_df, trend="add", seasonal=None)
fit = model.fit(optimized=True)
print("Optimal alpha:", fit.params["smoothing_level"])
plt.plot(df.index, df["Sales"], label="Original data")
plt.plot(
test_df.index,
fit.forecast(len(test_df)),
label="Exponential smoothing",
linestyle="--",
)
plt.legend()
plt.show()
model = ExponentialSmoothing(df["Sales"], trend="add", seasonal=None)
fit = model.fit(optimized=True)
forecast = fit.forecast(2)
print("Forecast for next 2 years:", forecast)
plt.plot(df["Sales"].index, df["Sales"], label="Original data")
plt.plot(forecast.index, forecast, label="Exponential smoothing", linestyle="--")
plt.legend()
plt.show()
|
"""Import basic modules."""
import pandas as pd
import numpy as np
"""visualization Tools"""
import altair as alt
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
"""Bokeh (interactive visualization)"""
from bokeh.plotting import figure, show, output_notebook, ColumnDataSource
from bokeh.layouts import row
output_notebook()
"""Plotly visualization ."""
import plotly.offline as py
from plotly.offline import iplot, init_notebook_mode
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
# # NCAA® March Madness: Exploratory Analysis
# 
# # About March Madness
# The NCAA college basketball tournament is a single-elimination tournament that features 68 teams vying to survive three weekends of games to be crowned the national champions. The field used to be 64 teams, but the NCAA has recently added four more teams that play "play in" games to earn a spot in the final field of 64.
# Only 16 teams (the "Sweet Sixteen") make it past the first weekend. The second weekend narrows the field first to the "Elite Eight" and then the "Final Four." The final weekend focuses on the four semifinalists. The two semifinal victors move on to play in the national championship game.
# Ohio State University coach Harold Olsen is usually credited with developing the idea for the tournament in 1939 with the help of the National Association of Basketball Coaches.
# The 68 teams in the tournament include 32 teams that receive automatic bids for winning their respective conferences. The remaining 36 teams are given at-large bids by the NCAA selection committee based upon their performance during the season.
# Once the field is set, the teams are divided into four regions (usually spread geographically through the eastern, western, midwestern, and southern U.S.) and placed into a bracket that lays out the path a team must take to reach the finals. Each team is seeded or ranked within its region, from 1 to 16.
# Higher-seeded teams generally play lower-seeded teams in the beginning. For example, in the first round, each team seeded #1 plays the team seeded #16. This trend continues until upsets begin to occur, at which time brackets can become hard to predict as unexpectedly-good teams (often called "Cinderella" teams) make a run in the tournament.
# [**Source**](https://www.wonderopolis.org/wonder/what-is-march-madness).
# **For better understanding about March Madness visit @parulpandey** [Kernel](https://www.kaggle.com/parulpandey/decoding-march-madness/#data)
# 
# # Introduction
# Although I am not a huge college basketball fan, I find this particular data set fascinating in its richness. Let’s dig in and see what we can learn about the history of March Madness. The analysis below covers both the regular season and tournament statistics. It begins by taking a high-level view of the NCAA championships - who has won them and how. Hopefully along the way, I’ll discover something useful for your models.
# # Data Section 1 - The Basics
# This section provides exploratory data analysis (EDA) of:
# * Team ID's and Team Names
# * Historical tournament seeds
# * Final scores of historical regular season, conference tournament, and NCAA® tournament games
# * Season-level details including dates and region names
#
# Data Section 1 - The Basics ==> [File descriptions](https://www.kaggle.com/c/march-madness-analytics-2020/data)
mteams = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/MTeams.csv"
)
mseasons = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/MSeasons.csv"
)
mtourney_seed = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/MNCAATourneySeeds.csv"
)
mseason_results = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/MRegularSeasonCompactResults.csv"
)
mtourney_results = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/MNCAATourneyCompactResults.csv"
)
conference = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/Conferences.csv"
)
team_conference = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/MTeamConferences.csv"
)
# ## Which Team Wins And Lost The Most Tournaments?
# data preparation
wteam = mtourney_results.rename(
columns={"WTeamID": "TeamID"}
) # rename the WteamID as TeamID for merge with mteams dataframe
win_team = wteam.merge(mteams, on="TeamID") # merge with mteams dataframe
# win_team = win_team.rename(columns={'TeamID':'WTeamID_X'})
temp = win_team["TeamName"].value_counts().head(15).reset_index()
# Create ColumnDataSource from data frame
source = ColumnDataSource(temp)
win_team_list = source.data["index"].tolist()
# Add Plot
p = figure(
y_range=win_team_list,
plot_width=600,
plot_height=320,
title="Most Tournament Wins since 1985",
y_axis_label="Winners",
tools="",
)
p.title.text_font = "helvetica"
p.title.text_font_size = "12pt"
p.title.text_font_style = "bold"
p.hbar(
y="index",
right="TeamName",
height=0.8,
color="blue",
line_color="black",
line_width=1,
fill_alpha=0.7,
source=source,
)
show(p)
# data preparation
lteam = mtourney_results.rename(columns={"LTeamID": "TeamID"})
lost_team = lteam.merge(mteams, on="TeamID")
lost_team = lost_team.rename(columns={"TeamID": "LTeamID_X"})
temp = lost_team["TeamName"].value_counts().head(15).reset_index()
# Create ColumnDataSource from data frame
source = ColumnDataSource(temp)
lost_team_list = source.data["index"].tolist()
# Add Plot
p = figure(
y_range=lost_team_list,
plot_width=600,
plot_height=320,
title="Most Tournament Lost since 1985",
y_axis_label="Runner-Up",
tools="",
)
p.title.text_font = "helvetica"
p.title.text_font_size = "12pt"
p.title.text_font_style = "bold"
p.hbar(
y="index",
right="TeamName",
height=0.8,
color="orange",
line_color="black",
line_width=1,
fill_alpha=0.7,
source=source,
)
show(p)
# ## Which Team Wins And Lost The Most Championships?
# * DayNum=154 (Mon) - Round 6, otherwise known as "national final" or "national championship", to bring the tournament field from 2 teams to 1 champion team
# data preparation
ncaa_win_camp = (
win_team[win_team["DayNum"] == 154]["TeamName"].value_counts().reset_index()
)
# Create ColumnDataSource from data frame
source = ColumnDataSource(ncaa_win_camp)
win_camp_list = source.data["index"].tolist()
# Add Plot
p = figure(
y_range=win_camp_list,
plot_width=600,
plot_height=320,
title="Tournament Championship Wins since 1985",
y_axis_label="Winners",
tools="",
)
p.title.text_font = "helvetica"
p.title.text_font_size = "12pt"
p.title.text_font_style = "bold"
p.hbar(
y="index",
right="TeamName",
height=0.8,
color="blue",
line_color="black",
line_width=1,
fill_alpha=0.7,
source=source,
)
show(p)
# data preparation
ncaa_lost_camp = (
lost_team[lost_team["DayNum"] == 154]["TeamName"].value_counts().reset_index()
)
# Create ColumnDataSource from data frame
source = ColumnDataSource(ncaa_lost_camp)
lost_camp_list = source.data["index"].tolist()
# Add Plot
p = figure(
y_range=lost_camp_list,
plot_width=600,
plot_height=300,
title="Tournament Championship Lost since 1985",
y_axis_label="Runner-Up",
tools="",
)
p.title.text_font = "helvetica"
p.title.text_font_size = "12pt"
p.title.text_font_style = "bold"
p.hbar(
y="index",
right="TeamName",
height=0.8,
color="orange",
line_color="black",
line_width=1,
fill_alpha=0.7,
source=source,
)
show(p)
# The major programs certainly fill out the top schools when it comes to championship games, and none more so than Duke. Since 1985, Duke appears 97 times as the winning team and 29 times as the losing team in tournament games, and has won 5 championships while finishing runner-up 5 times. These results point to Duke as the most successful program of the period.
# ## Which Team Seed And Conference Wins The Most Championships?
mtourney_seed["Region"] = mtourney_seed["Seed"].apply(lambda x: x[0][:1])
mtourney_seed["Seed"] = mtourney_seed["Seed"].apply(lambda x: int(x[1:3]))
# data preparation
seed_win_team = win_team.merge(mtourney_seed, on=["TeamID", "Season"])
seed_win_camp = (
seed_win_team[seed_win_team["DayNum"] == 154]["Seed"].value_counts().reset_index()
)
seed = list(seed_win_camp["index"].astype(str))
count = list(seed_win_camp["Seed"])
# plot
dot = figure(
title="Seeds With The Most Titles since 1985",
tools="",
toolbar_location=None,
y_range=seed,
x_range=[0, 25],
plot_width=600,
plot_height=400,
)
dot.title.text_font = "helvetica"
dot.title.text_font_size = "12pt"
dot.title.text_font_style = "bold"
dot.segment(
0,
seed,
count,
seed,
line_width=3,
line_color="green",
)
dot.circle(
count,
seed,
size=15,
fill_color="orange",
line_color="green",
line_width=3,
)
show(dot)
# ----------------------------------------
from bokeh.models import LabelSet
# data preparation
team_conf = team_conference.merge(conference, on="ConfAbbrev")
conf_win_team = win_team.merge(team_conf, on=["TeamID", "Season"])
conf_win_camp = (
conf_win_team[conf_win_team["DayNum"] == 154]["Description"]
.value_counts()
.reset_index()
)
# Create ColumnDataSource from data frame
source = ColumnDataSource(conf_win_camp)
conf_team_list = source.data["index"].tolist()
# Add Plot
p = figure(
y_range=conf_team_list,
plot_width=800,
plot_height=400,
title="'NCAA Championships by Conference 1985",
tools="",
)
p.title.text_font = "helvetica"
p.title.text_font_size = "12pt"
p.title.text_font_style = "bold"
p.hbar(
y="index",
right="Description",
height=0.8,
color="green",
line_color="black",
line_width=1,
fill_alpha=0.7,
source=source,
)
labels = LabelSet(
y="index",
x="Description",
text="Description",
x_offset=-18,
y_offset=-5.5,
source=source,
render_mode="canvas",
)
p.add_layout(labels)
show(p)
# Since the tournament was expanded to 64 teams in 1985, No. 1 seeds have won the tournament 21 times. A No. 2 seed has won the tournament five times while the third seed has won four times. Interestingly, the number 5 seed has not won a tournament in the period analysed. The “seed of death”, perhaps. The **Atlantic Coast Conference** has produced the most champions, winning 11 championships since 1985.
# Let's look at the conferences and the teams that contributed to the championships
conf_win_team = conf_win_team[conf_win_team["DayNum"] == 154]
temp_df = pd.crosstab(conf_win_team.Description, conf_win_team.TeamName)
plt.rcParams["figure.figsize"] = (8, 8)
sns.set_style("white")
sns.heatmap(temp_df, cmap="YlGnBu", annot=True, fmt="g", cbar=False)
plt.xlabel("Team Name", fontsize=20)
plt.ylabel("Conference", fontsize=20)
plt.title("Conference Matchups With Teams NCAA Tournament", fontsize=20)
plt.show()
# Duke and North Carolina, both members of the Atlantic Coast Conference (ACC), have contributed the most championships.
# # Data Section 2 - Team Box Scores
# This section provides game-by-game stats at a team level (free throws attempted, defensive rebounds, turnovers, etc.) for all regular season, conference tournament, and NCAA® tournament games since the 2002-03 season.
# Data Section 2 - Team Box Scores ==> [File descriptions](https://www.kaggle.com/c/march-madness-analytics-2020/data)
#
season_results = pd.read_csv(
"../input/march-madness-analytics-2020/2020DataFiles/2020-Mens-Data/MDataFiles_Stage1/MRegularSeasonDetailedResults.csv"
)
season_results.head()
# ## Indicators of Regular Season Success
# Let’s now turn to the regular season game statistics. We are interested in knowing how certain statistics correlate with winning vs losing. We will take the regular season detail and first convert it to a more ‘long’ format with only one column of TeamIDs and a factor indicating whether that row corresponds to a win or a loss. Here I also add some additional game statistics, including field goal percentage, free throw percentage, offensive/defensive rebounding efficiency, and possessions. The feature-engineering code is adapted from Laksan Nathan’s [kernel here](https://www.kaggle.com/lnatml/feature-engineering-with-advanced-stats).
# * **For More Info visit [stats.nba.com](https://stats.nba.com/help/glossary/)**
# Points Winning/Losing Team
season_results["WPts"] = season_results.apply(
lambda row: 2 * row.WFGM + row.WFGM3 + row.WFTM, axis=1
)
season_results["LPts"] = season_results.apply(
lambda row: 2 * row.LFGM + row.LFGM3 + row.LFTM, axis=1
)
# Calculate Winning/Losing Team Possession Feature
wPos = season_results.apply(
lambda row: 0.96 * (row.WFGA + row.WTO + 0.44 * row.WFTA - row.WOR), axis=1
)
lPos = season_results.apply(
lambda row: 0.96 * (row.LFGA + row.LTO + 0.44 * row.LFTA - row.LOR), axis=1
)
# two teams use almost the same number of possessions in a game
# (plus/minus one or two - depending on how quarters end)
# so let's just take the average
season_results["Pos"] = (wPos + lPos) / 2
"""Advanced Metrics"""
# Offensive efficiency (OffRtg) = 100 x (Points / Possessions)
season_results["WOffRtg"] = season_results.apply(
lambda row: 100 * (row.WPts / row.Pos), axis=1
)
season_results["LOffRtg"] = season_results.apply(
lambda row: 100 * (row.LPts / row.Pos), axis=1
)
# Defensive efficiency (DefRtg) = 100 x (Opponent points / Opponent possessions)
season_results["WDefRtg"] = season_results.LOffRtg
season_results["LDefRtg"] = season_results.WOffRtg
# Net Rating = Off.Rtg - Def.Rtg
season_results["WNetRtg"] = season_results.apply(
lambda row: (row.WOffRtg - row.WDefRtg), axis=1
)
season_results["LNetRtg"] = season_results.apply(
lambda row: (row.LOffRtg - row.LDefRtg), axis=1
)
# Assist Ratio : Percentage of team possessions that end in assists
season_results["WAstR"] = season_results.apply(
lambda row: 100 * row.WAst / (row.WFGA + 0.44 * row.WFTA + row.WAst + row.WTO),
axis=1,
)
season_results["LAstR"] = season_results.apply(
lambda row: 100 * row.LAst / (row.LFGA + 0.44 * row.LFTA + row.LAst + row.LTO),
axis=1,
)
# Turnover Ratio: Number of turnovers of a team per 100 possessions used.
# (TO * 100) / (FGA + (FTA * 0.44) + AST + TO)
season_results["WTOR"] = season_results.apply(
lambda row: 100 * row.WTO / (row.WFGA + 0.44 * row.WFTA + row.WAst + row.WTO),
axis=1,
)
season_results["LTOR"] = season_results.apply(
lambda row: 100 * row.LTO / (row.LFGA + 0.44 * row.LFTA + row.LAst + row.LTO),
axis=1,
)
# True Shooting Percentage (TS%): a measure of shooting efficiency that accounts for field goals and free throws
season_results["WTSP"] = season_results.apply(
lambda row: 100 * row.WPts / (2 * (row.WFGA + 0.44 * row.WFTA)), axis=1
)
season_results["LTSP"] = season_results.apply(
lambda row: 100 * row.LPts / (2 * (row.LFGA + 0.44 * row.LFTA)), axis=1
)
# eFG% : Effective Field Goal Percentage adjusting for the fact that 3pt shots are more valuable
season_results["WeFGP"] = season_results.apply(
lambda row: (row.WFGM + 0.5 * row.WFGM3) / row.WFGA, axis=1
)
season_results["LeFGP"] = season_results.apply(
lambda row: (row.LFGM + 0.5 * row.LFGM3) / row.LFGA, axis=1
)
# FTA Rate : How good a team is at drawing fouls.
season_results["WFTAR"] = season_results.apply(lambda row: row.WFTA / row.WFGA, axis=1)
season_results["LFTAR"] = season_results.apply(lambda row: row.LFTA / row.LFGA, axis=1)
# OREB% : Percentage of team offensive rebounds
season_results["WORP"] = season_results.apply(
lambda row: row.WOR / (row.WOR + row.LDR), axis=1
)
season_results["LORP"] = season_results.apply(
lambda row: row.LOR / (row.LOR + row.WDR), axis=1
)
# DREB% : Percentage of team defensive rebounds
season_results["WDRP"] = season_results.apply(
lambda row: row.WDR / (row.WDR + row.LOR), axis=1
)
season_results["LDRP"] = season_results.apply(
lambda row: row.LDR / (row.LDR + row.WOR), axis=1
)
# REB% : Percentage of team total rebounds
season_results["WRP"] = season_results.apply(
lambda row: (row.WDR + row.WOR) / (row.WDR + row.WOR + row.LDR + row.LOR), axis=1
)
season_results["LRP"] = season_results.apply(
lambda row: (row.LDR + row.LOR) / (row.WDR + row.WOR + row.LDR + row.LOR), axis=1
)
# ### Distribution of Statistics for Winning and Losing teams.
# Now let’s take a look at the distributions of these statistics for winning and losing teams.
from matplotlib.font_manager import FontProperties
font = FontProperties()
font.set_family("serif")
sns.set_style("whitegrid")
f, axes = plt.subplots(1, 2, figsize=(12, 6))
ax1 = sns.kdeplot(
season_results["WPts"], shade=True, ax=axes[0], label="Winning Points", color="k"
)
ax1 = sns.kdeplot(
season_results["LPts"], shade=True, ax=axes[0], label="Lossing Points", color="m"
)
ax1.set(xlabel="Points")
axes[0].set_title(
"Points Winning/Losing Team",
loc="left",
fontsize=15,
    fontproperties=font,
fontweight="bold",
)
plt.setp(ax1.get_legend().get_texts(), fontsize="10")
ax2 = sns.kdeplot(
wPos, shade=True, ax=axes[1], label="Winning Possessions", color="blue"
)
ax2 = sns.kdeplot(
    lPos, shade=True, ax=axes[1], label="Losing Possessions", color="green"
)
ax2.set(xlabel="Possesion Points")
axes[1].set_title(
"Winning/losing Team Possesion",
loc="left",
fontsize=15,
    fontproperties=font,
fontweight="bold",
)
plt.setp(ax2.get_legend().get_texts(), fontsize="10")
plt.subplots_adjust(wspace=0.4)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# import config
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import tensorflow as tf
import numpy as np
import nltk
import keras.backend as K
from nltk.probability import FreqDist
from nltk.corpus import stopwords
import string
from keras.preprocessing.sequence import pad_sequences
# nltk.download('stopwords')
# nltk.download('punkt')
import gc, os, pickle
from nltk import word_tokenize, sent_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
def plot_len(df, col_name, i):
plt.figure(i)
sns.distplot(df[col_name].str.len())
plt.ylabel("length of string")
plt.show()
def plot_cnt_words(df, col_name, i):
plt.figure(i)
vals = df[col_name].apply(lambda x: len(x.strip().split()))
sns.distplot(vals)
plt.ylabel("count of words")
plt.show()
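# Hypothetical usage sketch (these two plotting helpers are not called anywhere else
# in this script): once df_train is loaded further below, they could be used to
# inspect text lengths, e.g.
# plot_len(df_train, "question_body", 1)
# plot_cnt_words(df_train, "answer", 2)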
puncts = [
",",
".",
'"',
":",
")",
"(",
"-",
"!",
"?",
"|",
";",
"'",
"$",
"&",
"/",
"[",
"]",
">",
"%",
"=",
"#",
"*",
"+",
"\\",
"•",
"~",
"@",
"£",
"·",
"_",
"{",
"}",
"©",
"^",
"®",
"`",
"<",
"→",
"°",
"€",
"™",
"›",
"♥",
"←",
"×",
"§",
"″",
"′",
"Â",
"█",
"½",
"à",
"…",
"\xa0",
"\t",
"“",
"★",
"”",
"–",
"●",
"â",
"►",
"−",
"¢",
"²",
"¬",
"░",
"¶",
"↑",
"±",
"¿",
"▾",
"═",
"¦",
"║",
"―",
"¥",
"▓",
"—",
"‹",
"─",
"\u3000",
"\u202f",
"▒",
":",
"¼",
"⊕",
"▼",
"▪",
"†",
"■",
"’",
"▀",
"¨",
"▄",
"♫",
"☆",
"é",
"¯",
"♦",
"¤",
"▲",
"è",
"¸",
"¾",
"Ã",
"⋅",
"‘",
"∞",
"«",
"∙",
")",
"↓",
"、",
"│",
"(",
"»",
",",
"♪",
"╩",
"╚",
"³",
"・",
"╦",
"╣",
"╔",
"╗",
"▬",
"❤",
"ï",
"Ø",
"¹",
"≤",
"‡",
"√",
]
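# Hypothetical usage sketch (the puncts list is not referenced elsewhere in this
# script): a common pattern is to pad each punctuation mark with spaces before
# tokenising, for example:
# def clean_punct(text):
#     for p in puncts:
#         text = text.replace(p, f" {p} ")
#     return text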
mispell_dict = {
"aren't": "are not",
"can't": "cannot",
"couldn't": "could not",
"couldnt": "could not",
"didn't": "did not",
"doesn't": "does not",
"doesnt": "does not",
"don't": "do not",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"havent": "have not",
"he'd": "he would",
"he'll": "he will",
"he's": "he is",
"i'd": "I would",
"i'd": "I had",
"i'll": "I will",
"i'm": "I am",
"isn't": "is not",
"it's": "it is",
"it'll": "it will",
"i've": "I have",
"let's": "let us",
"mightn't": "might not",
"mustn't": "must not",
"shan't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"shouldn't": "should not",
"shouldnt": "should not",
"that's": "that is",
"thats": "that is",
"there's": "there is",
"theres": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"theyre": "they are",
"they've": "they have",
"we'd": "we would",
"we're": "we are",
"weren't": "were not",
"we've": "we have",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"where's": "where is",
"who'd": "who would",
"who'll": "who will",
"who're": "who are",
"who's": "who is",
"who've": "who have",
"won't": "will not",
"wouldn't": "would not",
"you'd": "you would",
"you'll": "you will",
"you're": "you are",
"you've": "you have",
"'re": " are",
"wasn't": "was not",
"we'll": " will",
"didn't": "did not",
"tryin'": "trying",
}
def clean_text(text):
text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
text = text.lower().split()
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops]
text = " ".join(text)
return text
def _get_mispell(mispell_dict):
mispell_re = re.compile("(%s)" % "|".join(mispell_dict.keys()))
return mispell_dict, mispell_re
def replace_typical_misspell(text):
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)
def clean_data(df, columns: list):
for col in columns:
df[col] = df[col].apply(lambda x: clean_text(x.lower()))
df[col] = df[col].apply(lambda x: replace_typical_misspell(x))
return df
def plot_freq_dist(train_data):
freq_dist = FreqDist(
[
word
for text in train_data["question_body"].str.replace(
"[^a-za-z0-9^,!.\/+-=]", " "
)
for word in text.split()
]
)
plt.figure(figsize=(20, 7))
plt.title("Word frequency on question title (Training Data)").set_fontsize(25)
plt.xlabel("").set_fontsize(25)
plt.ylabel("").set_fontsize(25)
freq_dist.plot(60, cumulative=False)
plt.show()
def get_tfidf_features(data, dims=256):
tfidf = TfidfVectorizer(ngram_range=(1, 3))
tsvd = TruncatedSVD(n_components=dims, n_iter=5)
tfquestion_title = tfidf.fit_transform(data["question_title"].values)
tfquestion_title = tsvd.fit_transform(tfquestion_title)
tfquestion_body = tfidf.fit_transform(data["question_body"].values)
tfquestion_body = tsvd.fit_transform(tfquestion_body)
tfanswer = tfidf.fit_transform(data["answer"].values)
tfanswer = tsvd.fit_transform(tfanswer)
return tfquestion_title, tfquestion_body, tfanswer
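# Hypothetical usage sketch (this helper is not called elsewhere in this script):
# once df_train is loaded further below, dense TF-IDF + SVD features could be built
# per text field and, for example, concatenated with the RNN features:
# tfidf_qt, tfidf_qb, tfidf_ans = get_tfidf_features(df_train, dims=256)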
def correlation(x, y):
mx = tf.math.reduce_mean(x)
my = tf.math.reduce_mean(y)
xm, ym = x - mx, y - my
r_num = tf.math.reduce_mean(tf.multiply(xm, ym))
r_den = tf.math.reduce_std(xm) * tf.math.reduce_std(ym)
return r_num / r_den
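# Hypothetical usage sketch: the competition is scored with a column-wise Spearman
# correlation, but this Pearson-style TF correlation could be passed as an extra
# Keras metric for monitoring, e.g.
# model.compile("adam", "binary_crossentropy", metrics=["accuracy", correlation])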
from keras.layers import (
Dense,
Dropout,
Embedding,
LSTM,
Bidirectional,
Input,
Concatenate,
GRU,
)
# from import Attention
from keras.models import Model
df_train = pd.read_csv("/kaggle/input/google-quest-challenge/train.csv")
df_test = pd.read_csv("/kaggle/input/google-quest-challenge/test.csv")
df_submission = pd.read_csv(
"/kaggle/input/google-quest-challenge/sample_submission.csv"
)
tokens = []
def get_words(col):
global tokens
toks = []
for x in sent_tokenize(col):
tokens += word_tokenize(x)
toks += word_tokenize(x)
return toks
def convert_to_indx(col, word2idx, vocab_size):
return [word2idx[word] if word in word2idx else vocab_size for word in col]
def LSTM_model_initial(
df_train,
df_test,
df_submission,
rnn_type="LSTM",
embedding_size=200,
rnn_units=64,
maxlen_qt=26,
maxlen_qb=260,
maxlen_an=210,
dropout_rate=0.2,
dense_hidden_units=60,
):
columns = ["question_title", "question_body", "answer"]
df_train = clean_data(df_train, columns)
df_test = clean_data(df_test, columns)
# columns = ["question_title", "question_body", "answer"]
for col in columns:
df_train[col] = df_train[col].apply(lambda x: get_words(x))
df_test[col] = df_test[col].apply(lambda x: get_words(x))
vocab = sorted(list(set(tokens)))
vocab_size = len(vocab)
word2idx = {}
idx2word = {}
for idx, word in enumerate(vocab):
word2idx[word] = idx
idx2word[idx] = word
for col in columns:
df_train[col] = df_train[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
df_test[col] = df_test[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
X_train_question_title = pad_sequences(
df_train["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_train_question_body = pad_sequences(
df_train["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_train_answer = pad_sequences(
df_train["answer"], maxlen=maxlen_an, padding="post", value=0
)
X_test_question_title = pad_sequences(
df_test["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_test_question_body = pad_sequences(
df_test["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_test_answer = pad_sequences(
df_test["answer"], maxlen=maxlen_an, padding="post", value=0
)
target_columns = df_submission.columns[1:]
y_train = df_train[target_columns]
inpqt = Input(shape=(maxlen_qt,), name="inpqt")
inpqb = Input(shape=(maxlen_qb,), name="inpqb")
inpan = Input(shape=(maxlen_an,), name="inpan")
    # vocab_size + 1 so the out-of-vocabulary index produced by convert_to_indx stays in range
    Eqt = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_qt)(inpqt)
    Eqb = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_qb)(inpqb)
    Ean = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_an)(inpan)
if rnn_type == "LSTM":
BLqt = Bidirectional(LSTM(rnn_units))(Eqt)
BLqb = Bidirectional(LSTM(rnn_units))(Eqb)
BLan = Bidirectional(LSTM(rnn_units))(Ean)
elif rnn_type == "GRU":
BLqt = Bidirectional(GRU(rnn_units))(Eqt)
BLqb = Bidirectional(GRU(rnn_units))(Eqb)
BLan = Bidirectional(GRU(rnn_units))(Ean)
Dqt = Dropout(dropout_rate)(BLqt)
Dqb = Dropout(dropout_rate)(BLqb)
Dan = Dropout(dropout_rate)(BLan)
Concatenated = Concatenate()([Dqt, Dqb, Dan])
Ds = Dense(dense_hidden_units, activation="relu")(Concatenated)
Dsf = Dense(30, activation="sigmoid")(Ds)
model = Model(inputs=[inpqt, inpqb, inpan], outputs=Dsf)
model.compile("adam", "binary_crossentropy", metrics=["accuracy"])
model.fit(
{
"inpqt": X_train_question_title,
"inpqb": X_train_question_body,
"inpan": X_train_answer,
},
y_train,
batch_size=32,
epochs=4,
validation_split=0.1,
)
y_test = model.predict(
{
"inpqt": X_test_question_title,
"inpqb": X_test_question_body,
"inpan": X_test_answer,
}
)
df_submission = pd.read_csv(
"/kaggle/input/google-quest-challenge/sample_submission.csv"
)
df_test = pd.read_csv("/kaggle/input/google-quest-challenge/test.csv")
target_columns = df_submission.columns
outp = {}
outp["qa_id"] = df_test["qa_id"]
for i in range(1, len(target_columns)):
outp[target_columns[i]] = y_test[:, i - 1]
my_submission = pd.DataFrame(outp)
my_submission.to_csv("submission.csv", index=False)
from keras.layers import Lambda, Dot, Activation
# def attention_3d_block(hidden_states):
# hidden_size = int(hidden_states.shape[2])
# score_first_part = Dense(hidden_size, use_bias=False, name='attention_score_vec')(hidden_states)
# h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,), name='last_hidden_state')(hidden_states)
# score = dot([score_first_part, h_t], [2, 1], name='attention_score')
# attention_weights = Activation('softmax', name='attention_weight')(score)
# context_vector = dot([hidden_states, attention_weights], [1, 1], name='context_vector')
# pre_activation = concatenate([context_vector, h_t], name='attention_output')
# attention_vector = Dense(128, use_bias=False, activation='tanh', name='attention_vector')(pre_activation)
# return attention_vector
def attention_3d_block_self(hidden_states):
hidden_size = int(hidden_states.shape[2])
score_first_part = Dense(hidden_size, use_bias=False)(hidden_states)
h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,))(hidden_states)
score = Dot([2, 1])([score_first_part, h_t])
attention_weights = Activation("softmax")(score)
context_vector = Dot([1, 1])([hidden_states, attention_weights])
pre_activation = Concatenate()([context_vector, h_t])
attention_vector = Dense(128, use_bias=False, activation="tanh")(pre_activation)
return attention_vector
def attention_3d_block_another(hidden_states1, hidden_states2):
hidden_size = int(hidden_states1.shape[2])
score_first_part = Dense(hidden_size, use_bias=False)(hidden_states1)
h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,))(hidden_states2)
score = Dot([2, 1])([score_first_part, h_t])
attention_weights = Activation("softmax")(score)
context_vector = Dot([1, 1])([hidden_states1, attention_weights])
pre_activation = Concatenate()([context_vector, h_t])
attention_vector = Dense(128, use_bias=False, activation="tanh")(pre_activation)
return attention_vector
def LSTM_model_modified_with_attention_self(
df_train,
df_test,
df_submission,
rnn_type="LSTM",
embedding_size=200,
rnn_units=64,
maxlen_qt=26,
maxlen_qb=260,
maxlen_an=210,
dropout_rate=0.2,
dense_hidden_units=60,
):
columns = ["question_title", "question_body", "answer"]
df_train = clean_data(df_train, columns)
df_test = clean_data(df_test, columns)
# columns = ["question_title", "question_body", "answer"]
for col in columns:
df_train[col] = df_train[col].apply(lambda x: get_words(x))
df_test[col] = df_test[col].apply(lambda x: get_words(x))
vocab = sorted(list(set(tokens)))
vocab_size = len(vocab)
word2idx = {}
idx2word = {}
for idx, word in enumerate(vocab):
word2idx[word] = idx
idx2word[idx] = word
for col in columns:
df_train[col] = df_train[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
df_test[col] = df_test[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
X_train_question_title = pad_sequences(
df_train["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_train_question_body = pad_sequences(
df_train["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_train_answer = pad_sequences(
df_train["answer"], maxlen=maxlen_an, padding="post", value=0
)
X_test_question_title = pad_sequences(
df_test["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_test_question_body = pad_sequences(
df_test["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_test_answer = pad_sequences(
df_test["answer"], maxlen=maxlen_an, padding="post", value=0
)
target_columns = df_submission.columns[1:]
y_train = df_train[target_columns]
inpqt = Input(shape=(maxlen_qt,), name="inpqt")
inpqb = Input(shape=(maxlen_qb,), name="inpqb")
inpan = Input(shape=(maxlen_an,), name="inpan")
    # vocab_size + 1 so the out-of-vocabulary index produced by convert_to_indx stays in range
    Eqt = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_qt)(inpqt)
    Eqb = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_qb)(inpqb)
    Ean = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_an)(inpan)
if rnn_type == "LSTM":
BLqt = Bidirectional(LSTM(rnn_units, return_state=True))(Eqt)
BLqb = Bidirectional(LSTM(rnn_units, return_sequences=True))(
Eqb, initial_state=BLqt[1:]
)
BLan = Bidirectional(LSTM(rnn_units, return_sequences=True))(Ean)
elif rnn_type == "GRU":
BLqt = Bidirectional(GRU(rnn_units))(Eqt)
BLqb = Bidirectional(GRU(rnn_units))(Eqb)
BLan = Bidirectional(GRU(rnn_units))(Ean)
AtQ = attention_3d_block_self(BLqb)
AtAn = attention_3d_block_self(BLan)
Dqt = Dropout(dropout_rate)(BLqt[0])
Dqbin = Lambda(lambda x: x[:, -1, :], output_shape=(128,), name="lambda_layer1")(
BLqb
)
Dqb = Dropout(dropout_rate)(Dqbin)
Danin = Lambda(lambda x: x[:, -1, :], output_shape=(128,), name="lambda_layer2")(
BLan
)
Dan = Dropout(dropout_rate)(Danin)
Concatenated = Concatenate()([Dqt, Dqb, Dan, AtQ, AtAn])
Ds = Dense(dense_hidden_units, activation="relu")(Concatenated)
Dsf = Dense(30, activation="sigmoid")(Ds)
model = Model(inputs=[inpqt, inpqb, inpan], outputs=Dsf)
model.compile("adam", "binary_crossentropy", metrics=["accuracy"])
model.fit(
{
"inpqt": X_train_question_title,
"inpqb": X_train_question_body,
"inpan": X_train_answer,
},
y_train,
batch_size=32,
epochs=4,
validation_split=0.1,
)
y_test = model.predict(
{
"inpqt": X_test_question_title,
"inpqb": X_test_question_body,
"inpan": X_test_answer,
}
)
df_submission = pd.read_csv(
"/kaggle/input/google-quest-challenge/sample_submission.csv"
)
df_test = pd.read_csv("/kaggle/input/google-quest-challenge/test.csv")
target_columns = df_submission.columns
outp = {}
outp["qa_id"] = df_test["qa_id"]
for i in range(1, len(target_columns)):
outp[target_columns[i]] = y_test[:, i - 1]
my_submission = pd.DataFrame(outp)
my_submission.to_csv("submission.csv", index=False)
def LSTM_model_modified_with_attention_a2q(
df_train,
df_test,
df_submission,
rnn_type="LSTM",
embedding_size=200,
rnn_units=64,
maxlen_qt=26,
maxlen_qb=260,
maxlen_an=210,
dropout_rate=0.2,
dense_hidden_units=60,
):
columns = ["question_title", "question_body", "answer"]
df_train = clean_data(df_train, columns)
df_test = clean_data(df_test, columns)
# columns = ["question_title", "question_body", "answer"]
for col in columns:
df_train[col] = df_train[col].apply(lambda x: get_words(x))
df_test[col] = df_test[col].apply(lambda x: get_words(x))
vocab = sorted(list(set(tokens)))
vocab_size = len(vocab)
word2idx = {}
idx2word = {}
for idx, word in enumerate(vocab):
word2idx[word] = idx
idx2word[idx] = word
for col in columns:
df_train[col] = df_train[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
df_test[col] = df_test[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
X_train_question_title = pad_sequences(
df_train["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_train_question_body = pad_sequences(
df_train["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_train_answer = pad_sequences(
df_train["answer"], maxlen=maxlen_an, padding="post", value=0
)
X_test_question_title = pad_sequences(
df_test["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_test_question_body = pad_sequences(
df_test["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_test_answer = pad_sequences(
df_test["answer"], maxlen=maxlen_an, padding="post", value=0
)
target_columns = df_submission.columns[1:]
y_train = df_train[target_columns]
inpqt = Input(shape=(maxlen_qt,), name="inpqt")
inpqb = Input(shape=(maxlen_qb,), name="inpqb")
inpan = Input(shape=(maxlen_an,), name="inpan")
Eqt = Embedding(vocab_size, embedding_size, input_length=maxlen_qt)(inpqt)
Eqb = Embedding(vocab_size, embedding_size, input_length=maxlen_qb)(inpqb)
Ean = Embedding(vocab_size, embedding_size, input_length=maxlen_an)(inpan)
if rnn_type == "LSTM":
BLqt = Bidirectional(LSTM(rnn_units, return_state=True))(Eqt)
BLqb = Bidirectional(LSTM(rnn_units, return_sequences=True))(
Eqb, initial_state=BLqt[1:]
)
BLan = Bidirectional(LSTM(rnn_units, return_sequences=True))(Ean)
elif rnn_type == "GRU":
BLqt = Bidirectional(GRU(rnn_units))(Eqt)
BLqb = Bidirectional(GRU(rnn_units))(Eqb)
BLan = Bidirectional(GRU(rnn_units))(Ean)
AtA2Q = attention_3d_block_another(BLqb, BLan)
Dqt = Dropout(dropout_rate)(BLqt[0])
Dqbin = Lambda(lambda x: x[:, -1, :], output_shape=(128,), name="lambda_layer1")(
BLqb
)
Dqb = Dropout(dropout_rate)(Dqbin)
Danin = Lambda(lambda x: x[:, -1, :], output_shape=(128,), name="lambda_layer2")(
BLan
)
Dan = Dropout(dropout_rate)(Danin)
Concatenated = Concatenate()([Dqt, Dqb, Dan, AtA2Q])
Ds = Dense(dense_hidden_units, activation="relu")(Concatenated)
Dsf = Dense(30, activation="sigmoid")(Ds)
model = Model(inputs=[inpqt, inpqb, inpan], outputs=Dsf)
model.compile("adam", "binary_crossentropy", metrics=["accuracy"])
model.fit(
{
"inpqt": X_train_question_title,
"inpqb": X_train_question_body,
"inpan": X_train_answer,
},
y_train,
batch_size=32,
epochs=4,
validation_split=0.1,
)
y_test = model.predict(
{
"inpqt": X_test_question_title,
"inpqb": X_test_question_body,
"inpan": X_test_answer,
}
)
df_submission = pd.read_csv(
"/kaggle/input/google-quest-challenge/sample_submission.csv"
)
df_test = pd.read_csv("/kaggle/input/google-quest-challenge/test.csv")
target_columns = df_submission.columns
outp = {}
outp["qa_id"] = df_test["qa_id"]
for i in range(1, len(target_columns)):
outp[target_columns[i]] = y_test[:, i - 1]
my_submission = pd.DataFrame(outp)
my_submission.to_csv("submission.csv", index=False)
def LSTM_model_modified_concatenated_qa(
df_train,
df_test,
df_submission,
rnn_type="LSTM",
embedding_size=200,
rnn_units=64,
maxlen_qt=26,
maxlen_qb=260,
maxlen_an=210,
dropout_rate=0.2,
dense_hidden_units=60,
):
columns = ["question_title", "question_body", "answer"]
df_train = clean_data(df_train, columns)
df_test = clean_data(df_test, columns)
# columns = ["question_title", "question_body", "answer"]
for col in columns:
df_train[col] = df_train[col].apply(lambda x: get_words(x))
df_test[col] = df_test[col].apply(lambda x: get_words(x))
vocab = sorted(list(set(tokens)))
vocab_size = len(vocab)
word2idx = {}
idx2word = {}
for idx, word in enumerate(vocab):
word2idx[word] = idx
idx2word[idx] = word
for col in columns:
df_train[col] = df_train[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
df_test[col] = df_test[col].apply(
lambda x: convert_to_indx(x, word2idx, vocab_size)
)
X_train_question_title = pad_sequences(
df_train["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_train_question_body = pad_sequences(
df_train["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_train_answer = pad_sequences(
df_train["answer"], maxlen=maxlen_an, padding="post", value=0
)
X_test_question_title = pad_sequences(
df_test["question_title"], maxlen=maxlen_qt, padding="post", value=0
)
X_test_question_body = pad_sequences(
df_test["question_body"], maxlen=maxlen_qb, padding="post", value=0
)
X_test_answer = pad_sequences(
df_test["answer"], maxlen=maxlen_an, padding="post", value=0
)
target_columns = df_submission.columns[1:]
y_train = df_train[target_columns]
inpqt = Input(shape=(maxlen_qt,), name="inpqt")
inpqb = Input(shape=(maxlen_qb,), name="inpqb")
inpan = Input(shape=(maxlen_an,), name="inpan")
    # vocab_size + 1 so the out-of-vocabulary index produced by convert_to_indx stays in range
    Eqt = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_qt)(inpqt)
    Eqb = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_qb)(inpqb)
    Ean = Embedding(vocab_size + 1, embedding_size, input_length=maxlen_an)(inpan)
if rnn_type == "LSTM":
BLqt = Bidirectional(LSTM(rnn_units, return_state=True))(Eqt)
BLqb = Bidirectional(LSTM(rnn_units, return_state=True))(
Eqb, initial_state=BLqt[1:]
)
BLan = Bidirectional(LSTM(rnn_units, return_state=True))(
Ean, initial_state=BLqb[1:]
)
elif rnn_type == "GRU":
BLqt = Bidirectional(GRU(rnn_units, return_state=True))(Eqt)
BLqb = Bidirectional(GRU(rnn_units, return_state=True))(
Eqb, initial_state=BLqt[1:]
)
BLan = Bidirectional(GRU(rnn_units, return_state=True))(
Ean, initial_state=BLqb[1:]
)
Dqt = Dropout(dropout_rate)(BLqt[0])
Dqb = Dropout(dropout_rate)(BLqb[0])
Dan = Dropout(dropout_rate)(BLan[0])
Concatenated = Concatenate()([Dqt, Dqb, Dan])
Ds = Dense(dense_hidden_units, activation="relu")(Concatenated)
Dsf = Dense(30, activation="sigmoid")(Ds)
model = Model(inputs=[inpqt, inpqb, inpan], outputs=Dsf)
model.compile("adam", "binary_crossentropy", metrics=["accuracy"])
model.fit(
{
"inpqt": X_train_question_title,
"inpqb": X_train_question_body,
"inpan": X_train_answer,
},
y_train,
batch_size=32,
epochs=4,
validation_split=0.1,
)
y_test = model.predict(
{
"inpqt": X_test_question_title,
"inpqb": X_test_question_body,
"inpan": X_test_answer,
}
)
df_submission = pd.read_csv(
"/kaggle/input/google-quest-challenge/sample_submission.csv"
)
df_test = pd.read_csv("/kaggle/input/google-quest-challenge/test.csv")
target_columns = df_submission.columns
outp = {}
outp["qa_id"] = df_test["qa_id"]
for i in range(1, len(target_columns)):
outp[target_columns[i]] = y_test[:, i - 1]
my_submission = pd.DataFrame(outp)
my_submission.to_csv("submission.csv", index=False)
LSTM_model_modified_with_attention_a2q(
df_train, df_test, df_submission, rnn_type="LSTM"
)
# y_test = model.predict(X_test)
|
CFG = {
"TPU": 0,
"block_size": 16800,
"block_stride": 1050,
"patch_size": 20,
"batch_size": 128,
}
assert CFG["block_size"] % CFG["patch_size"] == 0
assert CFG["block_size"] % CFG["block_stride"] == 0
# **Imports and Utils**
import os
if CFG["TPU"] and not os.path.exists("/kaggle/working/libs"):
os.makedirs("/kaggle/working/libs")
import math
import time
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import scipy.ndimage  # needed for scipy.ndimage.label below
from tqdm import tqdm
from sklearn.metrics import average_precision_score
from sklearn.model_selection import GroupKFold, StratifiedGroupKFold
GPU = len(tf.config.list_physical_devices("GPU"))
if CFG["TPU"] and not os.path.exists("/kaggle/working/TPU"):
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect(tpu="local")
tpu_strategy = tf.distribute.TPUStrategy(tpu)
os.makedirs("/kaggle/working/TPU")
def folder(path):
if not os.path.exists(path):
os.makedirs(path)
def plot(e, size=(20, 4)):
plt.figure(figsize=size)
plt.plot(e)
plt.show()
# **Load**
metadata = []
tsfog_metadata = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/tdcsfog_metadata.csv"
)
for _, row in tqdm(tsfog_metadata.iterrows()):
row = dict(row)
series = pd.read_csv(
f'/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/tdcsfog/{row["Id"]}.csv'
)
_, num_features = scipy.ndimage.label(series["StartHesitation"])
row["StartHesitation_size"] = num_features
_, num_features = scipy.ndimage.label(series["Turn"])
row["Turn_size"] = num_features
_, num_features = scipy.ndimage.label(series["Walking"])
row["Walking_size"] = num_features
row["Source"] = "tsfog"
metadata.append(row)
defog_metadata = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/defog_metadata.csv"
)
for _, row in tqdm(defog_metadata.iterrows()):
row = dict(row)
try:
series = pd.read_csv(
f'/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/defog/{row["Id"]}.csv'
)
except FileNotFoundError:
continue
_, num_features = scipy.ndimage.label(series["StartHesitation"])
row["StartHesitation_size"] = num_features
_, num_features = scipy.ndimage.label(series["Turn"])
row["Turn_size"] = num_features
_, num_features = scipy.ndimage.label(series["Walking"])
row["Walking_size"] = num_features
row["Source"] = "defog"
metadata.append(row)
metadata = pd.DataFrame(metadata)
metadata
metadata["Walking_size"].sum()
# Tsfog
tsfog = pd.read_csv(
"/kaggle/input/parkinson-s-freezing-data/train/tdcsfog/description.csv"
)
tsfog["Path"] = "/kaggle/input/parkinson-s-freezing-data/train/tdcsfog/" + tsfog["File"]
# NOTE: this split relies on val_ids, which is only defined further below in the
# defog section; it must be defined before this line runs.
train_tsfog, val_tsfog = (
tsfog[tsfog["Id"].apply(lambda x: x not in val_ids)],
tsfog[tsfog["Id"].apply(lambda x: x in val_ids)],
)
assert set(train_tsfog["File"]) & set(val_tsfog["File"]) == set()
assert set(train_tsfog["Id"]) & set(val_tsfog["Id"]) == set()
print(
f"[StartHesitation size] {(np.sum(tsfog['StartHesitation_size'])/1000):.0f}k ({(np.sum(val_tsfog['StartHesitation_size'])/1000):.0f}k)"
)
print(
f"[Turn_size] {(np.sum(tsfog['Turn_size'])/1000):.0f}k ({(np.sum(val_tsfog['Turn_size'])/1000):.0f}k)"
)
print(
f"[Walking size] {(np.sum(tsfog['Walking_size'])/1000):.0f}k ({(np.sum(val_tsfog['Walking_size'])/1000):.0f}k)\n"
)
# Defog
defog = pd.read_csv(
"/kaggle/input/parkinson-s-freezing-data/train/defog/description.csv"
)
defog["Path"] = "/kaggle/input/parkinson-s-freezing-data/train/defog/" + defog["File"]
defog = defog[defog["Valid_size"] != 0.0]
val_ids = [
"7a467da4f3",
"4ec23c3d98",
"961b782275",
"be9d33541d",
"bf2fd0ff35",
"0c55be4384",
"62d4a42a73",
"5327e062c9",
"da05ad7058",
"aafcbecb5a",
"bdcff4be3a",
"6a20935af5",
"3e6987cb2d",
"6dc94db321",
"7030643376",
"4520cf1068",
"68e7e02a47",
"3f970065e5",
]
train_defog, val_defog = (
defog[defog["Id"].apply(lambda x: x not in val_ids)],
defog[defog["Id"].apply(lambda x: x in val_ids)],
)
assert set(train_defog["File"]) & set(val_defog["File"]) == set()
assert set(train_defog["Id"]) & set(val_defog["Id"]) == set()
print(
f"[StartHesitation size] {(np.sum(defog['StartHesitation_size'])/1000):.0f}k ({(np.sum(val_defog['StartHesitation_size'])/1000):.0f}k)"
)
print(
f"[Turn_size] {(np.sum(defog['Turn_size'])/1000):.0f}k ({(np.sum(val_defog['Turn_size'])/1000):.0f}k)"
)
print(
f"[Walking size] {(np.sum(defog['Walking_size'])/1000):.0f}k ({(np.sum(val_defog['Walking_size'])/1000):.0f}k)\n"
)
# Concat
train_fog, val_fog = pd.concat([train_tsfog, train_defog]), pd.concat(
[val_tsfog, val_defog]
)
assert set(train_fog["File"]) & set(val_fog["File"]) == set()
assert set(train_fog["Id"]) & set(val_fog["Id"]) == set()
print(
f"[StartHesitation size] {(np.sum(train_fog['StartHesitation_size'])/1000):.0f}k ({(np.sum(val_fog['StartHesitation_size'])/1000):.0f}k)"
)
print(
f"[Turn_size] {(np.sum(train_fog['Turn_size'])/1000):.0f}k ({(np.sum(val_fog['Turn_size'])/1000):.0f}k)"
)
print(
f"[Walking size] {(np.sum(train_fog['Walking_size'])/1000):.0f}k ({(np.sum(val_fog['Walking_size'])/1000):.0f}k)\n"
)
|
# # Aim of the notebook.
# We build an attention-based sequence-to-sequence model that can effectively understand the context of Spanish sentences and translate them into clear and coherent English sentences.
import os, io
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
# # Downloading the dataset
# The dataset used is a paired corpus of **English-Spanish**, provided by [Anki](https://www.manythings.org/anki/).
# The code starts by downloading a zip file containing the dataset for English to Spanish translation. The dataset is stored in the `spa-eng` folder and can be found in the file named `spa.txt`.
# Let's take a look at what the data looks like.
# - Each line in the `spa.txt` file contains an English word/sentence and their corresponding Spanish translation.
# - Some words might have multiple translations because of context.
# - Our first objective is to extract each line and separate the English and Spanish words/sentences into two arrays; the Spanish sentences will act as the inputs and the English sentences as the targets for training the model.
# Downloading the file
zip_file = tf.keras.utils.get_file(
"spa-eng.zip",
origin="http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip",
extract=True,
)
file_path = os.path.dirname(zip_file) + "/spa-eng/spa.txt"
# # Load the data
def load_data(path, size=None):
    text = io.open(path, encoding="UTF-8").read()
    lines = text.splitlines()  # split the text into lines separated by newlines
    pairs = [line.split("\t") for line in lines]  # split each line into target and source using tabs
    if size is not None:
        pairs = pairs[:size]  # optionally limit the number of sentence pairs
    source = np.array(
        [source for target, source in pairs]
    )  # extract source text into a numpy array
    target = np.array(
        [target for target, source in pairs]
    )  # extract target text into a numpy array
    return source, target
src_sentences, tgt_sentences = load_data(file_path)
print("Original Sentence:", src_sentences[42])
print("Translated Sentence:", tgt_sentences[42])
# # Visualize the data
# Create a dataframe
df = pd.DataFrame(
zip(src_sentences, tgt_sentences), columns=["source_sentence", "target_sentence"]
)
df
eng_len = []
span_len = []
# populate the lists with sentence lengths
for i in src_sentences:
eng_len.append(len(i.split()))
for i in tgt_sentences:
span_len.append(len(i.split()))
length_df = pd.DataFrame({"english": eng_len, "spanish": span_len})
length_df.hist(bins=20)
plt.show()
# # Process the data
import re, itertools
from collections import Counter
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
# ## Text Standardization
# The model is dealing with multilingual text with a limited vocabulary. So it will be important to standardize the input text. The first step is Unicode normalization to split accented characters and replace compatibility characters with their ASCII equivalents.
# For example
# - Input: Yo lo sé.
# - Output: yo lo se .
import unicodedata
def unicode_to_ascii(s):
normalized = unicodedata.normalize("NFD", s)
return "".join(c for c in normalized if unicodedata.category(c) != "Mn")
def preprocess_text(text):
text = unicode_to_ascii(text.lower().strip())
text = re.sub(r"[^a-zA-Z?.!,¿]+", " ", text)
text = re.sub(r"([?.!,¿])", r" \1 ", text)
text = re.sub(r'[" "]+', " ", text)
text = text.rstrip().strip()
text = "<sos> " + text + " <eos>"
return text
print("Original sentence:", src_sentences[42])
prc_src_sentences = [preprocess_text(w) for w in src_sentences]
prc_tgt_sentences = [preprocess_text(w) for w in tgt_sentences]
print("Preprocessed sentence:", prc_src_sentences[42])
# # Tokenize the data
# Once the sentences are split and special tokens are added, tokenization is carried out using the **Tokenizer** class from the **tensorflow.keras.preprocessing.text** module.
# Steps:
# * The Tokenizer object is initialized and fit to the text data.
# * The texts are then tokenized, meaning that each word is assigned a unique number.
# * The resulting sequences are then padded and truncated to make all of them of equal length.
# We'll create a function named `tokenize` to perform all the above steps together. Here's an example of how the function will transform an input sentence:
# - Input: yo lo se .
# - Output: [ 1 26 18 17 3 2 0 0 0 0 0 0 0 0 0 0]
def tokenize(sentences):
lang_tokenizer = Tokenizer(filters="")
lang_tokenizer.fit_on_texts(sentences)
sequences = lang_tokenizer.texts_to_sequences(sentences)
max_length = max(len(s) for s in sequences)
sequences = pad_sequences(
sequences, maxlen=max_length, padding="post", truncating="post"
)
return sequences, lang_tokenizer, max_length
# ## Create source and target sequences using tokenize()
# Before we apply the tokenize() function, there is an important consideration: applying the processing and tokenization functions to the entire dataset can exhaust the available RAM, so it is recommended to limit the data. Training on the complete dataset of >100,000 sentences will also take a long time.
# To filter, process and tokenize the data, we can build a function load_sequences that performs all three tasks. This function returns the source and target sequences, their maximum lengths, and the tokenizers used for them.
def load_sequences(path, size=None):
    src_sentences, tgt_sentences = load_data(path, size)
src_sentences = [preprocess_text(w) for w in src_sentences]
tgt_sentences = [preprocess_text(w) for w in tgt_sentences]
src_sequences, src_lang_tokenizer, max_length_src = tokenize(src_sentences)
tgt_sequences, tgt_lang_tokenizer, max_length_trg = tokenize(tgt_sentences)
return (
src_sequences,
tgt_sequences,
src_lang_tokenizer,
tgt_lang_tokenizer,
max_length_src,
max_length_trg,
)
(
src_sequences,
tgt_sequences,
src_lang_tokenizer,
tgt_lang_tokenizer,
max_length_src,
max_length_trg,
) = load_sequences(file_path)
print("src sequences:", src_sequences.shape)
print("tgt sequences:", tgt_sequences.shape)
print("source maxlen:", max_length_src)
print("target maxlen:", max_length_trg)
print("Original sentence:", src_sentences[42])
print("Text after preprocessing:", preprocess_text(src_sentences[42]))
print("Text after tokenization :", src_sequences[42])
print("Original sentence:", tgt_sentences[42])
print("Text after preprocessing:", preprocess_text(tgt_sentences[42]))
print("Text after tokenization :", tgt_sequences[42])
# ## Understanding what's happening inside
# We're taking another detour here, to understand how the tokenisation layer converts a batch of strings into a batch of token IDs.
# Using the **index_word** mapping of each tokenizer, we can convert the token IDs back to words to understand what's under the hood.
#
# Testing if the word to index / index to word mappings have been obtained correctly.
def convert(lang, tensor):
for t in tensor:
if t != 0:
print("%d ----> %s" % (t, lang.index_word[t]))
print("Input Language; index to word mapping")
convert(src_lang_tokenizer, src_sequences[42])
print()
print("Target Language; index to word mapping")
convert(tgt_lang_tokenizer, tgt_sequences[42])
# getting the size of the input and output vocabularies.
src_vocab_size = len(src_lang_tokenizer.word_index) + 1
tgt_vocab_size = len(tgt_lang_tokenizer.word_index) + 1
print(src_vocab_size)
print(tgt_vocab_size)
# # Train-Test Split
# Now that we have processed the data, it's time to split it into train and test datasets. This way, we can use the train data to train our model and evaluate the model performance on the test data.
# The train_test_split() function from the sklearn library is used to split the data. src_sequences and tgt_sequences are the input and target sequences respectively. The test_size parameter specifies the proportion of the data that should be used for testing, and random_state is used to set the random seed for reproducibility.
(
source_sequences_train,
source_sequences_val,
tgt_sequences_train,
tgt_sequences_val,
) = train_test_split(src_sequences, tgt_sequences, shuffle=False, test_size=0.2)
print(
len(source_sequences_train),
len(source_sequences_val),
len(tgt_sequences_train),
len(tgt_sequences_val),
)
print(source_sequences_train[1])
print(source_sequences_val[1])
print(tgt_sequences_train[1])
print(tgt_sequences_val[1])
# ## Create a tf.dataset
# The input pipeline starts by importing the data and creating a dataset from the data stored in memory. For this, you can use `tf.data.Dataset.from_tensor_slices()`, which creates a **tf.data.Dataset** object whose elements are slices of the passed tensors. Once you have created the object, you can transform it by applying different operations to it (for example, Dataset.map() or Dataset.batch()).
# From the arrays of sequences created after tokenization, you can create a tf.data.Dataset that shuffles and batches them efficiently.
# # Defining Hyperparameters
# The following hyperparameters for creating the **tf.data.Dataset** (and later for model-building purposes as well) are specified:
# - `buffer_size`: This represents the buffer size for the training data. It is calculated by taking the length of the `source_sequences_train` array.
# - `val_buffer_size`: This represents the buffer size for the validation data. It is calculated by taking the length of the `source_sequences_val` array.
# - `BATCH_SIZE`: This represents the batch size for the training process. It is set to 64.
# - `embedding_dim`: This represents the embedding dimension for the input data. It is set to 128.
# - `units`: This represents the number of units in the recurrent neural network (RNN) used for the model. It is set to 1024.
# - `steps_per_epoch`: This represents the number of steps to take during each epoch of training. It is calculated as the `buffer_size` divided by the `BATCH_SIZE`.
# - `val_steps_per_epoch`: This represents the number of validation steps to take during each epoch of training. It is calculated as the `val_buffer_size` divided by the `BATCH_SIZE`.
# It is important to note that these hyperparameters have been selected based on heuristics and may need to be fine-tuned for different problems or data sets.
# Defining hyperparameters
buffer_size = len(source_sequences_train)
val_buffer_size = len(source_sequences_val)
BATCH_SIZE = 64
embedding_dim = 128
units = 1024
steps_per_epoch = buffer_size // BATCH_SIZE
val_steps_per_epoch = val_buffer_size // BATCH_SIZE
train_dataset = tf.data.Dataset.from_tensor_slices(
(source_sequences_train, tgt_sequences_train)
)
train_dataset = train_dataset.shuffle(buffer_size=buffer_size).batch(BATCH_SIZE)
val_dataset = tf.data.Dataset.from_tensor_slices(
(source_sequences_val, tgt_sequences_val)
)
val_dataset = val_dataset.batch(BATCH_SIZE)
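# Optional transformation (not used in the original pipeline): prefetching lets the
# input pipeline prepare the next batch while the current one is training, e.g.
# train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)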
example_input_batch, example_target_batch = next(iter(train_dataset))
example_input_batch.shape, example_target_batch.shape
# # Build the NMT model
# ## Encoder-Decoder model with attention
# The encoder model consists of an embedding layer and a GRU layer with 1024 units.
# The decoder model consists of an attention layer, an embedding layer, a GRU layer and a dense layer.
# The attention model consists of three dense layers (Bahdanau attention).
# ## Build the Encoder
# The goal of the encoder is to process the context sequence into a sequence of vectors that are useful for the decoder as it attempts to predict the next output for each timestep.
# ---
# The **Encoder** class is defined as a subclass of tf.keras.Model. It takes the following parameters as inputs:
# > **Class constructor:**
# * **vocab_size:** Integer representing the size of the vocabulary in the input language.
# * **emb_dim:** Integer representing the dimension of the embedding space.
# * **enc_units:** Integer representing the number of encoding GRU units.
# * **batch_sz:** Integer representing the batch size used during training.
# > It also initializes the following layers:
# * An **Embedding layer**, which maps the input language word indices to dense vectors in the embedding space. mask_zero argument is set to True to mask the padding in the input sequence.
# * A **GRU layer**, which performs the encoding of the input sequences.
# > The **call** method is defined to perform the forward pass of the Encoder. It takes two inputs:
# * x: Input sequences of shape (batch_size, sequence_length).
# * hidden: The initial hidden state, with shape **(batch_size, enc_units)**.
# > It does the following operations:
# 1. The method first maps the input sequences to the embedding space using the Embedding layer. The resulting output has shape (batch_size, sequence_length, emb_dim).
# 2. Then, the encoded sequence and the final hidden state are obtained by passing the embedded input through the GRU layer. The shape of the encoded sequence is **(batch_size, sequence_length, enc_units)** and the shape of the final hidden state is **(batch_size, enc_units).**
# 3. The initialize_hidden_state method is used to initialize the hidden state of the GRU layer to all zeros, with shape **(batch_size, enc_units).**
#
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, emb_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.enc_units = enc_units
self.batch_sz = batch_sz
self.embedding = tf.keras.layers.Embedding(vocab_size, emb_dim, mask_zero=True)
self.gru = tf.keras.layers.GRU(
self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer="glorot_uniform",
)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state=hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
encoder = Encoder(src_vocab_size, embedding_dim, units, BATCH_SIZE)
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print(
"Encoder output shape: (batch size, sequence length, units) {}".format(
sample_output.shape
)
)
print("Encoder Hidden state shape: (batch size, units) {}".format(sample_hidden.shape))
# ## Bahdanau Attention Layer
# This class implements the Bahdanau Attention mechanism in TensorFlow as a custom layer. The class extends the **tf.keras.layers.Layer** class and overrides the call method to implement the Bahdanau attention mechanism.
# We will use the subclassing approach again for building the Attention layer as we have done for the Encoder. This will help us in training all of the sub-layers inside it together during the end-to-end training process of the NMT model.
# >**Architecture**
# The Bahdanau Attention layer consists of three fully-connected dense layers:
# 1. **W1**: the first dense layer with units number of units.
# 2. **W2**: the second dense layer with units number of units.
# 3. **V**: the third dense layer with 1 unit.
# > **Inputs:**
# The input to the call method are two tensors:
# 1. **Query**: a tensor with shape (batch_size, hidden size) representing the hidden state.
# 2. **Values**: a tensor with shape (batch_size, max_len, hidden size) representing the values to attend over.
# > **Outputs:**
# The outputs of the call method are:
# 1. **Context_vector**: a tensor with shape (batch_size, hidden_size) representing the context vector.
# 2. **Attention_weights**: a tensor with shape (batch_size, max_length, 1) representing the attention weights.
# >**Algorithm**
# * The query tensor is expanded along the time axis to broadcast addition with the values tensor.
# * The score is calculated by applying the tanh activation on the result of the addition of W1(query_with_time_axis) and W2(values).
# * The attention weights are obtained by applying the softmax activation on the score.
# * The context vector is obtained by multiplying the attention weights with the values tensor and summing over the max length axis.
# * Finally, both the context vector and the attention weights are returned.
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units) # fully-connected dense layer-1
self.W2 = tf.keras.layers.Dense(units) # fully-connected dense layer-2
self.V = tf.keras.layers.Dense(1) # fully-connected dense layer-3
def call(self, query, values):
query_with_time_axis = tf.expand_dims(query, 1)
score = self.V(tf.nn.tanh(self.W1(query_with_time_axis) + self.W2(values)))
attention_weights = tf.nn.softmax(score, axis=1)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
attention_layer = BahdanauAttention(20)
attention_result, attention_weights = attention_layer(sample_hidden, sample_output)
print(
"Attention result shape (context vector): (batch size, units) {}".format(
attention_result.shape
)
)
print(
"Attention weights shape: (batch_size, sequence_length, 1) {}".format(
attention_weights.shape
)
)
# ## Build the decoder
# The decoder's job is to generate predictions for the next token at each location in the target sequence.
# ---
# This code defines a Decoder class that implements a decoder model for a sequence-to-sequence architecture. The decoder model will take in an input sequence and the hidden state from the encoder and generate the target sequence.
# > **Class constructor**:The __init__ method initializes the class variables:
# * **vocab_size**: The size of the target vocabulary.
# * **emb_dim:** The size of the embedding layer that converts the target sequences into dense vectors.
# * **dec_units:** The number of GRU units in the decoder.
# * **batch_sz:** The batch size of the input data.
# > It also initializes the following layers:
# * **attention**: An instance of the BahdanauAttention class.
# * **Embedding**: An embedding layer to convert the target sequences into dense vectors.
# * **GRU**: A GRU layer to perform the decoding.
# * **FC**: A dense layer to generate the final target sequence.
# >**call** method: The call method implements the forward pass of the decoder. It takes in 3 inputs: x, hidden, and enc_output.
# * x: Input sequences of shape (batch_size, sequence_length).
# * hidden: The hidden state from the encoder, with shape **(batch_size, dec_units)**.
# * enc_output: The output of the encoder (shape: (batch_size, max_length, hidden_size)).
# >It does the following operations:
# 1. Computes the context vector and the attention weights from the attention layer.
# 2. Embeds the input x using the embedding layer.
# 3. Concatenates the context vector with the embedded target sequence.
# 4. Passes the concatenated sequence and the hidden state from the encoder to the GRU layer. This generates the output and the new hidden state.
# 5. Reshapes the output to a 2D tensor.
# 6. Passes the reshaped output through the dense layer to generate the final target sequence.
# 7. Returns the final target sequence and the new hidden state.
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, emb_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.attention = BahdanauAttention(self.dec_units)
self.embedding = tf.keras.layers.Embedding(vocab_size, emb_dim)
self.gru = tf.keras.layers.GRU(
self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer="glorot_uniform",
)
self.fc = tf.keras.layers.Dense(vocab_size)
def call(self, x, hidden, enc_output):
context_vector, attention_weights = self.attention(hidden, enc_output)
x = self.embedding(x)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
output, state = self.gru(x)
output = tf.reshape(output, (-1, output.shape[2]))
x = self.fc(output)
return x, state, attention_weights
decoder = Decoder(tgt_vocab_size, embedding_dim, units, BATCH_SIZE)
sample_decoder_output, _, _ = decoder(
tf.random.uniform((BATCH_SIZE, 1)), sample_hidden, sample_output
)
print(
"Decoder output shape: (batch_size, vocab size) {}".format(
sample_decoder_output.shape
)
)
# # Train the NMT model
# ## Configure the model.
# To train the model you'll need several additional components:
# * **The Loss function**: The loss_function method calculates the loss between the target data and the model's prediction. The real argument is the target data, and the pred argument is the model's prediction.
# > The method first creates a binary mask to indicate which target data points are not padding (not equal to 0). The loss is calculated using the loss_object and the masked loss is returned by taking the mean of the masked loss values.
# * **The Optimizer** : Defines the optimizer and the loss function used to train the model. The optimizer used is Adam and the loss function used is SparseCategoricalCrossentropy.
# > SparseCategoricalCrossentropy is used because the target data is a categorical variable, but not one-hot encoded, so we use this form of categorical crossentropy loss. The from_logits argument is set to True because the model produces logits rather than probabilistic outputs, and the reduction argument is set to none because the loss values need to be calculated for each time step and then masked.
# * **Checkpoints**: The code defines the checkpoint and its directory. The checkpoint is a mechanism to save the model's state after each training epoch, so that we can resume training from the saved state if needed.
# >The checkpoint_dir is set to ./training_checkpoints and the checkpoint_prefix is set to ckpt inside the checkpoint_dir. The checkpoint is then defined using the tf.train.Checkpoint method, and includes the optimizer, the encoder and the decoder as its attributes.
# Here's an implementation of a masked loss and accuracy:
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction="none"
)
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
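# The note above mentions a masked loss and accuracy; only the loss is defined in
# this script, so here is a minimal sketch of a masked accuracy (not wired into the
# training loop below) that ignores padded positions (token id 0).
def masked_accuracy(real, pred):
    pred_ids = tf.argmax(pred, axis=-1, output_type=real.dtype)
    match = tf.cast(tf.equal(real, pred_ids), tf.float32)
    mask = tf.cast(tf.math.not_equal(real, 0), tf.float32)
    return tf.reduce_sum(match * mask) / tf.maximum(tf.reduce_sum(mask), 1.0)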
checkpoint_dir = "./training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder)
# ## Configure the training step:
# A custom training loop (instead of Model.fit etc.) is used; further reference is available from TensorFlow [here](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch)
# NOTE: While training the model, make sure your instance is GPU based.
# ---
# The code is annotated with "@tf.function", which tells TensorFlow to compile this function for performance optimization.
# This is a TensorFlow function for a single training step of a sequence-to-sequence model. The input to the function includes:
# - `inp`: The input sequence to be encoded
# - `targ`: The target sequence to be decoded
# - `enc_hidden`: The initial hidden state of the encoder
# > The function performs the following operations:
# 1. Encode the input sequence to get the encoded output and the final hidden state of the encoder.
# 2. Set the initial hidden state of the decoder to the final hidden state of the encoder.
# 3. Loop over the time steps of the target sequence to be passed as current input along with the hidden state and encoder output to get the predicted output and the next hidden state.
# 4. Computing the loss between the target and the predicted output for each time step.
# 5. Computing the gradients of the loss with respect to the trainable variables of the encoder and decoder.
# 6. Update the variables based on the computed gradients using an optimiser.
#
@tf.function
def train_step(inp, targ, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims(
[tgt_lang_tokenizer.word_index["<sos>"]] * BATCH_SIZE, 1
)
for t in range(1, targ.shape[1]):
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = loss / int(targ.shape[1])
variables = encoder.trainable_variables + decoder.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return batch_loss
# ## Configure the validation step:
# The purpose of the validation step is to evaluate the model's performance on a validation dataset and to monitor the training process.
# ---
# This is a TensorFlow function for a single validation step of a sequence-to-sequence model. The input to the function includes:
# - `inp`: The input sequence to be encoded
# - `targ`: The target sequence to be decoded
# - `enc_hidden`: The initial hidden state of the encoder
# >The function performs the following operations:
# 1. Encoding the input sequence to get the encoded output and the final hidden state of the encoder
# 2. Setting the initial hidden state of the decoder to be the final hidden state of the encoder
# 3. Looping over the time steps of the target sequence, passing the current input and hidden state to the decoder, along with the encoder output, to get the predicted output
# 4. Computing the loss between the target and the predicted output for each time step
# 5. Computing the average loss over all time steps
# 6. Returning the average loss for this batch
@tf.function
def val_step(inp, targ, enc_hidden):
loss = 0
enc_output, enc_hidden = encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([tgt_lang_tokenizer.word_index["<sos>"]] * BATCH_SIZE, 1)
for t in range(1, targ.shape[1]):
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = loss / int(targ.shape[1])
return batch_loss
# # Initiate the model training:
# The following code trains a sequence to sequence model using the training and validation datasets. The model will be trained for 10 epochs.
# > At each epoch:
# * Step 1: Initialise Encoder’s hidden state
# * Step 2: Invoke train_step function
# * Step 3: Generate loss for each batch of the training dataset
# * Step 4: Accumulate loss to calculate the total training loss.
# * Step 5: Invoke val_step function
# * Step 6: Generate loss for each batch of the validation dataset
# * Step 7: Accumulate loss to calculate the total validation loss
# * Step 8: Store model after every 2 epochs
# * Step 9: Print metrics score
# ---
import time
def train_and_validate(train_dataset, val_dataset, EPOCHS=10):
for epoch in range(EPOCHS):
start = time.time()
# Step1:
enc_hidden = encoder.initialize_hidden_state()
total_train_loss = 0
total_val_loss = 0
for batch, (inp, targ) in enumerate(train_dataset.take(steps_per_epoch)):
batch_loss = train_step(inp, targ, enc_hidden)
total_train_loss += batch_loss
if batch % 100 == 0:
print(
"Epoch {} Batch {} Loss {:.4f}".format(
epoch + 1, batch, batch_loss.numpy()
)
)
for batch, (inp, targ) in enumerate(val_dataset.take(val_steps_per_epoch)):
val_batch_loss = val_step(inp, targ, enc_hidden)
total_val_loss += val_batch_loss
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
print(
"Total training loss is {:.4f}".format(total_train_loss / steps_per_epoch)
)
print(
"Total validation loss is {:.4f}".format(
total_val_loss / val_steps_per_epoch
)
)
print("Time taken for 1 epoch {} sec\n".format(time.time() - start))
train_and_validate(train_dataset, val_dataset)
# # **Prediction using Greedy Search**
# During inference, i.e. during testing, we change the prediction process slightly.
# The evaluate(sentence) function takes a sentence as input and returns the translated result along with the original sentence and an attention plot. Here's how it performs these steps:
# 1. Initialize the attention plot with zeros, with shape (max_length_trg, max_length_src).
# 2. Preprocess the input sentence by splitting it into words and converting each word into its corresponding index in the source tokenizer vocabulary.
# 3. Pad the input sequence with zeros to a fixed length max_length_src.
# 4. Convert the input sequence into a tensor.
# 5. Initialize the hidden state of the encoder with zeros.
# 6. Pass the input sequence through the encoder and get the output and the final hidden state.
# 7. Initialize the hidden state of the decoder with the final hidden state of the encoder.
# 8. Initialize the decoder input with the index of the special start-of-sequence symbol `<sos>`.
# 9. For t in range(max_length_trg):
# >* Pass the decoder input, hidden state and the encoder output through the decoder and get the predictions and attention weights.
# >* Add the attention weights to the attention plot.
# >* Get the index of the word with the highest predicted probability and add it to the result string.
# >* If the word is the special end-of-sequence symbol `<eos>`, return the result, the original sentence, and the attention plot.
# >* Set the decoder input to be the index of the word with the highest predicted probability.
# 10. Return the result, the original sentence, and the attention plot.
# ## Attention plot
# The function for plotting the attention weights takes three arguments: **attention, sentence, and predicted_sentence.**
# * Create a figure and set its size to 10 x 10 using plt.figure(figsize=(10, 10)).
# * Add a subplot of 1 x 1 grid and select the first subplot using fig.add_subplot(1, 1, 1).
# * Display the attention weights with a heatmap using ax.matshow(attention, cmap='viridis').
# * Create a font dictionary to set the font size of the tick labels to 14.
# * Set the x-axis tick labels to sentence and the y-axis tick labels to predicted_sentence with a font size of 14.
# * Set the x-axis major locator to tick every 1 and the y-axis major locator to tick every 1 using ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) and ax.yaxis.set_major_locator(ticker.MultipleLocator(1)).
# * Display the plot using plt.show().
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap="viridis")
fontdict = {"fontsize": 14}
ax.set_xticklabels([""] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([""] + predicted_sentence, fontdict=fontdict)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluate(sentence):
attention_plot = np.zeros((max_length_trg, max_length_src))
sentence = preprocess_text(sentence)
inputs = [src_lang_tokenizer.word_index[i] for i in sentence.split(" ")]
inputs = pad_sequences([inputs], maxlen=max_length_src, padding="post")
inputs = tf.convert_to_tensor(inputs)
result = ""
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([tgt_lang_tokenizer.word_index["<sos>"]], 0)
for t in range(max_length_trg):
predictions, dec_hidden, attention_weights = decoder(
dec_input, dec_hidden, enc_out
)
attention_weights = tf.reshape(attention_weights, (-1,))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += tgt_lang_tokenizer.index_word[predicted_id] + " "
if tgt_lang_tokenizer.index_word[predicted_id] == "<eos>":
return result, sentence, attention_plot
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
def translate(sentence):
result, sentence, attention_plot = evaluate(sentence)
print("Input:", sentence)
print("Predicted Translation:", result)
attention_plot = attention_plot[
: len(result.split(" ")), : len(sentence.split(" "))
]
plot_attention(attention_plot, sentence.split(" "), result.split(" "))
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate("hace mucho frío aquí.")
translate("trata de averiguarlo.")
translate("¿todavía están en casa?")
|
import numpy as np
import pandas as pd
import imageio
import datetime
ZHVI = pd.read_csv(
"../input/median-housing-price-us/Affordability_ChainedZHVI_2017Q2.csv"
)
CPI = pd.read_csv("../input/consumer-price-index-usa-all-items/USACPIALLMINMEI.csv")
HP = pd.read_csv("../input/median-housing-price-us/MedianHousePriceIncome.csv")
Income = pd.read_csv("../input/median-housing-price-us/Affordability_Income_2017Q2.csv")
ZHVI = pd.melt(
ZHVI,
id_vars=["RegionID", "RegionName", "SizeRank"],
value_name="ZHVI",
var_name="Date",
)
Income = pd.melt(
Income,
id_vars=["RegionID", "RegionName", "SizeRank"],
value_name="Income",
var_name="Date",
)
ZHVI = ZHVI.merge(
Income, how="outer", on=["RegionID", "RegionName", "SizeRank", "Date"]
)
CPI.columns = ["Date", "CPI"]
CPI.Date = pd.to_datetime(CPI.Date, format="%Y-%m")
ZHVI.Date = pd.to_datetime(ZHVI.Date, format="%Y-%m")
ZHVI = ZHVI.merge(CPI, how="inner", on=["Date"])
CPI.set_index("Date", inplace=True)
base = np.mean(CPI.loc["2019"])[0]
ZHVI["Income_A"] = ZHVI.Income * base / ZHVI.CPI
ZHVI["ZHVI_A"] = ZHVI.ZHVI * base / ZHVI.CPI
ZHVI.set_index("Date", inplace=True)
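# The two *_A columns above rescale nominal dollars to 2019 dollars with the CPI ratio:
# adjusted = nominal * CPI_2019 / CPI_t. A tiny standalone example with hypothetical
# numbers (not taken from the data):
nominal_income_1990 = 30000.0  # hypothetical 1990 median income
cpi_1990, cpi_2019 = 130.7, 255.7  # hypothetical index levels
print(nominal_income_1990 * cpi_2019 / cpi_1990)  # ~58692, the same purchasing power in 2019 dollars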
"""ZHVI_1 = pd.read_csv("../input/median-housing-price-us/Metro_Zhvi_AllHomes.csv", encoding='latin-1')
ZHVI_1 = pd.melt( ZHVI_1, id_vars=['RegionID', 'RegionName', 'SizeRank'], value_name='ZHVI', var_name = 'Date')
PIR = pd.read_csv("../input/median-housing-price-us/Affordability_Wide_2019Q3_Public.csv")
PIR = PIR.loc[PIR.Index=='Price To Income']
del PIR['Index']
del PIR['HistoricAverage_1985thru1999']
PIR = pd.melt( PIR, id_vars=['RegionID', 'RegionName', 'SizeRank'], value_name='PIR', var_name = 'Date')
ZHVI_1 = ZHVI_1.merge(PIR, how='inner', on=['RegionID','RegionName','SizeRank','Date'])
#ZHVI_1.PIR.interpolate(inplace = True)
ZHVI_1['Income'] = ZHVI_1.ZHVI/ZHVI_1.PIR
ZHVI_1.Date = pd.to_datetime(ZHVI_1.Date, format="%Y-%m")
ZHVI_1 = ZHVI_1.merge(CPI, how='inner', on=['Date'])
ZHVI_1['Income_A'] = ZHVI_1.Income*base/ZHVI_1.CPI
ZHVI_1['ZHVI_A'] = ZHVI_1.ZHVI*base/ZHVI_1.CPI
ZHVI_1.set_index('Date', inplace = True)
del ZHVI_1['PIR']
#ZHVI= pd.concat([ZHVI, ZHVI_1[ZHVI_1.index > '2017-06-01']], sort= True)"""
from sklearn.linear_model import LinearRegression
def reg(X, y):
regr = LinearRegression()
regr.fit(X, y)
a = regr.coef_
b = regr.intercept_
r = regr.score(X, y)
return a, b, r
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
date = "2019-06"
data = ZHVI.loc[ZHVI.SizeRank < 100].dropna()
fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), squeeze=True)
x = data[date]["Income_A"]
y = data[date]["ZHVI_A"]
US_x = data[date].loc[data[date].RegionName == "United States"].Income_A
US_y = data[date].loc[data[date].RegionName == "United States"].ZHVI_A
a, b, r = reg(x.values.reshape(-1, 1), y.values.reshape(-1, 1))
a = a.squeeze()
b = b.squeeze()
plt.style.use("seaborn-whitegrid")
plt.scatter(x, y, label="Adjusted ZHVI by Locality (Top 100)")
plt.scatter(US_x, US_y, c="r", label="Adjusted ZHVI Total US")
ax1.set_ylim(0, 1000000)
ax1.set_xlim(20000, 100000)
ax1.set_ylabel("ZHVI Adjusted to 2019 Dollars", fontsize="large")
ax1.set_xlabel("Median Income Adjusted to 2019 Dollars", fontsize="large")
ax1.set_title("Regional ZHVI vs. Median Income: " + date)
reg_equation = "y= %fx + %f \nR-squared: %f" % (a, b, r)
plt.text(x=21000, y=940000, s=reg_equation)
plt.plot([20000, 100000], [a * 20000 + b, a * 100000 + b])
formatter = ticker.FormatStrFormatter("$%d")
ax1.yaxis.set_major_formatter(formatter)
ax1.xaxis.set_major_formatter(formatter)
ax1.legend(loc="upper right")
plt.show()
def plot_gif(date):
fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), squeeze=True)
x = data[date]["Income_A"]
y = data[date]["ZHVI_A"]
a, b, r = reg(x.values.reshape(-1, 1), y.values.reshape(-1, 1))
a = a.squeeze()
b = b.squeeze()
plt.scatter(x, y)
ax1.set_ylim(0, 1000000)
ax1.set_xlim(0, 100000)
plt.plot([20000, 100000], [a * 20000 + b, a * 100000 + b])
# Used to return the plot as an image array
fig.canvas.draw()
# draw the canvas, cache the renderer
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype="uint8")
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return image
def plot_gif(date, data):
fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), squeeze=True)
x = data[date]["Income_A"]
y = data[date]["ZHVI_A"]
US_x = data[date].loc[data[date].RegionName == "United States"].Income_A
US_y = data[date].loc[data[date].RegionName == "United States"].ZHVI_A
a, b, r = reg(x.values.reshape(-1, 1), y.values.reshape(-1, 1))
a = a.squeeze()
b = b.squeeze()
plt.style.use("seaborn-whitegrid")
plt.scatter(x, y, label="Adjusted ZHVI by Locality (Top 100)")
plt.scatter(US_x, US_y, c="r", label="Adjusted ZHVI Total US")
ax1.set_ylim(0, 1000000)
ax1.set_xlim(20000, 100000)
ax1.set_ylabel("ZHVI Adjusted to 2019 Dollars", fontsize="large")
ax1.set_xlabel("Median Income Adjusted to 2019 Dollars", fontsize="large")
ax1.set_title("Regional ZHVI vs. Median Income: " + date)
reg_equation = "y= %fx + %f \nR-squared: %f" % (a, b, r)
plt.text(x=21000, y=940000, s=reg_equation)
plt.plot([20000, 100000], [a * 20000 + b, a * 100000 + b])
formatter = ticker.FormatStrFormatter("$%d")
ax1.yaxis.set_major_formatter(formatter)
ax1.xaxis.set_major_formatter(formatter)
ax1.legend(loc="upper right")
# Used to return the plot as an image array
fig.canvas.draw()
# draw the canvas, cache the renderer
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype="uint8")
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return image
data = ZHVI.loc[ZHVI.SizeRank < 100].dropna()
kwargs_write = {"fps": 1.0, "quantizer": "nq"}
imageio.mimsave(
"./bubble.gif",
[plot_gif(date, data) for date in pd.unique(data.index.strftime("%Y-%m"))],
fps=20,
)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("train (1).csv")
df.head()
df.info()
# Now we will find out missing values and try to fill them
df.isnull().sum().loc[lambda x: x > 0]
df.isnull().sum().loc[lambda x: x > 0] / len(df) * 100
# As we can see, some columns have a large share of missing data; we need to remove them (those with more than 10% missing are dropped below)
na = df.isna().sum().loc[lambda x: x > (df.shape[0] * 0.1)].index
df.drop(na, axis=1, inplace=True)
df.drop("Id", axis=1, inplace=True)
# Now we fill the remaining missing values
df["GarageType"].fillna("other", inplace=True)
df["GarageFinish"].fillna("other", inplace=True)
df["GarageYrBlt"].fillna("1980.0", inplace=True)
df["BsmtQual"].value_counts()
# The top two categories have almost the same count, so we can fill the missing values with either of them
df["BsmtFinType1"].value_counts()
df["Electrical"].fillna("SBrkr", inplace=True)
df["MasVnrArea"].fillna("0.0", inplace=True)
df["BsmtQual"].fillna("Gd", inplace=True)
df["BsmtCond"].fillna("TA", inplace=True)
df["BsmtExposure"].fillna("No", inplace=True)
df["BsmtFinType2"].fillna("Unf", inplace=True)
df["GarageQual"].fillna("TA", inplace=True)
df["GarageCond"].fillna("TA", inplace=True)
df["MasVnrType"].fillna("None", inplace=True)
df["BsmtFinType1"].fillna("Unf", inplace=True)
# Find outliers and try to remove them
df.describe()
df["LotArea"].plot(kind="box")
df["LotArea"].max()
df[df["LotArea"] > 100000]
s = df[df["LotArea"] > 100000].index
df.drop(s, axis=0, inplace=True)
df.head()
df.drop("MiscVal", axis=1, inplace=True)
df.drop("Neighborhood", axis=1, inplace=True)
df.corr()
# Figure out the object columns.
df.select_dtypes("object").columns
df["MasVnrArea"] = pd.to_numeric(df["MasVnrArea"])
df["PavedDrive"] = pd.factorize(df["PavedDrive"])[0]
from sklearn.model_selection import train_test_split
X = df.drop("SalePrice", axis=1)
y = df["SalePrice"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=101
)
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline, make_pipeline
col = make_column_transformer(
(
OneHotEncoder(sparse=False, handle_unknown="ignore"),
[
"MSZoning",
"Street",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"SaleType",
"SaleCondition",
],
),
remainder="passthrough",
)
sc = StandardScaler()
from sklearn.ensemble import (
GradientBoostingRegressor,
ExtraTreesRegressor,
RandomForestRegressor,
)
from sklearn.linear_model import Lasso
from xgboost import XGBRegressor
from sklearn.metrics import r2_score, mean_absolute_error
gbr = GradientBoostingRegressor(n_estimators=100)
etr = ExtraTreesRegressor(n_estimators=150)
rfr = RandomForestRegressor(n_estimators=150)
la = Lasso(alpha=5.0)
from sklearn.svm import SVR
sv = SVR()
xgb = XGBRegressor()
pipe = make_pipeline(col, sc, gbr)
pipe.fit(X_train, y_train)
pre = pipe.predict(X_test)
print(r2_score(y_test, pre))
pipe = make_pipeline(col, sc, etr)
pipe.fit(X_train, y_train)
pre1 = pipe.predict(X_test)
print(r2_score(y_test, pre1))
pipe = make_pipeline(col, sc, rfr)
pipe.fit(X_train, y_train)
pr = pipe.predict(X_test)
print(r2_score(y_test, pr))
pipe = make_pipeline(col, sc, la)
pipe.fit(X_train, y_train)
p = pipe.predict(X_test)
print(r2_score(y_test, p))
pipe = make_pipeline(col, sc, xgb)
pipe.fit(X_train, y_train)
p5 = pipe.predict(X_test)
print(r2_score(y_test, p5))
|
# 2019 Data Science Bowl
# ===
# Damien Park
# 2019.11.14
# version
# ---
# * ver 35. event_code aggregates by type
# * ver 36. fix null event_code in test data set
# * ver 37. take log and standardscaling
# * ver 38. counting event_id-0.488
# * ver 39. improving code efficiency(rolling, memory management)
# * ver 40. modeling
# * ver 47. fix minor error(columns)
# ---
import pandas as pd
import numpy as np
import json
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.metrics import confusion_matrix
# from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
# from sklearn.svm import SVC
# from catboost import CatBoostClassifier
# from sklearn.linear_model import LogisticRegression
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.ensemble import AdaBoostClassifier
import keras
import tensorflow as tf
# import pprint
import gc
import os
import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
# pandas display option
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_row", 1500)
pd.set_option("max_colwidth", 150)
pd.set_option("display.float_format", "{:.2f}".format)
# data load option
dtypes = {
"event_id": "object",
"game_session": "object",
"timestamp": "object",
"event_data": "object",
"installation_id": "object",
"event_count": "int16",
"event_code": "int16",
"game_time": "int32",
"title": "category",
"type": "category",
"world": "category",
}
label = {
"game_session": "object",
"installation_id": "object",
"title": "category",
"num_correct": "int8",
"num_incorrect": "int8",
"accuracy": "float16",
"accuracy_group": "int8",
}
# hyper parameter
loss_type = "category" # mse/category
dp_log = True
# window = 70
batch_sizes = 20
validation = True
scale_type = "robust" # minmax/robust/standard
# ## Data Preparing
# ### Split data by ID
train = pd.read_csv("/kaggle/input/data-science-bowl-2019/train.csv", dtype=dtypes)
test = pd.read_csv("/kaggle/input/data-science-bowl-2019/test.csv", dtype=dtypes)
label_ = pd.read_csv(
"/kaggle/input/data-science-bowl-2019/train_labels.csv", dtype=label
)
# sample = pd.read_csv("/kaggle/input/data-science-bowl-2019/sample_submission.csv")
# specs = pd.read_csv("/kaggle/input/data-science-bowl-2019/specs.csv")
# calculating accuracy
class accuracy:
def __init__(self, df):
self.df = df
# Assessment evaluation-Cart Balancer (Assessment)
def cart_assessment(self):
_ = self.df.query(
"title=='Cart Balancer (Assessment)' and event_id=='d122731b'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["correct"] = _.event_data.apply(
lambda x: (
json.loads(x)["correct"] if "correct" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 0
_["num_incorrect_"] = 0
_.loc[_.correct == True, "num_correct_"] = 1
_.loc[_.correct == False, "num_incorrect_"] = 1
_ = _.groupby(["installation_id", "game_session"]).sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["num_incorrect_"])
_["accuracy_group"] = (
_["num_incorrect_"].apply(lambda x: 3 if x == 0 else (2 if x == 1 else 1))
* _["num_correct_"]
)
# return _.loc[:, ["installation_id", "game_session", "num_correct_", "num_incorrect_", "accuracy_", "accuracy_group"]]
return _.loc[:, ["installation_id", "game_session", "accuracy_group"]]
def cart_assessment_2(self):
_ = self.df.query(
"title=='Cart Balancer (Assessment)' and event_id=='b74258a0'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["misses"] = _.event_data.apply(
lambda x: (
json.loads(x)["misses"] if "misses" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 1
_ = _.groupby("game_session").sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["misses"])
return _.loc[:, ["game_session", "num_correct_", "misses", "accuracy_"]]
# Assessment evaluation-Chest Sorter (Assessment)
def chest_assessment(self):
_ = self.df.query("title=='Chest Sorter (Assessment)' and event_id=='93b353f2'")
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["correct"] = _.event_data.apply(
lambda x: (
json.loads(x)["correct"] if "correct" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 0
_["num_incorrect_"] = 0
_.loc[_.correct == True, "num_correct_"] = 1
_.loc[_.correct == False, "num_incorrect_"] = 1
_ = _.groupby(["installation_id", "game_session"]).sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["num_incorrect_"])
_["accuracy_group"] = (
_["num_incorrect_"].apply(lambda x: 3 if x == 0 else (2 if x == 1 else 1))
* _["num_correct_"]
)
# return _.loc[:, ["installation_id", "game_session", "num_correct_", "num_incorrect_", "accuracy_", "accuracy_group"]]
return _.loc[:, ["installation_id", "game_session", "accuracy_group"]]
def chest_assessment_2(self):
_ = self.df.query("title=='Chest Sorter (Assessment)' and event_id=='38074c54'")
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["misses"] = _.event_data.apply(
lambda x: (
json.loads(x)["misses"] if "misses" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 1
_ = _.groupby("game_session").sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["misses"])
return _.loc[:, ["game_session", "num_correct_", "misses", "accuracy_"]]
# Assessment evaluation-Cauldron Filler (Assessment)
def cauldron_assessment(self):
_ = self.df.query(
"title=='Cauldron Filler (Assessment)' and event_id=='392e14df'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["correct"] = _.event_data.apply(
lambda x: (
json.loads(x)["correct"] if "correct" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 0
_["num_incorrect_"] = 0
_.loc[_.correct == True, "num_correct_"] = 1
_.loc[_.correct == False, "num_incorrect_"] = 1
_ = _.groupby(["installation_id", "game_session"]).sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["num_incorrect_"])
_["accuracy_group"] = (
_["num_incorrect_"].apply(lambda x: 3 if x == 0 else (2 if x == 1 else 1))
* _["num_correct_"]
)
# return _.loc[:, ["installation_id", "game_session", "num_correct_", "num_incorrect_", "accuracy_", "accuracy_group"]]
return _.loc[:, ["installation_id", "game_session", "accuracy_group"]]
def cauldron_assessment_2(self):
_ = self.df.query(
"title=='Cauldron Filler (Assessment)' and event_id=='28520915'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["misses"] = _.event_data.apply(
lambda x: (
json.loads(x)["misses"] if "misses" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 1
_ = _.groupby("game_session").sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["misses"])
return _.loc[:, ["game_session", "num_correct_", "misses", "accuracy_"]]
# Assessment evaluation-Mushroom Sorter (Assessment)
def mushroom_assessment(self):
_ = self.df.query(
"title=='Mushroom Sorter (Assessment)' and event_id=='25fa8af4'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["correct"] = _.event_data.apply(
lambda x: (
json.loads(x)["correct"] if "correct" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 0
_["num_incorrect_"] = 0
_.loc[_.correct == True, "num_correct_"] = 1
_.loc[_.correct == False, "num_incorrect_"] = 1
_ = _.groupby(["installation_id", "game_session"]).sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["num_incorrect_"])
_["accuracy_group"] = (
_["num_incorrect_"].apply(lambda x: 3 if x == 0 else (2 if x == 1 else 1))
* _["num_correct_"]
)
# return _.loc[:, ["installation_id", "game_session", "num_correct_", "num_incorrect_", "accuracy_", "accuracy_group"]]
return _.loc[:, ["installation_id", "game_session", "accuracy_group"]]
def mushroom_assessment_2(self):
_ = self.df.query(
"title=='Mushroom Sorter (Assessment)' and event_id=='6c930e6e'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["misses"] = _.event_data.apply(
lambda x: (
json.loads(x)["misses"] if "misses" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 1
_ = _.groupby("game_session").sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["misses"])
return _.loc[:, ["game_session", "num_correct_", "misses", "accuracy_"]]
# Assessment evaluation-Bird Measurer (Assessment)
def bird_assessment(self):
_ = self.df.query(
"title=='Bird Measurer (Assessment)' and event_id=='17113b36'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["correct"] = _.event_data.apply(
lambda x: (
json.loads(x)["correct"] if "correct" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 0
_["num_incorrect_"] = 0
_.loc[_.correct == True, "num_correct_"] = 1
_.loc[_.correct == False, "num_incorrect_"] = 1
_ = _.groupby(["installation_id", "game_session"]).sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["num_incorrect_"])
_["accuracy_group"] = (
_["num_incorrect_"].apply(lambda x: 3 if x == 0 else (2 if x == 1 else 1))
* _["num_correct_"]
)
# return _.loc[:, ["installation_id", "game_session", "num_correct_", "num_incorrect_", "accuracy_", "accuracy_group"]]
return _.loc[:, ["installation_id", "game_session", "accuracy_group"]]
def bird_assessment_2(self):
_ = self.df.query(
"title=='Bird Measurer (Assessment)' and event_id=='f6947f54'"
)
_ = _.loc[:, ["game_session", "installation_id", "event_data"]]
_["misses"] = _.event_data.apply(
lambda x: (
json.loads(x)["misses"] if "misses" in json.loads(x).keys() else -999
)
)
_["num_correct_"] = 1
_ = _.groupby("game_session").sum().reset_index()
_["accuracy_"] = _["num_correct_"] / (_["num_correct_"] + _["misses"])
return _.loc[:, ["game_session", "num_correct_", "misses", "accuracy_"]]
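# The accuracy_group expression used in the methods above maps (num_correct_,
# num_incorrect_) to the 0-3 target: with a correct answer, 0 misses -> 3, 1 miss -> 2,
# 2 or more misses -> 1; with no correct answer the product is 0. A quick check of the
# same expression on toy data:
_demo = pd.DataFrame({"num_correct_": [1, 1, 1, 0], "num_incorrect_": [0, 1, 4, 3]})
_demo["accuracy_group"] = (
    _demo["num_incorrect_"].apply(lambda x: 3 if x == 0 else (2 if x == 1 else 1))
    * _demo["num_correct_"]
)
print(_demo.accuracy_group.tolist())  # [3, 2, 1, 0]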
# quadratic kappa
def quadratic_kappa(actuals, preds, N=4):
w = np.zeros((N, N))
O = confusion_matrix(actuals, preds)
for i in range(len(w)):
for j in range(len(w)):
w[i][j] = float(((i - j) ** 2) / (N - 1) ** 2)
act_hist = np.zeros([N])
for item in actuals:
act_hist[item] += 1
pred_hist = np.zeros([N])
for item in preds:
pred_hist[item] += 1
E = np.outer(act_hist, pred_hist)
E = E / E.sum()
O = O / O.sum()
num = 0
den = 0
for i in range(len(w)):
for j in range(len(w)):
num += w[i][j] * O[i][j]
den += w[i][j] * E[i][j]
return 1 - (num / den)
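# Quick sanity check of the metric above on hypothetical labels: perfect agreement
# scores 1.0, and any disagreement pulls the score below 1.
_actuals_demo = np.array([0, 1, 2, 3, 3, 2])
print(quadratic_kappa(_actuals_demo, _actuals_demo))  # 1.0
print(quadratic_kappa(_actuals_demo, np.array([0, 1, 2, 3, 2, 2])))  # < 1.0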
test["timestamp"] = pd.to_datetime(test.timestamp)
test.sort_values(["timestamp", "event_count"], ascending=True, inplace=True)
_ = accuracy(test).cart_assessment()
_ = _.append(accuracy(test).chest_assessment(), ignore_index=True)
_ = _.append(accuracy(test).cauldron_assessment(), ignore_index=True)
_ = _.append(accuracy(test).mushroom_assessment(), ignore_index=True)
_ = _.append(accuracy(test).bird_assessment(), ignore_index=True)
test = test[test.installation_id.isin(pd.unique(_.installation_id))]
test = test.merge(_, how="left", on=["installation_id", "game_session"])
df_test = []
idx = 0
for _, val in tqdm.tqdm_notebook(test.groupby("installation_id", sort=False)):
# for _, val in tqdm.notebook.tqdm(test.groupby("installation_id", sort=False)):
val.reset_index(drop=True, inplace=True)
_ = val.query("type=='Assessment'")
_ = _[~_.accuracy_group.isnull()]
session = _.reset_index().groupby("game_session", sort=False).index.first().values
for j in session:
sample = val[: j + 1]
sample["ID"] = idx
idx += 1
df_test.append(sample)
label = pd.DataFrame(columns=["ID", "accuracy_group"])
for i in tqdm.tqdm_notebook(df_test):
# for i in tqdm.notebook.tqdm(df_test):
label = pd.concat([label, i.iloc[-1:, -2:]], sort=False)
label.reset_index(drop=True, inplace=True)
label.accuracy_group = label.accuracy_group.astype("int8")
df = train[train.installation_id.isin(pd.unique(label_.installation_id))]
del train
df = df.merge(
label_.loc[:, ["installation_id", "game_session", "title", "accuracy_group"]],
on=["installation_id", "game_session", "title"],
how="left",
)
df["timestamp"] = pd.to_datetime(df.timestamp)
df.sort_values(["timestamp", "event_count"], ascending=True, inplace=True)
df.reset_index(drop=True, inplace=True)
df_train = []
idx = max(label.ID) + 1
for _, val in tqdm.tqdm_notebook(df.groupby("installation_id", sort=False)):
# for _, val in tqdm.notebook.tqdm(df.groupby("installation_id", sort=False)):
val.reset_index(drop=True, inplace=True)
session = (
val.query("type=='Assessment'")
.reset_index()
.groupby("game_session", sort=False)
.index.first()
.values
)
for j in session:
if ~np.isnan(val.iat[j, -1]):
sample = val[: j + 1]
sample["ID"] = idx
idx += 1
df_train.append(sample)
for i in tqdm.tqdm_notebook(df_train):
# for i in tqdm.notebook.tqdm(df_train):
label = pd.concat([label, i.iloc[-1:, -2:]], sort=False)
label.reset_index(drop=True, inplace=True)
label.accuracy_group = label.accuracy_group.astype("int8")
label = label.merge(
pd.get_dummies(label.accuracy_group, prefix="y"), left_on=["ID"], right_index=True
)
df_test.extend(df_train)
df_train = df_test
del df_test
display(df_train[0].head()), display(label.head())
# ---
# # Feature Engineering
col = {}
# ## World
# ### world_log
# How many log entries are there in each world
ID = []
world = []
size = []
for i in tqdm.tqdm_notebook(df_train):
# world_log
_ = i.groupby(["ID", "world"]).size().reset_index()
ID.extend(_.ID)
world.extend(_.world)
size.extend(_[0])
world_log = pd.DataFrame(data={"ID": ID, "world": world, "size": size})
world_log = world_log.pivot_table(index="ID", columns="world", values="size")
world_log = world_log.fillna(0)
world_log.columns.name = None
world_log.reset_index(inplace=True)
world_log = world_log.loc[:, ["ID", "CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]]
plt.figure(figsize=(30, 10))
for idx, val in enumerate(["CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]):
plt.subplot(2, 4, idx + 1)
for i in [0, 1, 2, 3]:
sns.distplot(world_log.merge(label).query("accuracy_group==@i")[val], label=i)
plt.legend()
plt.subplot(2, 4, idx + 5)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(world_log.merge(label).query("accuracy_group==@i")[val] + 1), label=i
)
plt.legend()
plt.show()
world_log = world_log.add_suffix("_l")
world_log.rename(columns={"ID_l": "ID"}, inplace=True)
if dp_log == True:
world_log.iloc[:, 1:] = np.log(world_log.iloc[:, 1:] + 1)
gc.collect()
world_log.head()
# ### world_time
# How much time was spent playing in each world
ID = []
world = []
game_time = []
for i in tqdm.tqdm_notebook(df_train):
# world_time
_ = i.groupby(["ID", "world", "game_session"]).game_time.max().reset_index()
ID.extend(_.ID)
world.extend(_.world)
game_time.extend(_.game_time)
world_time = pd.DataFrame(data={"ID": ID, "world": world, "game_time": game_time})
world_time = world_time.groupby(["ID", "world"]).sum().reset_index()
world_time = world_time.pivot_table(index="ID", columns="world", values="game_time")
world_time = world_time.fillna(-1)
world_time.columns.name = None
world_time["ID"] = world_time.index
world_time.reset_index(drop=True, inplace=True)
world_time = world_time.loc[
:, ["ID", "CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]
]
plt.figure(figsize=(30, 10))
for idx, val in enumerate(["CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]):
plt.subplot(2, 4, idx + 1)
for i in [0, 1, 2, 3]:
sns.distplot(world_time.merge(label).query("accuracy_group==@i")[val], label=i)
plt.legend()
plt.subplot(2, 4, idx + 5)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(world_time.merge(label).query("accuracy_group==@i")[val] + 2),
label=i,
)
plt.legend()
plt.show()
plt.figure(figsize=(30, 10))
for idx, val in enumerate(["CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]):
plt.subplot(2, 4, idx + 1)
sns.distplot(world_time[val])
# plt.title(val)
plt.subplot(2, 4, idx + 5)
sns.distplot(np.log(world_time[val] + 2))
# plt.title(val)
plt.show()
world_time.drop(columns=["NONE"], inplace=True)
world_time = world_time.add_suffix("_t")
world_time.rename(columns={"ID_t": "ID"}, inplace=True)
if dp_log == True:
world_time.iloc[:, 1:] = np.log(world_time.iloc[:, 1:] + 2)
gc.collect()
world_time.head()
# ### world_session
# How many sessions are opened in each world
ID = []
world = []
game_session = []
for i in tqdm.tqdm_notebook(df_train):
# world_session
_ = i.groupby(["ID", "world"]).game_session.nunique().reset_index()
ID.extend(_.ID)
world.extend(_.world)
game_session.extend(_.game_session)
world_session = pd.DataFrame(
data={"ID": ID, "world": world, "game_session": game_session}
)
world_session = world_session.pivot_table(
index="ID", columns="world", values="game_session"
)
world_session = world_session.fillna(0)
world_session.columns.name = None
world_session["ID"] = world_session.index
world_session.reset_index(drop=True, inplace=True)
world_session = world_session.loc[
:, ["ID", "CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]
]
plt.figure(figsize=(30, 10))
for idx, val in enumerate(["CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]):
plt.subplot(2, 4, idx + 1)
for i in [0, 1, 2, 3]:
sns.distplot(
world_session.merge(label).query("accuracy_group==@i")[val], label=i
)
plt.legend()
plt.subplot(2, 4, idx + 5)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(world_session.merge(label).query("accuracy_group==@i")[val] + 1),
label=i,
)
plt.legend()
plt.show()
plt.figure(figsize=(30, 10))
for idx, val in enumerate(["CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]):
plt.subplot(2, 4, idx + 1)
sns.distplot(world_session[val])
# plt.title(val)
plt.subplot(2, 4, idx + 5)
sns.distplot(np.log(world_session[val] + 1))
# plt.title(val)
plt.show()
world_session = world_session.add_suffix("_s")
world_session.rename(columns={"ID_s": "ID"}, inplace=True)
if dp_log == True:
world_session.iloc[:, 1:] = np.log(world_session.iloc[:, 1:] + 1)
gc.collect()
world_session.head()
# ## Event_id
# How many times each event_id is triggered
ID = []
event_id = []
size = []
for i in tqdm.tqdm_notebook(df_train):
# event_id
_ = i.groupby(["ID", "event_id"]).size().reset_index()
ID.extend(_.ID)
event_id.extend(_.event_id)
size.extend(_[0])
event_id = pd.DataFrame(data={"ID": ID, "event_id": event_id, "size": size})
event_id = event_id.pivot_table(index="ID", columns="event_id", values="size")
event_id = event_id.fillna(0)
event_id.columns.name = None
event_id.index.name = None
event_id["ID"] = event_id.index
event_id.reset_index(drop=True, inplace=True)
if dp_log == True:
event_id.iloc[:, :-1] = np.log(event_id.iloc[:, :-1] + 1)
# event_id.iloc[:, 1:] = np.log(event_id.iloc[:, 1:]+1)
gc.collect()
event_id.head()
# ## Duration
None
# ## Game time
# ### play_time
# How long the game was played in total
ID = []
game_time = []
for i in tqdm.tqdm_notebook(df_train):
# play_time
_ = i.groupby(["ID", "game_session"]).game_time.max().reset_index()
ID.extend(_.ID)
game_time.extend(_.game_time)
play_time = pd.DataFrame(data={"ID": ID, "game_time": game_time})
play_time = play_time.groupby(["ID"]).sum().reset_index()
play_time.reset_index(drop=True, inplace=True)
play_time = play_time.fillna(0)
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
for i in [0, 1, 2, 3]:
sns.distplot(
play_time.merge(label).query("accuracy_group==@i")["game_time"], label=i
)
plt.legend()
plt.subplot(1, 2, 2)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(play_time.merge(label).query("accuracy_group==@i")["game_time"] + 1),
label=i,
)
plt.legend()
plt.show()
if dp_log == True:
play_time.iloc[:, 1:] = np.log(play_time.iloc[:, 1:] + 1)
gc.collect()
play_time.head()
# ### gap_time
# The gap between start and end
gap_time = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_train):
# gap_time
_ = i.groupby(["ID"]).timestamp.agg(["min", "max"])
_.columns.name = None
gap_time = pd.concat([gap_time, _], sort=True)
gap_time.reset_index(inplace=True)
gap_time["gap"] = gap_time["max"] - gap_time["min"]
gap_time["gap"] = gap_time["gap"].astype("int")
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
for i in [0, 1, 2, 3]:
sns.distplot(gap_time.merge(label).query("accuracy_group==@i")["gap"], label=i)
plt.legend()
plt.subplot(1, 2, 2)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(gap_time.merge(label).query("accuracy_group==@i")["gap"] + 1), label=i
)
plt.legend()
plt.show()
gap_time.drop(columns=["max", "min"], inplace=True)
if dp_log == True:
gap_time.iloc[:, 1:] = np.log(gap_time.iloc[:, 1:] + 1)
gc.collect()
gap_time.head()
# ## Session
# ### Session_count
# How many sessions are opened?
session_count = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_train):
# session_count
_ = i.groupby(["ID"]).game_session.nunique().reset_index()
_.columns.name = None
session_count = pd.concat([session_count, _], sort=True)
session_count.reset_index(drop=True, inplace=True)
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
for i in [0, 1, 2, 3]:
sns.distplot(
session_count.merge(label).query("accuracy_group==@i")["game_session"],
bins=50,
label=i,
)
plt.legend()
plt.subplot(1, 2, 2)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(session_count.merge(label).query("accuracy_group==@i")["game_session"]),
bins=50,
label=i,
)
plt.legend()
plt.show()
if dp_log == True:
session_count.iloc[:, 1:] = np.log(session_count.iloc[:, 1:])
gc.collect()
session_count.head()
# ### Session length
# How long did you play in each session on average? (mean, log)
session_length = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_train):
# session_length
# _ = i.query("type!='Clip'").groupby(["ID", "game_session"]).size().groupby(["ID"]).mean().reset_index().rename(columns={0:"session_length"})
_ = (
i.groupby(["ID", "game_session"])
.size()
.groupby(["ID"])
.mean()
.reset_index()
.rename(columns={0: "session_length"})
)
_.columns.name = None
session_length = pd.concat([session_length, _], sort=True)
session_length.reset_index(drop=True, inplace=True)
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
for i in [0, 1, 2, 3]:
sns.distplot(
session_length.merge(label).query("accuracy_group==@i")["session_length"],
bins=50,
label=i,
)
plt.legend()
plt.subplot(1, 2, 2)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(
session_length.merge(label).query("accuracy_group==@i")["session_length"]
),
bins=50,
label=i,
)
plt.legend()
plt.show()
if dp_log == True:
session_length.iloc[:, 1:] = np.log(session_length.iloc[:, 1:])
gc.collect()
session_length.head()
# ### Session time
# How long did you play in each session on average? (mean, time)
session_time = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_train):
# session_time
_ = (
i.groupby(["ID", "game_session"])
.game_time.max()
.groupby(["ID"])
.mean()
.reset_index()
)
_.columns.name = None
session_time = pd.concat([session_time, _], sort=True)
session_time.reset_index(drop=True, inplace=True)
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
for i in [0, 1, 2, 3]:
sns.distplot(
session_time.merge(label).query("accuracy_group==@i")["game_time"],
bins=50,
label=i,
)
plt.legend()
plt.subplot(1, 2, 2)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(session_time.merge(label).query("accuracy_group==@i")["game_time"] + 1),
bins=50,
label=i,
)
plt.legend()
plt.show()
if dp_log == True:
session_time.iloc[:, 1:] = np.log(session_time.iloc[:, 1:] + 1)
gc.collect()
session_time.head()
# ## Type
ID = []
types = []
size = []
for i in tqdm.tqdm_notebook(df_train):
# types
_ = i.groupby(["ID", "type"]).size().reset_index()
ID.extend(_.ID)
types.extend(_.type)
size.extend(_[0])
types = pd.DataFrame(data={"ID": ID, "type": types, "size": size})
types = types.pivot_table(index="ID", columns="type", values="size")
types.columns.name = None
types.index.name = None
types = types.fillna(0)
types["ID"] = types.index
types = types.loc[:, ["ID", "Activity", "Assessment", "Clip", "Game"]]
plt.figure(figsize=(30, 10))
for idx, val in enumerate(["Activity", "Assessment", "Clip", "Game"]):
plt.subplot(2, 4, idx + 1)
for i in [0, 1, 2, 3]:
sns.distplot(types.merge(label).query("accuracy_group==@i")[val], label=i)
plt.legend()
plt.subplot(2, 4, idx + 5)
for i in [0, 1, 2, 3]:
sns.distplot(
np.log(types.merge(label).query("accuracy_group==@i")[val] + 1), label=i
)
plt.legend()
plt.show()
if dp_log == True:
types.iloc[:, 1:] = np.log(types.iloc[:, 1:] + 1)
gc.collect()
types.head()
# ## Title
# Which titles are played?
ID = []
title = []
size = []
for i in tqdm.tqdm_notebook(df_train):
# title
_ = i.groupby(["ID", "title"]).size().reset_index()
ID.extend(_.ID)
title.extend(_.title)
size.extend(_[0])
title = pd.DataFrame(data={"ID": ID, "title": title, "size": size})
title = title.pivot_table(index="ID", columns="title", values="size")
title.columns.name = None
title.index.name = None
title = title.fillna(0)
title["ID"] = title.index
if dp_log == True:
title.iloc[:, :-1] = np.log(title.iloc[:, :-1] + 1)
gc.collect()
title.head()
# ## Last Assessment type
# target Assessment type
assessment = pd.DataFrame(columns=["ID", "title"])
for i in tqdm.tqdm_notebook(df_train):
# assessment
_ = i.tail(1).loc[:, ["ID", "title"]].reset_index(drop=True)
assessment = pd.concat([assessment, _], sort=False)
assessment["Assessment_1"] = 0
assessment["Assessment_2"] = 0
assessment["Assessment_3"] = 0
assessment["Assessment_4"] = 0
assessment["Assessment_5"] = 0
assessment.loc[assessment.title == "Mushroom Sorter (Assessment)", "Assessment_1"] = 1
assessment.loc[assessment.title == "Cauldron Filler (Assessment)", "Assessment_2"] = 1
assessment.loc[assessment.title == "Chest Sorter (Assessment)", "Assessment_3"] = 1
assessment.loc[assessment.title == "Cart Balancer (Assessment)", "Assessment_4"] = 1
assessment.loc[assessment.title == "Bird Measurer (Assessment)", "Assessment_5"] = 1
_ = assessment.merge(label).groupby(["title", "accuracy_group"]).size().reset_index()
_.accuracy_group = _.accuracy_group.astype("object")
plt.figure(figsize=(20, 10))
sns.barplot(x="title", y=0, hue="accuracy_group", data=_, dodge=True, alpha=0.7)
plt.show()
plt.figure(figsize=(20, 10))
plt.bar(
"title",
height="count",
data=assessment.groupby("title").size().reset_index().rename(columns={0: "count"}),
)
plt.show()
del assessment["title"]
# assessment = assessment.loc[:, ["ID", "Assessment_1", "Assessment_2", "Assessment_3", "Assessment_4", "Assessment_5"]]
assessment.head()
# ## Assessment time
# When did the player submit the assessment?
time = pd.DataFrame(columns=["ID", "timestamp"])
for i in tqdm.tqdm_notebook(df_train):
# time
_ = i.tail(1).loc[:, ["ID", "timestamp"]]
time = pd.concat([time, _], sort=False)
time.reset_index(drop=True, inplace=True)
time["hour"] = time.timestamp.dt.hour
time["hour"] = time.hour.astype("object")
time = time.merge(
pd.get_dummies(time.hour, prefix="hour"),
how="left",
left_index=True,
right_index=True,
)
time.drop(columns=["timestamp", "hour"], inplace=True)
time.head()
# ## GAME
# For the Game type, we can extract the round feature.
ID = []
game_title = []
game_round = []
for i in tqdm.tqdm_notebook(df_train):
if "Game" in i.type.unique():
_ = (
i.query("type=='Game'")
.loc[:, ["ID", "title", "event_data"]]
.set_index(["ID", "title"])
.event_data.apply(lambda x: json.loads(x)["round"])
.groupby(["ID", "title"])
.max()
.reset_index()
)
ID.extend(list(_.ID))
game_title.extend(_.title)
game_round.extend(_.event_data)
game = pd.DataFrame(data={"ID": ID, "game_title": game_title, "round": game_round})
game = game.pivot_table(index="ID", columns="game_title", values="round")
game.reset_index(inplace=True)
game.columns.name = None
game = game.fillna(-1)
ID = pd.DataFrame(data={"ID": range(0, len(df_train))})
game = ID.merge(game, how="left")
game = game.fillna(-1)
game = game.add_suffix("_r")
game.rename(columns={"ID_r": "ID"}, inplace=True)
game.head()
# ---
# # Merge all data sets
# world_log, world_time, world_session
# event_id
# play_time, gap_time
# session_count, session_length, session_time
# types
# title
# assessment
# time
# game
# data_set = [world_log, world_time, world_session, event_id, play_time, gap_time, session_count, session_length, session_time, types, title, assessment, time, game]
# _ = pd.concat(data_set, axis=1, keys=["ID"])
_ = world_log.merge(world_time, how="left", on=["ID"])
_ = _.merge(world_session, how="left", on=["ID"])
_ = _.merge(event_id, how="left", on=["ID"])
_ = _.merge(play_time, how="left", on=["ID"])
_ = _.merge(gap_time, how="left", on=["ID"])
_ = _.merge(session_count, how="left", on=["ID"])
_ = _.merge(session_length, how="left", on=["ID"])
_ = _.merge(session_time, how="left", on=["ID"])
_ = _.merge(types, how="left", on=["ID"])
_ = _.merge(title, how="left", on=["ID"])
_ = _.merge(assessment, how="left", on=["ID"])
_ = _.merge(time, how="left", on=["ID"])
_ = _.merge(game, how="left", on=["ID"])
train_x_col = list(_.columns)
train_y_col = ["accuracy_group", "y_0", "y_1", "y_2", "y_3"]
_.to_csv("train.csv", index=False)
label.to_csv("label.csv", index=False)
# ---
# # Scaling / Data Split
if loss_type == "mse":
if scale_type == "minmax":
scaler = MinMaxScaler()
elif scale_type == "robust":
scaler = RobustScaler()
elif scale_type == "standard":
scaler = StandardScaler()
scaler_y = MinMaxScaler()
train_x = scaler.fit_transform(_.loc[:, train_x_col[1:]])
# train_y = scaler_y.fit_transform([_.loc[:, "accuracy_group"]])
    train_y = label.loc[:, ["accuracy_group"]]
print(train_x[0])
print(train_y.iloc[0, :])
elif loss_type == "category":
if scale_type == "minmax":
scaler = MinMaxScaler()
elif scale_type == "robust":
scaler = RobustScaler()
elif scale_type == "standard":
scaler = StandardScaler()
train_x = scaler.fit_transform(_.loc[:, train_x_col[1:]])
train_y = label.loc[:, train_y_col]
print(train_x[0])
print(train_y.iloc[0, :])
class_weights = class_weight.compute_class_weight(
"balanced", np.unique(label.accuracy_group), label.accuracy_group
)
np.unique(label.accuracy_group), class_weights
label["class_weight"] = 0
label.loc[label.accuracy_group == 0, "class_weight"] = class_weights[0]
label.loc[label.accuracy_group == 1, "class_weight"] = class_weights[1]
label.loc[label.accuracy_group == 2, "class_weight"] = class_weights[2]
label.loc[label.accuracy_group == 3, "class_weight"] = class_weights[3]
if validation:
    # class_weight was added to label after train_y was built, so attach it before the split
    train_y = train_y.join(label["class_weight"])
    train_x, val_x, train_y, val_y = train_test_split(
        train_x, train_y, random_state=1228
    )
display(train_x.shape, train_y.shape)
for i in range(len(train_x[0])):
print(i, min(train_x[:, i]), max(train_x[:, i]))
# ---
# # Modeling
# leakyrelu = keras.layers.LeakyReLU(alpha=0.3)
leakyrelu = tf.nn.leaky_relu
model = keras.models.Sequential()
model.add(keras.layers.Dense(128, activation=leakyrelu, kernel_initializer="he_normal"))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(256, activation=leakyrelu, kernel_initializer="he_normal"))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(256, activation=leakyrelu, kernel_initializer="he_normal"))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(128, activation=leakyrelu, kernel_initializer="he_normal"))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(64, activation=leakyrelu, kernel_initializer="he_normal"))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(32, activation=leakyrelu, kernel_initializer="he_normal"))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.Dense(16, activation=leakyrelu, kernel_initializer="he_normal"))
model.add(keras.layers.Dropout(0.3))
if loss_type == "mse":
model.add(keras.layers.Dense(1, activation="linear"))
model.compile(loss="mse", optimizer="Adam")
elif loss_type == "category":
model.add(keras.layers.Dense(4, activation="softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer="Adam",
metrics=["categorical_accuracy"],
)
# keras.backend.reset_uids()
if not os.path.exists("model"):
os.mkdir("model")
if validation:
if loss_type == "mse":
model.fit(
x=train_x,
y=train_y.loc[:, ["accuracy_group"]],
validation_data=[val_x, val_y.loc[:, ["accuracy_group"]]],
epochs=50,
batch_size=batch_sizes,
shuffle=True,
            class_weight=dict(enumerate(class_weights)),
)
elif loss_type == "category":
model.fit(
x=train_x,
y=train_y.loc[:, ["y_0", "y_1", "y_2", "y_3"]].values,
validation_data=[val_x, val_y.loc[:, ["y_0", "y_1", "y_2", "y_3"]].values],
epochs=1000,
batch_size=batch_sizes,
shuffle=True,
sample_weight=train_y.loc[:, ["class_weight"]].values.flatten(),
callbacks=[
keras.callbacks.EarlyStopping(
monitor="val_categorical_accuracy", patience=100, mode="auto"
),
keras.callbacks.ModelCheckpoint(
"model/weights.{epoch:02d}-{val_categorical_accuracy:.3f}.hdf5",
monitor="val_categorical_accuracy",
verbose=0,
save_best_only=True,
save_weights_only=False,
mode="auto",
period=1,
),
],
)
if validation == False:
if loss_type == "mse":
model.fit(
train_x,
train_y.values,
epochs=150,
batch_size=batch_sizes,
verbose=1,
validation_split=0.1,
shuffle=True,
)
elif loss_type == "category":
model.fit(
train_x,
            train_y.loc[:, ["y_0", "y_1", "y_2", "y_3"]].values,
epochs=100,
batch_size=batch_sizes,
verbose=1,
validation_split=0.1,
shuffle=True,
)
# model.fit(train_x, _.accuracy_group.values, epochs=20, batch_size=10, verbose=1, validation_split=.1, shuffle=True)
if loss_type == "mse":
plt.figure(figsize=(40, 20))
plt.subplot(2, 1, 1)
plt.plot(model.history.history["loss"], "o-", alpha=0.4, label="loss")
plt.plot(model.history.history["val_loss"], "o-", alpha=0.4, label="val_loss")
plt.axhline(1.2, linestyle="--", c="C2")
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(model.history.history["loss"][3:], "o-", alpha=0.4, label="loss")
plt.plot(model.history.history["val_loss"][3:], "o-", alpha=0.4, label="val_loss")
plt.axhline(1.1, linestyle="--", c="C2")
plt.legend()
plt.show()
elif loss_type == "category":
plt.figure(figsize=(40, 20))
plt.subplot(2, 1, 1)
plt.plot(model.history.history["loss"], "o-", alpha=0.4, label="loss")
plt.plot(model.history.history["val_loss"], "o-", alpha=0.4, label="val_loss")
plt.axhline(1.05, linestyle="--", c="C2")
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(
model.history.history["categorical_accuracy"],
"o-",
alpha=0.4,
label="categorical_accuracy",
)
plt.plot(
model.history.history["val_categorical_accuracy"],
"o-",
alpha=0.4,
label="val_categorical_accuracy",
)
plt.axhline(0.65, linestyle="--", c="C2")
plt.legend()
plt.show()
np.sort(os.listdir("model"))
model = keras.models.load_model(
"model/" + np.sort(os.listdir("model"))[-1],
custom_objects={"leaky_relu": tf.nn.leaky_relu},
)
np.sort(os.listdir("model"))[-1]
if validation:
if loss_type == "mse":
result = model.predict(train_x)
result[result <= 1.12232214] = 0
result[np.where(np.logical_and(result > 1.12232214, result <= 1.73925866))] = 1
result[np.where(np.logical_and(result > 1.73925866, result <= 2.22506454))] = 2
result[result > 2.22506454] = 3
result = result.astype("int")
print(quadratic_kappa(train_y.accuracy_group, result))
elif loss_type == "category":
result = model.predict(train_x)
print(quadratic_kappa(train_y.accuracy_group, result.argmax(axis=1)))
# ---
# # Predict
test = pd.read_csv("/kaggle/input/data-science-bowl-2019/test.csv", dtype=dtypes)
test["timestamp"] = pd.to_datetime(test.timestamp)
label = []
df_test = []
for idx, val in tqdm.tqdm_notebook(test.groupby(["installation_id"])):
label.append(idx)
df_test.append(val)
col = {}
for i in [
"world_log",
"world_time",
"world_session",
"event_id",
"play_time",
"gap_time",
"session_count",
"session_length",
"session_time",
"types",
"title",
"assessment",
"time",
"game",
]:
vars()[i].rename(columns={"ID": "installation_id"}, inplace=True)
col[i] = list(vars()[i].columns)
# world_log
installation_id = []
world = []
size = []
for i in tqdm.tqdm_notebook(df_test):
# world_log
_ = i.groupby(["installation_id", "world"]).size().reset_index()
installation_id.extend(_.installation_id)
world.extend(_.world)
size.extend(_[0])
world_log = pd.DataFrame(
data={"installation_id": installation_id, "world": world, "size": size}
)
world_log = world_log.pivot_table(
index="installation_id", columns="world", values="size"
)
world_log = world_log.fillna(0)
world_log.columns.name = None
world_log.reset_index(inplace=True)
world_log = world_log.loc[
:, ["installation_id", "CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]
]
world_log = world_log.add_suffix("_l")
world_log.rename(columns={"installation_id_l": "installation_id"}, inplace=True)
world_log = world_log.loc[:, col["world_log"]]
world_log = world_log.fillna(0)
# world_time
installation_id = []
world = []
game_time = []
for i in tqdm.tqdm_notebook(df_test):
# world_time
_ = (
i.groupby(["installation_id", "world", "game_session"])
.game_time.max()
.reset_index()
)
installation_id.extend(_.installation_id)
world.extend(_.world)
game_time.extend(_.game_time)
world_time = pd.DataFrame(
data={"installation_id": installation_id, "world": world, "game_time": game_time}
)
world_time = world_time.groupby(["installation_id", "world"]).sum().reset_index()
world_time = world_time.pivot_table(
index="installation_id", columns="world", values="game_time"
)
world_time = world_time.fillna(-1)
world_time.columns.name = None
world_time["installation_id"] = world_time.index
world_time.reset_index(drop=True, inplace=True)
world_time = world_time.loc[
:, ["installation_id", "CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]
]
world_time = world_time.add_suffix("_t")
world_time.rename(columns={"installation_id_t": "installation_id"}, inplace=True)
world_time = world_time.loc[:, col["world_time"]]
world_time = world_time.fillna(-1)
# world_session
installation_id = []
world = []
game_session = []
for i in tqdm.tqdm_notebook(df_test):
# world_session
_ = i.groupby(["installation_id", "world"]).game_session.nunique().reset_index()
installation_id.extend(_.installation_id)
world.extend(_.world)
game_session.extend(_.game_session)
world_session = pd.DataFrame(
data={
"installation_id": installation_id,
"world": world,
"game_session": game_session,
}
)
world_session = world_session.pivot_table(
index="installation_id", columns="world", values="game_session"
)
world_session = world_session.fillna(0)
world_session.columns.name = None
world_session["installation_id"] = world_session.index
world_session.reset_index(drop=True, inplace=True)
world_session = world_session.loc[
:, ["installation_id", "CRYSTALCAVES", "MAGMAPEAK", "TREETOPCITY", "NONE"]
]
world_session = world_session.add_suffix("_s")
world_session.rename(columns={"installation_id_s": "installation_id"}, inplace=True)
world_session = world_session.loc[:, col["world_session"]]
world_session = world_session.fillna(0)
# event_id
installation_id = []
event_id = []
size = []
for i in tqdm.tqdm_notebook(df_test):
# event_id
_ = i.groupby(["installation_id", "event_id"]).size().reset_index()
installation_id.extend(_.installation_id)
event_id.extend(_.event_id)
size.extend(_[0])
event_id = pd.DataFrame(
data={"installation_id": installation_id, "event_id": event_id, "size": size}
)
event_id = event_id.pivot_table(
index="installation_id", columns="event_id", values="size"
)
event_id = event_id.fillna(0)
event_id.columns.name = None
event_id.index.name = None
event_id["installation_id"] = event_id.index
event_id.reset_index(drop=True, inplace=True)
event_id = event_id.loc[:, col["event_id"]]
event_id = event_id.fillna(0)
# play_time
installation_id = []
game_time = []
for i in tqdm.tqdm_notebook(df_test):
# play_time
_ = i.groupby(["installation_id", "game_session"]).game_time.max().reset_index()
installation_id.extend(_.installation_id)
game_time.extend(_.game_time)
play_time = pd.DataFrame(
data={"installation_id": installation_id, "game_time": game_time}
)
play_time = play_time.groupby(["installation_id"]).sum().reset_index()
play_time.reset_index(drop=True, inplace=True)
play_time = play_time.fillna(0)
play_time = play_time.loc[:, col["play_time"]]
play_time = play_time.fillna(0)
# gap_time
gap_time = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_test):
# gap_time
_ = i.groupby(["installation_id"]).timestamp.agg(["min", "max"])
_.columns.name = None
gap_time = pd.concat([gap_time, _], sort=True)
gap_time.reset_index(inplace=True)
gap_time["gap"] = gap_time["max"] - gap_time["min"]
gap_time["gap"] = gap_time["gap"].astype("int")
gap_time = gap_time.loc[:, col["gap_time"]]
gap_time = gap_time.fillna(0)
# session_count
session_count = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_test):
# session_count
_ = i.groupby(["installation_id"]).game_session.nunique().reset_index()
_.columns.name = None
session_count = pd.concat([session_count, _], sort=False)
session_count.reset_index(drop=True, inplace=True)
session_count = session_count.loc[:, col["session_count"]]
session_count = session_count.fillna(0)
# session_length
session_length = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_test):
# session_length
# _ = i.query("type!='Clip'").groupby(["installation_id", "game_session"]).size().groupby(["installation_id"]).mean().reset_index().rename(columns={0:"session_length"})
_ = (
i.groupby(["installation_id", "game_session"])
.size()
.groupby(["installation_id"])
.mean()
.reset_index()
.rename(columns={0: "session_length"})
)
_.columns.name = None
session_length = pd.concat([session_length, _], sort=False)
session_length.reset_index(drop=True, inplace=True)
session_length = session_length.loc[:, col["session_length"]]
session_length = session_length.fillna(0)
# session_time
session_time = pd.DataFrame()
for i in tqdm.tqdm_notebook(df_test):
# session_time
_ = (
i.groupby(["installation_id", "game_session"])
.game_time.max()
.groupby(["installation_id"])
.mean()
.reset_index()
)
_.columns.name = None
session_time = pd.concat([session_time, _], sort=False)
session_time.reset_index(drop=True, inplace=True)
session_time = session_time.loc[:, col["session_time"]]
session_time = session_time.fillna(0)
# types
installation_id = []
types = []
size = []
for i in tqdm.tqdm_notebook(df_test):
# types
_ = i.groupby(["installation_id", "type"]).size().reset_index()
installation_id.extend(_.installation_id)
types.extend(_.type)
size.extend(_[0])
types = pd.DataFrame(
data={"installation_id": installation_id, "type": types, "size": size}
)
types = types.pivot_table(index="installation_id", columns="type", values="size")
types.columns.name = None
types.index.name = None
types = types.fillna(0)
types["installation_id"] = types.index
types = types.loc[:, ["installation_id", "Activity", "Assessment", "Clip", "Game"]]
types = types.loc[:, col["types"]]
types = types.fillna(0)
# title
installation_id = []
title = []
size = []
for i in tqdm.tqdm_notebook(df_test):
# title
_ = i.groupby(["installation_id", "title"]).size().reset_index()
installation_id.extend(_.installation_id)
title.extend(_.title)
size.extend(_[0])
title = pd.DataFrame(
data={"installation_id": installation_id, "title": title, "size": size}
)
title = title.pivot_table(index="installation_id", columns="title", values="size")
title.columns.name = None
title.index.name = None
title = title.fillna(0)
title["installation_id"] = title.index
title = title.loc[:, col["title"]]
title = title.fillna(0)
# assessment
assessment = pd.DataFrame(columns=["installation_id", "title"])
for i in tqdm.tqdm_notebook(df_test):
# assessment
_ = i.tail(1).loc[:, ["installation_id", "title"]].reset_index(drop=True)
assessment = pd.concat([assessment, _], sort=False)
assessment["Assessment_1"] = 0
assessment["Assessment_2"] = 0
assessment["Assessment_3"] = 0
assessment["Assessment_4"] = 0
assessment["Assessment_5"] = 0
assessment.loc[assessment.title == "Mushroom Sorter (Assessment)", "Assessment_1"] = 1
assessment.loc[assessment.title == "Cauldron Filler (Assessment)", "Assessment_2"] = 1
assessment.loc[assessment.title == "Chest Sorter (Assessment)", "Assessment_3"] = 1
assessment.loc[assessment.title == "Cart Balancer (Assessment)", "Assessment_4"] = 1
assessment.loc[assessment.title == "Bird Measurer (Assessment)", "Assessment_5"] = 1
del assessment["title"]
assessment = assessment.loc[:, col["assessment"]]
assessment = assessment.fillna(0)
# time
time = pd.DataFrame(columns=["installation_id", "timestamp"])
for i in tqdm.tqdm_notebook(df_test):
# time
_ = i.tail(1).loc[:, ["installation_id", "timestamp"]]
time = pd.concat([time, _], sort=False)
time.reset_index(drop=True, inplace=True)
time["hour"] = time.timestamp.dt.hour
time["hour"] = time.hour.astype("object")
time = time.merge(
pd.get_dummies(time.hour, prefix="hour"),
how="left",
left_index=True,
right_index=True,
)
time.drop(columns=["timestamp", "hour"], inplace=True)
time = time.loc[:, col["time"]]
time = time.fillna(0)
# game
installation_id = []
game_title = []
game_round = []
for i in tqdm.tqdm_notebook(df_test):
if "Game" in i.type.unique():
_ = (
i.query("type=='Game'")
.loc[:, ["installation_id", "title", "event_data"]]
.set_index(["installation_id", "title"])
.event_data.apply(lambda x: json.loads(x)["round"])
.groupby(["installation_id", "title"])
.max()
.reset_index()
)
installation_id.extend(list(_.installation_id))
game_title.extend(_.title)
game_round.extend(_.event_data)
game = pd.DataFrame(
data={
"installation_id": installation_id,
"game_title": game_title,
"round": game_round,
}
)
game = game.pivot_table(index="installation_id", columns="game_title", values="round")
game.reset_index(inplace=True)
game.columns.name = None
game = game.fillna(-1)
installation_id = pd.DataFrame(data={"installation_id": label})
game = installation_id.merge(game, how="left")
game = game.fillna(-1)
game = game.add_suffix("_r")
game.rename(columns={"installation_id_r": "installation_id"}, inplace=True)
game = game.loc[:, col["game"]]
game = game.fillna(-1)
if dp_log == True:
world_log.iloc[:, 1:] = np.log(world_log.iloc[:, 1:] + 1)
# world_time.drop(columns=["NONE"], inplace=True)
if dp_log == True:
world_time.iloc[:, 1:] = np.log(world_time.iloc[:, 1:] + 2)
if dp_log == True:
world_session.iloc[:, 1:] = np.log(world_session.iloc[:, 1:] + 1)
if dp_log == True:
event_id.iloc[:, :-1] = np.log(event_id.iloc[:, :-1] + 1)
# event_id.iloc[:, 1:] = np.log(event_id.iloc[:, 1:]+1)
if dp_log == True:
play_time.iloc[:, 1:] = np.log(play_time.iloc[:, 1:] + 1)
# gap_time.drop(columns=["max", "min"], inplace=True)
if dp_log == True:
gap_time.iloc[:, 1:] = np.log(gap_time.iloc[:, 1:] + 1)
if dp_log == True:
session_count.iloc[:, 1:] = np.log(session_count.iloc[:, 1:])
if dp_log == True:
session_length.iloc[:, 1:] = np.log(session_length.iloc[:, 1:])
if dp_log == True:
session_time.iloc[:, 1:] = np.log(session_time.iloc[:, 1:] + 1)
if dp_log == True:
types.iloc[:, 1:] = np.log(types.iloc[:, 1:] + 1)
if dp_log == True:
title.iloc[:, :-1] = np.log(title.iloc[:, :-1] + 1)
_ = world_log.merge(world_time, how="left", on=["installation_id"])
_ = _.merge(world_session, how="left", on=["installation_id"])
_ = _.merge(event_id, how="left", on=["installation_id"])
_ = _.merge(play_time, how="left", on=["installation_id"])
_ = _.merge(gap_time, how="left", on=["installation_id"])
_ = _.merge(session_count, how="left", on=["installation_id"])
_ = _.merge(session_length, how="left", on=["installation_id"])
_ = _.merge(session_time, how="left", on=["installation_id"])
_ = _.merge(types, how="left", on=["installation_id"])
_ = _.merge(title, how="left", on=["installation_id"])
_ = _.merge(assessment, how="left", on=["installation_id"])
_ = _.merge(time, how="left", on=["installation_id"])
_ = _.merge(game, how="left", on=["installation_id"])
train_x_col[0] = "installation_id"
_ = _.loc[:, train_x_col]
_ = _.fillna(-1)
_.to_csv("test.csv", index=False)
test_x = scaler.transform(_.loc[:, train_x_col[1:]])
result = model.predict(test_x)
# result[result <= 1.12232214] = 0
# result[np.where(np.logical_and(result > 1.12232214, result <= 1.73925866))] = 1
# result[np.where(np.logical_and(result > 1.73925866, result <= 2.22506454))] = 2
# result[result > 2.22506454] = 3
# result = result.astype("int")
if loss_type == "mse":
submission = pd.DataFrame(
{"installation_id": _.installation_id, "accuracy_group": result.flatten()}
)
submission.to_csv("submission.csv", index=False)
elif loss_type == "category":
submission = pd.DataFrame(
{"installation_id": _.installation_id, "accuracy_group": result.argmax(axis=1)}
)
submission.to_csv("submission.csv", index=False)
plt.figure(figsize=(20, 10))
plt.hist(submission.accuracy_group)
plt.show()
np.unique(submission.accuracy_group, return_counts=True)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
nRowsRead = 1000 # specify 'None' if want to read whole file
# ArXiv.csv may have more rows in reality, but we are only loading/previewing the first 1000 rows
df = pd.read_csv(
"/kaggle/input/quantum-physics-articles-on-arxiv-1994-to-2009/ArXiv_old.csv",
delimiter=",",
nrows=nRowsRead,
)
df.dataframeName = "ArXiv_old.csv"
nRow, nCol = df.shape
print(f"There are {nRow} rows and {nCol} columns")
df.head()
df.dtypes
|
# # Getting Started
import numpy as np # linear algebra
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_profiling
from pandas.plotting import (
scatter_matrix,
) # I didn't end up using it because the dataset has too many dimensions, but can keep it here for now
from datetime import datetime
# the below allows to see all fields and rows of the output when running analysis -- important when we are starting with 80+ fields
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
import seaborn as sns
color = sns.color_palette()
sns.set_style("darkgrid")
from scipy import stats
from scipy.stats import norm, skew
import missingno as mn
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
plt.style.use("ggplot")
# Limit floats to 3 decimal points
pd.set_option("display.float_format", lambda x: "{:.3f}".format(x))
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/train.csv")
df_train = df.drop(["SalePrice"], axis=1)
df_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
df_all = pd.concat([df_test, df_test])
y = df.SalePrice
# checking that the concat function worked
print("Training dataset: ", df_train.shape)
print("Testing dataset: ", df_test.shape)
print("Joined dataset: ", df_all.shape)
# THwe above also indicates that the data is highly dimensional, which we are about to see below as well.
# # What's the objective?
# Price Prediction We are looking to predict prices of houses in Ames, Iowa, using various characteristics that are provided to us. We can assume that we won't need all the characteristics (need PCA?), but we'll have to see once we explore the data further.
# This dataset provides us with a good opportunity to pursue the hedonic pricing method: a theory that price of a marketed good reflects its characteristics. In this case, we will be looking to show that features such as total square footage, general house quality, and other characteristics affect house prices in a given geographic area.
# # What's in the data?
# **Documentation**
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data
# * SalePrice - the property's sale price in dollars. This is the target variable that you're trying to predict.
# * MSSubClass: The building class
# * MSZoning: The general zoning classification
# * LotFrontage: Linear feet of street connected to property
# * LotArea: Lot size in square feet
# * Street: Type of road access
# * Alley: Type of alley access
# * LotShape: General shape of property
# * LandContour: Flatness of the property
# * Utilities: Type of utilities available
# * LotConfig: Lot configuration
# * LandSlope: Slope of property
# * Neighborhood: Physical locations within Ames city limits
# * Condition1: Proximity to main road or railroad
# * Condition2: Proximity to main road or railroad (if a second is present)
# * BldgType: Type of dwelling
# * HouseStyle: Style of dwelling
# * OverallQual: Overall material and finish quality
# * OverallCond: Overall condition rating
# * YearBuilt: Original construction date
# * YearRemodAdd: Remodel date
# * RoofStyle: Type of roof
# * RoofMatl: Roof material
# * Exterior1st: Exterior covering on house
# * Exterior2nd: Exterior covering on house (if more than one material)
# * MasVnrType: Masonry veneer type
# * MasVnrArea: Masonry veneer area in square feet
# * ExterQual: Exterior material quality
# * ExterCond: Present condition of the material on the exterior
# * Foundation: Type of foundation
# * BsmtQual: Height of the basement
# * BsmtCond: General condition of the basement
# * BsmtExposure: Walkout or garden level basement walls
# * BsmtFinType1: Quality of basement finished area
# * BsmtFinSF1: Type 1 finished square feet
# * BsmtFinType2: Quality of second finished area (if present)
# * BsmtFinSF2: Type 2 finished square feet
# * BsmtUnfSF: Unfinished square feet of basement area
# * TotalBsmtSF: Total square feet of basement area
# * Heating: Type of heating
# * HeatingQC: Heating quality and condition
# * CentralAir: Central air conditioning
# * Electrical: Electrical system
# * 1stFlrSF: First Floor square feet
# * 2ndFlrSF: Second floor square feet
# * LowQualFinSF: Low quality finished square feet (all floors)
# * GrLivArea: Above grade (ground) living area square feet
# * BsmtFullBath: Basement full bathrooms
# * BsmtHalfBath: Basement half bathrooms
# * FullBath: Full bathrooms above grade
# * HalfBath: Half baths above grade
# * Bedroom: Number of bedrooms above basement level
# * Kitchen: Number of kitchens
# * KitchenQual: Kitchen quality
# * TotRmsAbvGrd: Total rooms above grade (does not include bathrooms)
# * Functional: Home functionality rating
# * Fireplaces: Number of fireplaces
# * FireplaceQu: Fireplace quality
# * GarageType: Garage location
# * GarageYrBlt: Year garage was built
# * GarageFinish: Interior finish of the garage
# * GarageCars: Size of garage in car capacity
# * GarageArea: Size of garage in square feet
# * GarageQual: Garage quality
# * GarageCond: Garage condition
# * PavedDrive: Paved driveway
# * WoodDeckSF: Wood deck area in square feet
# * OpenPorchSF: Open porch area in square feet
# * EnclosedPorch: Enclosed porch area in square feet
# * 3SsnPorch: Three season porch area in square feet
# * ScreenPorch: Screen porch area in square feet
# * PoolArea: Pool area in square feet
# * PoolQC: Pool quality
# * Fence: Fence quality
# * MiscFeature: Miscellaneous feature not covered in other categories
# * MiscVal: $Value of miscellaneous feature
# * MoSold: Month SoldGr
# * YrSold: Year Sold
# * SaleType: Type of sale
# * SaleCondition: Condition of sale# What's in the data?
# This dataset provides us with a good opportunity to pursue the hedonic pricing method: a theory that price of a marketed good reflects its characteristics. In this case, we will be looking to show that features such as total square footage, general house quality, and other characteristics affect house prices in a given geographic area.
# # Data Details
# **NB: the below is quite long, but I wanted to ensure it is easily available for reference.**
# MSSubClass: Identifies the type of dwelling involved in the sale.
# 20 1-STORY 1946 & NEWER ALL STYLES
# 30 1-STORY 1945 & OLDER
# 40 1-STORY W/FINISHED ATTIC ALL AGES
# 45 1-1/2 STORY - UNFINISHED ALL AGES
# 50 1-1/2 STORY FINISHED ALL AGES
# 60 2-STORY 1946 & NEWER
# 70 2-STORY 1945 & OLDER
# 75 2-1/2 STORY ALL AGES
# 80 SPLIT OR MULTI-LEVEL
# 85 SPLIT FOYER
# 90 DUPLEX - ALL STYLES AND AGES
# 120 1-STORY PUD (Planned Unit Development) - 1946 & NEWER
# 150 1-1/2 STORY PUD - ALL AGES
# 160 2-STORY PUD - 1946 & NEWER
# 180 PUD - MULTILEVEL - INCL SPLIT LEV/FOYER
# 190 2 FAMILY CONVERSION - ALL STYLES AND AGES
# MSZoning: Identifies the general zoning classification of the sale.
#
# A Agriculture
# C Commercial
# FV Floating Village Residential
# I Industrial
# RH Residential High Density
# RL Residential Low Density
# RP Residential Low Density Park
# RM Residential Medium Density
#
# LotFrontage: Linear feet of street connected to property
# LotArea: Lot size in square feet
# Street: Type of road access to property
# Grvl Gravel
# Pave Paved
#
# Alley: Type of alley access to property
# Grvl Gravel
# Pave Paved
# NA No alley access
#
# LotShape: General shape of property
# Reg Regular
# IR1 Slightly irregular
# IR2 Moderately Irregular
# IR3 Irregular
#
# LandContour: Flatness of the property
# Lvl Near Flat/Level
# Bnk Banked - Quick and significant rise from street grade to building
# HLS Hillside - Significant slope from side to side
# Low Depression
#
# Utilities: Type of utilities available
#
# AllPub All public Utilities (E,G,W,& S)
# NoSewr Electricity, Gas, and Water (Septic Tank)
# NoSeWa Electricity and Gas Only
# ELO Electricity only
#
# LotConfig: Lot configuration
# Inside Inside lot
# Corner Corner lot
# CulDSac Cul-de-sac
# FR2 Frontage on 2 sides of property
# FR3 Frontage on 3 sides of property
#
# LandSlope: Slope of property
#
# Gtl Gentle slope
# Mod Moderate Slope
# Sev Severe Slope
#
# Neighborhood: Physical locations within Ames city limits
# Blmngtn Bloomington Heights
# Blueste Bluestem
# BrDale Briardale
# BrkSide Brookside
# ClearCr Clear Creek
# CollgCr College Creek
# Crawfor Crawford
# Edwards Edwards
# Gilbert Gilbert
# IDOTRR Iowa DOT and Rail Road
# MeadowV Meadow Village
# Mitchel Mitchell
# Names North Ames
# NoRidge Northridge
# NPkVill Northpark Villa
# NridgHt Northridge Heights
# NWAmes Northwest Ames
# OldTown Old Town
# SWISU South & West of Iowa State University
# Sawyer Sawyer
# SawyerW Sawyer West
# Somerst Somerset
# StoneBr Stone Brook
# Timber Timberland
# Veenker Veenker
#
# Condition1: Proximity to various conditions
#
# Artery Adjacent to arterial street
# Feedr Adjacent to feeder street
# Norm Normal
# RRNn Within 200' of North-South Railroad
# RRAn Adjacent to North-South Railroad
# PosN Near positive off-site feature--park, greenbelt, etc.
# PosA Adjacent to postive off-site feature
# RRNe Within 200' of East-West Railroad
# RRAe Adjacent to East-West Railroad
#
# Condition2: Proximity to various conditions (if more than one is present)
#
# Artery Adjacent to arterial street
# Feedr Adjacent to feeder street
# Norm Normal
# RRNn Within 200' of North-South Railroad
# RRAn Adjacent to North-South Railroad
# PosN Near positive off-site feature--park, greenbelt, etc.
# PosA Adjacent to postive off-site feature
# RRNe Within 200' of East-West Railroad
# RRAe Adjacent to East-West Railroad
#
# BldgType: Type of dwelling
#
# 1Fam Single-family Detached
# 2FmCon Two-family Conversion; originally built as one-family dwelling
# Duplx Duplex
# TwnhsE Townhouse End Unit
# TwnhsI Townhouse Inside Unit
#
# HouseStyle: Style of dwelling
#
# 1Story One story
# 1.5Fin One and one-half story: 2nd level finished
# 1.5Unf One and one-half story: 2nd level unfinished
# 2Story Two story
# 2.5Fin Two and one-half story: 2nd level finished
# 2.5Unf Two and one-half story: 2nd level unfinished
# SFoyer Split Foyer
# SLvl Split Level
#
# OverallQual: Rates the overall material and finish of the house
# 10 Very Excellent
# 9 Excellent
# 8 Very Good
# 7 Good
# 6 Above Average
# 5 Average
# 4 Below Average
# 3 Fair
# 2 Poor
# 1 Very Poor
#
# OverallCond: Rates the overall condition of the house
# 10 Very Excellent
# 9 Excellent
# 8 Very Good
# 7 Good
# 6 Above Average
# 5 Average
# 4 Below Average
# 3 Fair
# 2 Poor
# 1 Very Poor
#
# YearBuilt: Original construction date
# YearRemodAdd: Remodel date (same as construction date if no remodeling or additions)
# RoofStyle: Type of roof
# Flat Flat
# Gable Gable
# Gambrel Gabrel (Barn)
# Hip Hip
# Mansard Mansard
# Shed Shed
#
# RoofMatl: Roof material
# ClyTile Clay or Tile
# CompShg Standard (Composite) Shingle
# Membran Membrane
# Metal Metal
# Roll Roll
# Tar&Grv Gravel & Tar
# WdShake Wood Shakes
# WdShngl Wood Shingles
#
# Exterior1st: Exterior covering on house
# AsbShng Asbestos Shingles
# AsphShn Asphalt Shingles
# BrkComm Brick Common
# BrkFace Brick Face
# CBlock Cinder Block
# CemntBd Cement Board
# HdBoard Hard Board
# ImStucc Imitation Stucco
# MetalSd Metal Siding
# Other Other
# Plywood Plywood
# PreCast PreCast
# Stone Stone
# Stucco Stucco
# VinylSd Vinyl Siding
# Wd Sdng Wood Siding
# WdShing Wood Shingles
#
# Exterior2nd: Exterior covering on house (if more than one material)
# AsbShng Asbestos Shingles
# AsphShn Asphalt Shingles
# BrkComm Brick Common
# BrkFace Brick Face
# CBlock Cinder Block
# CemntBd Cement Board
# HdBoard Hard Board
# ImStucc Imitation Stucco
# MetalSd Metal Siding
# Other Other
# Plywood Plywood
# PreCast PreCast
# Stone Stone
# Stucco Stucco
# VinylSd Vinyl Siding
# Wd Sdng Wood Siding
# WdShing Wood Shingles
#
# MasVnrType: Masonry veneer type
# BrkCmn Brick Common
# BrkFace Brick Face
# CBlock Cinder Block
# None None
# Stone Stone
#
# MasVnrArea: Masonry veneer area in square feet
# ExterQual: Evaluates the quality of the material on the exterior
#
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# Po Poor
#
# ExterCond: Evaluates the present condition of the material on the exterior
#
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# Po Poor
#
# Foundation: Type of foundation
#
# BrkTil Brick & Tile
# CBlock Cinder Block
# PConc Poured Contrete
# Slab Slab
# Stone Stone
# Wood Wood
#
# BsmtQual: Evaluates the height of the basement
# Ex Excellent (100+ inches)
# Gd Good (90-99 inches)
# TA Typical (80-89 inches)
# Fa Fair (70-79 inches)
# Po Poor (<70 inches
# NA No Basement
#
# BsmtCond: Evaluates the general condition of the basement
# Ex Excellent
# Gd Good
# TA Typical - slight dampness allowed
# Fa Fair - dampness or some cracking or settling
# Po Poor - Severe cracking, settling, or wetness
# NA No Basement
#
# BsmtExposure: Refers to walkout or garden level walls
# Gd Good Exposure
# Av Average Exposure (split levels or foyers typically score average or above)
# Mn Mimimum Exposure
# No No Exposure
# NA No Basement
#
# BsmtFinType1: Rating of basement finished area
# GLQ Good Living Quarters
# ALQ Average Living Quarters
# BLQ Below Average Living Quarters
# Rec Average Rec Room
# LwQ Low Quality
# Unf Unfinshed
# NA No Basement
#
# BsmtFinSF1: Type 1 finished square feet
# BsmtFinType2: Rating of basement finished area (if multiple types)
# GLQ Good Living Quarters
# ALQ Average Living Quarters
# BLQ Below Average Living Quarters
# Rec Average Rec Room
# LwQ Low Quality
# Unf Unfinshed
# NA No Basement
# BsmtFinSF2: Type 2 finished square feet
# BsmtUnfSF: Unfinished square feet of basement area
# TotalBsmtSF: Total square feet of basement area
# Heating: Type of heating
#
# Floor Floor Furnace
# GasA Gas forced warm air furnace
# GasW Gas hot water or steam heat
# Grav Gravity furnace
# OthW Hot water or steam heat other than gas
# Wall Wall furnace
#
# HeatingQC: Heating quality and condition
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# Po Poor
#
# CentralAir: Central air conditioning
# N No
# Y Yes
#
# Electrical: Electrical system
# SBrkr Standard Circuit Breakers & Romex
# FuseA Fuse Box over 60 AMP and all Romex wiring (Average)
# FuseF 60 AMP Fuse Box and mostly Romex wiring (Fair)
# FuseP 60 AMP Fuse Box and mostly knob & tube wiring (poor)
# Mix Mixed
#
# 1stFlrSF: First Floor square feet
#
# 2ndFlrSF: Second floor square feet
# LowQualFinSF: Low quality finished square feet (all floors)
# GrLivArea: Above grade (ground) living area square feet
# BsmtFullBath: Basement full bathrooms
# BsmtHalfBath: Basement half bathrooms
# FullBath: Full bathrooms above grade
# HalfBath: Half baths above grade
# Bedroom: Bedrooms above grade (does NOT include basement bedrooms)
# Kitchen: Kitchens above grade
# KitchenQual: Kitchen quality
# Ex Excellent
# Gd Good
# TA Typical/Average
# Fa Fair
# Po Poor
#
# TotRmsAbvGrd: Total rooms above grade (does not include bathrooms)
# Functional: Home functionality (Assume typical unless deductions are warranted)
# Typ Typical Functionality
# Min1 Minor Deductions 1
# Min2 Minor Deductions 2
# Mod Moderate Deductions
# Maj1 Major Deductions 1
# Maj2 Major Deductions 2
# Sev Severely Damaged
# Sal Salvage only
#
# Fireplaces: Number of fireplaces
# FireplaceQu: Fireplace quality
# Ex Excellent - Exceptional Masonry Fireplace
# Gd Good - Masonry Fireplace in main level
# TA Average - Prefabricated Fireplace in main living area or Masonry Fireplace in basement
# Fa Fair - Prefabricated Fireplace in basement
# Po Poor - Ben Franklin Stove
# NA No Fireplace
#
# GarageType: Garage location
#
# 2Types More than one type of garage
# Attchd Attached to home
# Basment Basement Garage
# BuiltIn Built-In (Garage part of house - typically has room above garage)
# CarPort Car Port
# Detchd Detached from home
# NA No Garage
#
# GarageYrBlt: Year garage was built
#
# GarageFinish: Interior finish of the garage
# Fin Finished
# RFn Rough Finished
# Unf Unfinished
# NA No Garage
#
# GarageCars: Size of garage in car capacity
# GarageArea: Size of garage in square feet
# GarageQual: Garage quality
# Ex Excellent
# Gd Good
# TA Typical/Average
# Fa Fair
# Po Poor
# NA No Garage
#
# GarageCond: Garage condition
# Ex Excellent
# Gd Good
# TA Typical/Average
# Fa Fair
# Po Poor
# NA No Garage
#
# PavedDrive: Paved driveway
# Y Paved
# P Partial Pavement
# N Dirt/Gravel
#
# WoodDeckSF: Wood deck area in square feet
# OpenPorchSF: Open porch area in square feet
# EnclosedPorch: Enclosed porch area in square feet
# 3SsnPorch: Three season porch area in square feet
# ScreenPorch: Screen porch area in square feet
# PoolArea: Pool area in square feet
# PoolQC: Pool quality
#
# Ex Excellent
# Gd Good
# TA Average/Typical
# Fa Fair
# NA No Pool
#
# Fence: Fence quality
#
# GdPrv Good Privacy
# MnPrv Minimum Privacy
# GdWo Good Wood
# MnWw Minimum Wood/Wire
# NA No Fence
#
# MiscFeature: Miscellaneous feature not covered in other categories
#
# Elev Elevator
# Gar2 2nd Garage (if not described in garage section)
# Othr Other
# Shed Shed (over 100 SF)
# TenC Tennis Court
# NA None
#
# MiscVal: $Value of miscellaneous feature
# MoSold: Month Sold (MM)
# YrSold: Year Sold (YYYY)
# SaleType: Type of sale
#
# WD Warranty Deed - Conventional
# CWD Warranty Deed - Cash
# VWD Warranty Deed - VA Loan
# New Home just constructed and sold
# COD Court Officer Deed/Estate
# Con Contract 15% Down payment regular terms
# ConLw Contract Low Down payment and low interest
# ConLI Contract Low Interest
# ConLD Contract Low Down
# Oth Other
#
# SaleCondition: Condition of sale
# Normal Normal Sale
# Abnorml Abnormal Sale - trade, foreclosure, short sale
# AdjLand Adjoining Land Purchase
# Alloca Allocation - two linked properties with separate deeds, typically condo with a garage unit
# Family Sale between family members
# Partial Home was not completed when last assessed (associated with New Homes)
# # Ames, Iowa background
# Since Ames is a real town (city?), we can look up some information about it to place all the data into context.
# According to the [Wikipedia article](https://en.wikipedia.org/wiki/Ames,_Iowa):
# * Ames is home to Iowa State University -- we can assume lots of students and professors around. Iowa State students make up 1/2 of the city's population of ~58k people.
# * Students are unlikely housebuyers; but does it mean there is a market for people buying larger houses with many bedrooms (and perhaps with large basements) with lots of parking spots to rent out to students to live in large groups?
# * Some relevant features could be various square footage fields, as well as *GarageCars* / *GarageAreas*.
# * It is also home to several USDA sites
# * This doesn't immediately speall out housebuyer implications to me, althgough we can assume that government employees are not buying mansions. There are no indicators as to who the buyers are, so that information is not as helpful.
# * Ames has humid continental climate (cold winters, hot summers). Average low in January is 12F and average hot temp in July is 84F.
# * This means there is a need for both good heating and AC, which will likely affect prices.
# * We can see already that there are both heating and AC-related features in the dataset (*Heating* for Type of heating; *HeatingQC* for quality and condition of heating; and *CentralAir* to indicate whether there is Central Air Conditioning).
#
#
# Just for fun, we can quickly validate the assumptions above before we do more investigation and cleanup:
sns.relplot(
x="GrLivArea",
y="SalePrice",
hue="CentralAir",
style="CentralAir",
data=df,
height=8,
aspect=2,
)
# Houses without central air (blue dots) are definitely cheaper and they have a smaller range of prices in general for a given living area.
# But could that mean that houses without central air are older and cheaper because of their age as a result?
# To note, there seem to be some outliers, we will take a look at them later to see if they need to be dropped.
sns.relplot(
x="YearBuilt",
y="SalePrice",
hue="CentralAir",
style="CentralAir",
data=df,
height=8,
aspect=2,
)
# There are no houses built after the 1970s or so without central air. Ok, cool. So now we know that older houses are less likely to have central air and are probably cheaper as a result at any price point. Are older houses cheaper in general?
sns.relplot(x="YearBuilt", y="SalePrice", data=df, height=8, aspect=2)
# Newer houses seem to be slightly more expensive on average; most of the more expensive houses are newer. The cheapest houses are all older. But how do we reconcile it with what we have learned about central air, house size, date built and sale price?
# In reality, we need to take a serious look at all the features available and do proper analysis to understand what features actually impact the price.
# # BEGINNING TO EXPLORE
df.columns
df_test.columns
# Let's confirm the columns in the test and train dataset are the same are the same (sans SalePrice)
(df_test.columns == df_train.columns).any()
print("-" * 100)
print(df.describe())
df.head(10)
df.tail(10)
df_test.head(10)
df.info()
df.dtypes
# let's check dtypes are the same for training and testing datasets:
(df_test.dtypes == df_train.dtypes).any()
# set index
df_train.set_index("Id", inplace=True)
df_test.set_index("Id", inplace=True)
df.set_index("Id", inplace=True)
# # Pandas Profiling
# I will comment out the below because it slows the notebook down, but I used the output extensively when running analyses.
# pandas_profiling.ProfileReport(df)
# pandas_profiling.ProfileReport(df_test)
# # Basic Data Validations
# Something I see a lot is years in datasets not making sense (e.g., negative years, year built later than current years, etc). I can do a scan of the .describe function above, but I am going to run a few explicit checks as well. Most are based on common assumptions (e.g., a house has to have been built before it was sold). It wasn't clear to me if a garage could have been build before the house was built.
# We can build in a few more checks, but I didn't want to go overboard. Some ideas:
# * YearBuilt is before or the same as YrSold
# * GarageYrBlt is before or the same as YrSold
# * YearRemoAdd is after or the same as YearBuilt
# * YearRemoAdd is before or the same as YrSold
# * Etc.
def check_year_assumptions(df):
currentYear = datetime.now().year
print(
"* All dates in question occurred during or earlier than current year: ",
(datetime.now().year >= df.YearBuilt.max())
& (datetime.now().year >= df.GarageYrBlt.max())
& (datetime.now().year >= df.YearRemodAdd.max())
& (datetime.now().year >= df.YrSold.max()),
)
print("* Earliest MonthSold is January or later:", df["MoSold"].min() >= 1)
print("* Latest MonthSold is December or earlier:", df["MoSold"].max() <= 12)
print("Training dataset: ")
check_year_assumptions(df_train)
print("---" * 10)
print("Testing dataset: ")
check_year_assumptions(df_test)
# Let's look into the False flag further.
print(df_test.YearBuilt.max())
print(df_test.YearRemodAdd.max())
print(df_test.GarageYrBlt.max())
print(df_test.YrSold.max())
# GarageYrBlt is a float here, but it should be an integer. I will fix later when I check on all the datatypes.
# 2207 is definitely not right for the year a Garage could be built. Let's see what some of the other values in the row are.
off_values = df_test[df_test.GarageYrBlt > 2010]
off_values[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]]
# In this case, it looks like the garage was probably built in 2007, so we will take an educated guess and replace GarageYrBlt with that value.
df_test["GarageYrBlt"] = df_test["GarageYrBlt"].replace(
2207.0, 2007.0
) # only doing this on the test dataset since the training dataset didn't have this issue
# Something else I want to check is whether Garages are generally built the same year as a house, before a house was built, or after a house was built.
# using joined dataset
fig, ax = plt.subplots(figsize=(20, 10))
ax.scatter(x=df_all.YearBuilt, y=df_all.GarageYrBlt)
plt.xlabel("House Year Built", fontsize=15)
plt.ylabel("Garage Year Built", fontsize=15)
# There are some cases where garage was built before the house (dots that are below the line on the chart). I think we should leave those for now as variation of the norm (perhaps the house burned down and was rebuilt, but the garage stayed intact), but in the real world, I would want to talk to a SME to verify whether that makes sense.
# # Basic Visualizations
# Let's start digging into the exploration of the data a little bit. First, let's look at our target variable (price).
plt.hist(df.SalePrice, bins=15)
plt.xlabel("Sale price, $")
sns.distplot(df["SalePrice"], fit=norm)
fig = plt.figure()
res = stats.probplot(df["SalePrice"], plot=plt)
# The target variable (y, SalePrice) is skewed to the right (positively skewed) -- there are more cheap houses than expensive ones (aka a long tail). We will have to adjust it so that it is more normally distribued it we want to fit a linear model to predict this data.
df.YrSold.plot.hist()
plt.xticks(range(2006, 2010))
fig, ax = plt.subplots(figsize=(15, 9))
ax.hist(x=df.YearBuilt)
plt.xlabel("House Year Built", fontsize=15)
# checking distribution for joined dataset
fig, ax = plt.subplots(figsize=(15, 9))
ax.hist(x=df_all.YearBuilt)
plt.xlabel("House Year Built", fontsize=15)
# Both look similar, which is good for out model.
(df.groupby("YrSold")[["SalePrice"]].mean()).plot()
# We are seeing a sharp drop in average price in 2008, with the 2009 prices recovering somewhat, but not majorly. This does make me wonder to which extent we need to adjust prices given the economic environment.
(df.groupby("MoSold")[["SalePrice"]].mean()).plot()
df_train.MoSold.plot.hist() # seasonality?
# for joined dataset
df_all.MoSold.plot.hist()
# There is definitely seasonality in both prices sold (more expensive houses close in the fall/winter) and number of houses sold -- there are more sales in the summer.
pd.pivot_table(df, index=["YrSold", "MoSold"], values=["SalePrice"]).plot()
pd.pivot_table(df, index=["YrSold", "MoSold"], values=["SalePrice"]).plot(
kind="bar", figsize=(15, 8)
)
# convert pivot table to a dataframe
df_viz_cont = pd.DataFrame(
pd.pivot_table(
df, index=["YrSold", "MoSold"], values=["SalePrice"], aggfunc="mean"
).to_records()
)
# convert int to string to perform length check and concatenation
df_viz_cont["MoSold"] = df_viz_cont["MoSold"].astype(str)
df_viz_cont["YrSold"] = df_viz_cont["YrSold"].astype(str)
# add zero is string length is 1
df_viz_cont["MoSold"] = df_viz_cont["MoSold"].apply(
lambda x: ("0" + x) if len(x) != 2 else x
)
# create a Mo Yr column to use as x in charts
df_viz_cont["MoYr"] = df_viz_cont["MoSold"] + df_viz_cont["YrSold"]
# chart
fig, ax = plt.subplots()
sns.lineplot(
x="MoYr", y="SalePrice", data=df_viz_cont, dashes=True, markers=True, color="coral"
)
plt.ylabel("Sale Price, $", fontsize=13)
plt.xlabel("Month and Year", fontsize=13)
# plt.yticks(range(0, 225000))
# seaborn.lineplot(x=None, y=None, hue=None, size=None, style=None, data=None, palette=None, hue_order=None,
# hue_norm=None, sizes=None, size_order=None, size_norm=None, dashes=True, markers=None, style_order=None, units=None, estimator='mean',
# ci=95, n_boot=1000, seed=None, sort=True, err_style='band', err_kws=None, legend='brief', ax=None, **kwargs)
# fig, ax = plt.subplots()
# ax.plot(x = df.MoYr, y = df.SalePrice)
# plt.ylabel('Sale Price, $', fontsize=13)
# plt.xlabel('Month and Year', fontsize=13)
# plt.xticks(range(0, 10))
# sns.regplot(x='MoYr',y='SalePrice',data=df_viz_cont, fit_reg=True)
sns.relplot(
x="MoYr",
y="SalePrice",
kind="line",
ci="sd",
data=df_viz_cont,
height=7,
aspect=3,
color="red",
)
# plt.figure(figsize=(20,5))
# chart = sns.relplot(
# data=df_viz_cont,
# x="MoYr",
# y="SalePrice",
# palette='Set1'
# )
# chart.set_xticklabels(chart.get_xticklabels(), rotation=45)
pd.pivot_table(df, index=["MoSold"], columns=["YrSold"], values=["SalePrice"])
pd.pivot_table(
df, index=["MoSold"], columns=["YrSold"], values=["SalePrice"], aggfunc="count"
)
pd.pivot_table(df, index=["MoSold"], columns=["YrSold"], values=["SalePrice"]).plot()
pd.pivot_table(
df, index=["MoSold"], columns=["YrSold"], values=["SalePrice"], aggfunc="count"
).plot()
pd.pivot_table(
df, index=["MoSold"], columns=["YrSold"], values=["SalePrice"], aggfunc="mean"
).plot()
# There seems to be some seasonal trend in the data: Prices in 2H seem higher, prices in 1H / particularly in the summer are low. Interestingly, there are more houses sold in the summer, so perhaps lower prices are due to a glut of houses available then?
# I have to guess that the low mean sale price in July is a result of a small sample size (6 houses).
# Let's try to visualiza this in more ways. We need to convert our pivot table to a dataframe first.
df_viz = pd.DataFrame(
pd.pivot_table(
df, index=["MoSold"], columns=["YrSold"], values=["SalePrice"], aggfunc="mean"
).to_records()
)
df_viz = df_viz.set_index(["MoSold"])
df_viz.columns = [
hdr.replace("(", "").replace(")", "").replace("'", "").replace(", ", "")
for hdr in df_viz.columns
] # should really use regex here instead...
df_viz["mean_monthly_saleprice"] = df_viz.mean(axis=1)
df_viz
df_viz[
[
"SalePrice2006",
"SalePrice2007",
"SalePrice2008",
"SalePrice2009",
"SalePrice2010",
]
].plot(
figsize=(12, 6),
color=["green", "brown", "yellow", "blue", "black"],
use_index=False,
)
df_viz["mean_monthly_saleprice"].plot(
figsize=(16, 6), color=["gray"], kind="bar", use_index=True
)
# another quick visualization
fig = plt.figure()
plt.figure(figsize=(16, 6))
sale_prices = df_viz[
[
"SalePrice2006",
"SalePrice2007",
"SalePrice2008",
"SalePrice2009",
"SalePrice2010",
]
]
avg_monthly_prices = df_viz["mean_monthly_saleprice"]
plt.plot(sale_prices, "go-", label="average monthly prices")
plt.plot(avg_monthly_prices)
plt.figure(figsize=(16, 6))
(df.groupby("OverallCond")[["SalePrice"]].mean()).plot()
# # DATA WRANGLING
# Something to note: if I were not doing purely as an exercise, I would have probably done some feature engineering first, so I would not have to deal with 80+ features. But looking at the fields and figuring what needs to be to each, if anything, was helpful from a standpoing of understanding the data
# # Identifying missing data
# Quick look with a *missingno* matrix:
# training dataset
mn.matrix(df) # can use the corresponding part of Pandas Profiling as well
# mn.matrix(df.iloc[:200,:40])
# testing dataset
mn.matrix(df_test)
# Looking at the above, it doesn't tell us much -- there are some entries that seem to have similar entries missing, but from cursory analysis, it seems that it is largely due to a house not having a feature that is described in several ways (e.g., not having a finished basement -> no data for quality of basement finishings).
# It does seem that the training and testing datasets have slightly different distributions of missing data, with the testing dataset having a few columns that have very few missing entries where the training dataset isn't missing any data.
# Let's actually calculate what is missing. Let's keep in mind that is there is a NaN in the data, it doesn't mean that it is actually missing due to an error -- need to consult data documentation on that
# missing data for training dataset -- we should go straight to checking missing data for both datasets since the missing data distribution is not the same
# def calc_missing_data(df):
# total = df.isnull().sum().sort_values(ascending=False)
# percent_1 = df.isnull().sum()/df.isnull().count()*100
# percent_2 = (round(percent_1, 1)).sort_values(ascending=False)
# missing_data = pd.concat([total, percent_2], axis=1, keys=['Total', '%'])
# return missing_data
# calc_missing_data(df).head(25)
# calculate for both train datataset (df) and test dataset (df_test)
def calc_missing_data(df_train, df_test):
total_train = df_train.isnull().sum()
total_test = df_test.isnull().sum()
percent_1_train = df_train.isnull().sum() / df_train.isnull().count() * 100
percent_2_train = round(percent_1_train, 1)
percent_1_test = df_test.isnull().sum() / df_test.isnull().count() * 100
percent_2_test = round(percent_1_test, 1)
missing_data = pd.concat(
[total_train, percent_2_train, total_test, percent_2_test],
axis=1,
keys=[
"Total Train Missing",
"% Train Missing",
"Total Test Missing",
"% Test Missing",
],
)
missing_data = missing_data[(missing_data.T != 0).any()].sort_values(
by=["% Train Missing", "% Test Missing"], ascending=False
)
return missing_data
calc_missing_data(df_train, df_test)
# Some trends in the missing data: some fields are missing a lot of values (50% and more), and some fields are are only missing 1 or 2 entries in either the testing or the training dataset. This will inform our thinking as to dealing with missing data: the cases of very few values missing can be replaced with the mode for categorical data / mean for numeric data, and the cases of many values missing have to be addressed in a more thoughtful manner.
# # Handling Missing Data
# Let's start with the fields with the most missing data.
# PoolQC:
# * Options are: Ex, Gd, TA (Avg/Typical), Fair, and NA = No Pool.
# * This is interesting, because this field carries two meanings in one field: quality of a pool, if present, and if a pool is present at all. Therefore, deleting NA entries or imputing them with another value would be erroneous, as we will lose a whole layer or meaning.
# * *However*, this does offer a potential issue -- if the data is truly missing, we may think of it as NaN (absence of feature) instead. Because of the way this survery was designed, we don't have a way to address it presently.
df.PoolQC.value_counts()
# Same as above, for MiscFeature, the options are Elevator, 2nd Garage, Other, (large) Shed, and Tennis Court or NO feature.
df.MiscFeature.value_counts()
df.Alley.value_counts()
# Same as above, NaN would mean there is no Alley access.
df.Fence.value_counts()
# Same as above, NaN would mean there is no Fence.
df.FireplaceQu.value_counts()
# Same as above, NaN would mean there is no Fireplace.
df.LotFrontage.hist()
# LotFrontage is linear feet connected to property. It seems unlikely that there is no street connected to property, so this value is likely to be genuinely missing.
# But do we even care about this missing data? Let's see if it has any impact on the price.
sns.lmplot(x="LotFrontage", y="SalePrice", data=df)
# Bigger LotFrontage values tend to command a higher price. While the relationship is not that strong, I don't want to replace LotFrontage with a mean of the whole series. Maybe we can break it down by neighborhoods.
bplot = sns.boxplot(
y="LotFrontage", x="Neighborhood", data=df, width=1, palette="colorblind"
)
df.Electrical.value_counts()
# TotalBsmtSF: Total square feet of basement area + GrLivArea: Above grade (ground) living area square feet + GarageArea: Size of garage in square feet + 1stFlrSF: First Floor square feet + 2ndFlrSF: Second floor square feet
df[["TotalBsmtSF", "GrLivArea", "1stFlrSF", "2ndFlrSF", "GarageArea"]]
# **Let's deal with the missing data.**
# Let's consider the columns with missing data:
# * As a thought, 'quality' is quite subjective, so unless we are dealing with very biased readings (e.g., 'terrible quality' or 'world-class'), I suspect that it wouldn't be that helpful to our ML model anyway
# * What's intertesting is that the field below indicate quality of and / or absence of, e.g. PoolQC is NA if there is no pool, not because quality pool data is missing.
# ### Misc Features ###
# * PoolQC = 99.5% missing - pool quality | solution -> replace NaN with 0 (no pool), then one hot encoding; NB there is a square footage field for pool as well
# * MiscFeature = 96.3% missing - miscelaneous features | solution -> these are features not covered in other categories, so we have to break them down into separate fields
# * Alley = 93.8% missing -- Type of alley access | solution -> replace NaN with 0 (no alley access), then one hot encoding
# * Fence = 80.8% missing -- Fence quality | solution -> replace NaN with 0 (no fence), then one hot encoding
# * FireplaceQu = 47.3% missing -- Fireplace quality | solution -> one hot encoding (NaN means 0 fireplaces, but there is already a separate Fireplace column for that)
# * LotFrontage = 17.7% missing -- Linear feet of street connected to property -> it's hard to imagine that there is no street conneted to property, so I am going to treat it as missing values. I will replace it with a mean for each neighborhood to bring in a little more dimension to the data.
# * Functional [test dataset only] -- 0 is not an option, so missing entries are truly missing. Replace with the mode.
# * KitchenQual [test dataset only] -- there are 4 options, and NaN is not one of them. Replace with the mode.
# * MSZoning [test dataset only] -- there are no NaN options in the documentation. Replace with the mode.
# * SaleType [test dataset only] -- there are no NaN options in the documentation. Replace with the mode.
# * Utilities [test dataset only] -- assuming all houses must have some sort of utilities, and no allowance for NaN in documentation, we will assume this field is actually missing. Replace with the mode as this is categorical data.
# ### Garage data ###
# * GarageQual, GarageCond, GarageFinish,GarageType,GarageYrBlt -> replace NaN with 0 (no garage), then one hot encoding
# * GarageArea [test dataset only] -- 0 is an option, so any missing entries are likely to be truly missing. Replace with the mode.
# * GarageCars [test dataset only] -- 0 is an option, so any missing entries are likely to be truly missing. Replace with the mode.
# ### Basement data ###
# * BsmtExposure (Walkout or garden level basement walls), BsmtFinType2 (Quality of second finished area (if present), BsmtQual [Height of the basement, qualitative], BsmtCond [General condition of the basement], BsmtFinType1 (Quality of basement finished area) -> replace NaN with 0, then one hot encoding
# * Test dataset only:
# * BsmtFinSF1: Type 1 finished square feet; 0 if NaN
# * BsmtFinSF2: Rating of basement finished area (if multiple types); NaN means no basement, so replace with 0
# * BsmtFullBath: Basement full bathrooms; 0 is an option, so NaN is not automatically zero. We can replace with the mode.
# * BsmtHalfBath: Basement half bathrooms; 0 is an option, so NaN is not automatically zero. We can replace with the mode.
# * BsmtUnfSF: Unifnished Basement area; 0 is an option, so NaN is not automatically zero. We can replace with the mean.
# * TotalBsmtSF: there are 0s in the data (no basement), so missing data is likely to be truly missing. Replace with a mean.
#
# ### Exterior ###
# * Exterior1st [test dataset only]: Exterior Covering on house; 0 is not an option, so this is truly missing data. Replace with the mode as this is categorical data.
# * Exterior2nd [test dataset only]: Exterior Covering on house if more than one material; 0 is not an option, so this is truly missing data. Replace with the mode as this is categorical data.
# ### Masonry Veneer ###
# * MasVnrArea (Masonry veneer area in square feet) -- replace NaN with 0
# * MasVnrType (Masonry veneer type) -> replace NaN with 0 (no masonry veneer), then one hot encoding
# ### Electrical ###
# * Electrical -- there is no allowance for NaN in the documentation, so I assume this is genuine missing data. In addition, it is unlikely that a house sold in our day and time isn't connected to the grid. I will replace with the mode.
def impute_data(df):
# Misc
df.PoolQC.fillna(0, inplace=True)
df.MiscFeature.fillna(0, inplace=True)
df.Alley.fillna(0, inplace=True)
df.Fence.fillna(0, inplace=True)
df.FireplaceQu.fillna(0, inplace=True)
df.Functional.fillna(0, inplace=True) # to check
df.Functional.fillna(df.Functional.mode()[0], inplace=True)
df.KitchenQual.fillna(df.KitchenQual.mode()[0], inplace=True)
df.MSZoning.fillna(df.MSZoning.mode()[0], inplace=True)
df.SaleType.fillna(df.SaleType.mode()[0], inplace=True)
df.Utilities.fillna(df.Utilities.mode()[0], inplace=True)
# Lot Frontage
df.LotFrontage.fillna(df.LotFrontage.mean(), inplace=True)
# df.LotFrontage = df.groupby(['Neighborhood'])['LotFrontage'].transform(lambda x : fillna(x.mean()))
# Garage Data -- all the values below have 81 missing entries, which corresponds to the 81 houses without a garage. We can replace with 0.
df.GarageQual.fillna(0, inplace=True)
df.GarageCond.fillna(0, inplace=True)
df.GarageFinish.fillna(0, inplace=True)
df.GarageType.fillna(0, inplace=True)
df.GarageYrBlt.fillna(0, inplace=True)
df.GarageArea.fillna(df.GarageArea.mode()[0], inplace=True)
df.GarageCars.fillna(df.GarageCars.mode()[0], inplace=True)
# Basement Data -- all the values below have 37 mission entries in the train dataset, which corresponds to the 37 houses without a basement in the train dataset. We can replace with 0. 37 v. 38!!!
df.BsmtExposure.fillna(0, inplace=True)
df.BsmtFinSF1.fillna(0, inplace=True)
df.BsmtFinSF2.fillna(0, inplace=True)
df.BsmtFullBath.fillna(df.BsmtFullBath.mode()[0], inplace=True)
df.BsmtHalfBath.fillna(df.BsmtHalfBath.mode()[0], inplace=True)
df.BsmtUnfSF.fillna(df.BsmtUnfSF.mean(), inplace=True)
df.BsmtFinType1.fillna(0, inplace=True)
df.BsmtFinType2.fillna(0, inplace=True)
df.BsmtCond.fillna(0, inplace=True)
df.BsmtQual.fillna(0, inplace=True)
df.TotalBsmtSF.fillna(df.BsmtFullBath.mean(), inplace=True)
# Exterior
df.Exterior1st.fillna(df.Exterior1st.mode()[0], inplace=True)
df.Exterior2nd.fillna(df.Exterior2nd.mode()[0], inplace=True)
# Masonry Data
df.MasVnrType.fillna(0, inplace=True)
df.MasVnrArea.fillna(0, inplace=True)
# Electrical Data
df.Electrical.fillna(df.Electrical.mode()[0], inplace=True)
return df
df_train = impute_data(df_train)
df_test = impute_data(df_test)
df = impute_data(df)
# Let's confirm that the above worked and that we have addressed all NaNs at all datasets.
calc_missing_data(df_train, df_test)
# # Deriving Features
# We have a lot of raw features to work with, but we can also derive some new ones that we think could be useful for our model.
# Some ideas:
# * Calculate age of house when sold and number if years since it was remodeled (if not remodeled, it will automatically be house age)
# * Calculate age of garage when sold, similar to the above
# * Drop YearBuild, YearRemodAdd, GarageYrBlt to avoid fields with duplicate data
# * There is no total area / square footage field, so we will calculate it separately:
# * TotalBsmtSF: Total square feet of basement area + GrLivArea: Above grade (ground) living area square feet + GarageArea: Size of garage in square feet + 1stFlrSF: First Floor square feet + 2ndFlrSF: Second floor square feet
# * From looking at the data, I can see that 'GrLivArea' = '1stFlrSF'+ '2ndFlrSF' (this implies there are no houses with more tha two stories in the city of Ames)
# * calculate total liveable area = TotalBsmtSF+GrLivArea
#
# * We can also infer the number of floors: 1 or 2 using the informaton and data above (if 2ndFlrSF=0, then there is only 1 floor)
# Please note, at this point I am not confident that deriving these features is a way to go, so I may not end up using all of them.
# let's check that the datasets have the number of fields we expect
print("training dataset shape is: ", df_train.shape)
print("testing dataset shape is: ", df_test.shape)
print("original dataset shape is: ", df.shape)
def derive_features(df):
df["AgeWhenSold"] = df.YrSold - df.YearBuilt
df.drop(["YearBuilt"], axis=1, inplace=True)
df["YrsSinceRemodel"] = df.YrSold - df.YearRemodAdd
df.drop(["YearRemodAdd"], axis=1, inplace=True)
df["GarageAgeWhenSold"] = df.YrSold - df.GarageYrBlt
# we want to make sure that if there is no Garage, its age when sold is simply marked as 0
def clean_up_garage(val):
if val < 2000:
return val
return 0
df["GarageAgeWhenSold"] = df["GarageAgeWhenSold"].apply(clean_up_garage)
df.drop(["GarageYrBlt"], axis=1, inplace=True)
df["TotalLiveArea"] = df.TotalBsmtSF + df.GrLivArea
df["NoStories"] = df["2ndFlrSF"].apply(lambda x: 2 if x > 0 else 1)
return df
df_train = derive_features(df_train)
df_test = derive_features(df_test)
df = derive_features(df)
# let's check that the datasets have the number of fields we expect
print("training dataset shape is: ", df_train.shape)
print("testing dataset shape is: ", df_test.shape)
print("original dataset shape is: ", df.shape)
# Let's quickly check our function worked (without the cleanup, max garage age would be about 2000):
print(df_train["GarageAgeWhenSold"].max())
print(df_test["GarageAgeWhenSold"].max())
print(df["GarageAgeWhenSold"].max())
# # Bringing In Additional Data
# One thing we could consider is bringing in additional data instead of just deriving it.
# Some ideas:
# * An inflation index or an average housing price index to reflect the state of the economy (especialy since 2008 sits square in the middle of our data);
# * Measurements of neighborhood characteristics: average income, walkability, etc.
# # Additional data clean up
# Initial observations:
# * No reason for GarageYrBlt to be a float -> convert to integer
# * CentralAir is currently Boolean, we want to convert it into 1s and 0s. Can either use LabelEncoder or do a simple replace
# NB on the list below: I originally included nice screenshots of the data from Pandas profiling to illustrate my logic, but it was slowing things down significantly, so I ended up removing them.
# * **BldgType**: keep 1Fam, bundle up other ones
# * **Neighborhood**: has 25 unique variables, but looking at the data, it is hard to believe that it is that significant. In order to avoid breaking this into 25 columns, we can keep top 5 variables and replace other neighborhoods with other.
# * **MSSubClass**: needs to be categorical. Convert to string + keep top 5 cariables, convert others to 'other'. Use value_counts() to identify top 5.
# * **KitchenAbgGr**: needs to be categorical. Convert to string.
# * **BsmtFullBath**: int now, needs to be categorical convert to string (interesting that pandas_profiling labels it as categorical, but doesn't flag that the data type is int, seems like it would be useful)
# * **BsmtHalfBath**: same as above, convert to string
# * **Condition2**: (Proximity to main road or railroad (if a second is present). I am not even sure we will need this field, but for now let's convert to 'normal' vs. 'other' so we don't deal with this long tail of distribution
# * **Electrical**: let's keep top 2 and the rest will be 'other'
# * **ExterCond**: Present condition of the material on the exterior. Ex: Excellent; Gd: Good; TA: Average/Typical; Fa: Fair; Po: Poor; very few Excellent, Fair and Poor, so I would just label those as other and keep Good and Average
# * **FireplaceQu**: same as above, keep top 3, bundle the other 2 together
# * **Functional**: home functionality rating. Most fall under typical, so we will keep that and bundle the others ones as 'other.'
# * **GarageCond**: narrow down to 'TA (avg)' and 'other'
# * **GarageQual**: same approach as above
# * **GarageType**: Keep top 3 (Ex,TA,Gd, bundle up other 2)
# * **Heating**: Keep top 3 (Ex, TA, Gd, bundle up other 2)
# * **HeatingQC**: Keep Ex,TA, Gd, bundle up other two
# * **HouseStyle**: keep top 3, bundle up others [1Story, 2Story,1.5Fin]
# * **PavedDrive**: keep top 1 (Y), bundle up the other two as N, then use label encoder
# * **PoolQC**: There are only 3 variables (Go, Fa, Ex), and they are normally distributed, so let's leave as is.
# * **RoofMatl**: keep top 1 (CompShg), bundle up others
# * **RoofStyle**: keep top 2 (Gable and Hip), bundle up others
# * **SaleCondition**: same as above, keep top 3 (Normal, Partial, Abnorml), group up other 2)
# * **SaleType**: same as above, keep top 2 (Wd, New) and bundle up others as 'other'
#
sns.boxplot(y="SalePrice", x="BldgType", data=df, width=0.5, palette="colorblind")
sns.boxplot(y="SalePrice", x="BsmtCond", data=df, width=0.5, palette="colorblind")
sns.boxplot(y="SalePrice", x="BsmtExposure", data=df, width=0.5, palette="colorblind")
sns.boxplot(y="SalePrice", x="Neighborhood", data=df, width=0.5, palette="colorblind")
# One of the clean up actions I wanted to take was converting OverallCond into a categorical variable (now it's an integer). Because I thought it might be correlated to price, I wanted to run a regression plot before that.
# correlation between sale price (y) and OverallCond
fig, ax = plt.subplots()
ax.scatter(x=df.OverallQual, y=df.SalePrice)
plt.ylabel("Sale Price, $", fontsize=13)
plt.xlabel("Overall Condition", fontsize=13)
plt.xticks(range(0, 10))
df.OverallCond.corr(df.SalePrice)
# Interesting that the correlation is showing almost no relationship (-0.07), although visually, it seems that that there should be a relationship. Let's run a few more checks:
# adding random noise for a clearer visualization of the values
sns.lmplot(x="OverallCond", y="SalePrice", data=df, x_jitter=0.1)
# The linear regression chart also confirms there is ano strong relationship between condition and price.
# At this point, we could simply drop the field, but for the sake of the exercise, I will bin variables into several categories on the off chance it will generate some results.
df.OverallCond.value_counts()
df.OverallCond.plot(kind="hist", color="red", edgecolor="black", figsize=(10, 10))
plt.title("Distribution of scores", size=24)
plt.xlabel("Condition Score", size=18)
plt.ylabel("Frequency", size=18)
# confirming distribution for the testing dataset
df_test.OverallCond.plot(kind="hist", color="red", edgecolor="black", figsize=(10, 10))
plt.title("Distribution of scores", size=24)
plt.xlabel("Condition Score", size=18)
plt.ylabel("Frequency", size=18)
# I am going to bin these into several categories:
# * 1-4: poor
# * 5-7: good
# * 8-10: exc {to note, there are no houses rated 10 in this dataset})
pd.pivot_table(df, index=["SaleType"], values=["SalePrice"]).plot()
def clean_data(df):
# df.GarageYrBlt = df.GarageYrBlt.astype(int) -- we already dropped this because we calculate Garage Age instead
df.BsmtFullBath = df.BsmtFullBath.astype(str)
df.BsmtHalfBath = df.BsmtHalfBath.astype(str)
# addressing CentralAir
lab_enc = preprocessing.LabelEncoder()
lab_enc.fit(df["CentralAir"])
var = lab_enc.transform(df["CentralAir"])
df["CentralAir"] = var
# addressing BldgType
def fix_BldgType(val):
if val in {"1Fam"}:
return val
return "Other"
df.BldgType = df.BldgType.apply(fix_BldgType)
# addressing MsSubClass
df.MSSubClass = df.MSSubClass.astype(str)
def fix_MSSubClass(val):
if val in {"20", "60", "50", "120", "30"}:
return val
return "Other"
df.MSSubClass = df.MSSubClass.apply(fix_MSSubClass)
# addressing Condition2
def fix_Condition2(val):
if val in {"Norm"}:
return val
return "Other"
df.Condition2 = df.Condition2.apply(fix_Condition2)
# addressing Electrical
def fix_Electrical(val):
if val in {"Sbrkr", "FuseA"}:
return val
return "Other"
df.Electrical = df.Electrical.apply(fix_Electrical)
# addressing ExterCond
def fix_ExterCond(val):
if val in {"TA", "Gd"}:
return val
return "Other"
df.ExterCond = df.ExterCond.apply(fix_ExterCond)
# addressing FireplaceQu
def fix_FireplaceQu(val):
if val in {"TA", "Gd"}:
return val
return "Other"
df.FireplaceQu = df.FireplaceQu.apply(fix_FireplaceQu)
# addressing Foundation
def fix_Foundation(val):
if val in {"PConc", "CBlock", "BrkTil"}:
return val
return "Other"
df.Foundation = df.Foundation.apply(fix_Foundation)
# addressing Functional
def fix_Functional(val):
if val in {"Typ"}:
return val
return "Other"
df.Functional = df.Functional.apply(fix_Functional)
# addressing GarageCond
def fix_GarageCond(val):
if val in {"TA"}:
return val
return "Other"
df.GarageCond = df.GarageCond.apply(fix_GarageCond)
df.GarageQual = df.GarageQual.apply(fix_GarageCond)
    # address GarageType, Heating, HeatingQC -- all use the same keep method
def keep_Ex_TA_Gd(val):
        if val in {"Ex", "TA", "Gd"}:
return val
return "Other"
df.GarageType = df.GarageType.apply(keep_Ex_TA_Gd)
df.Heating = df.Heating.apply(keep_Ex_TA_Gd)
df.HeatingQC = df.HeatingQC.apply(keep_Ex_TA_Gd)
# addressing HouseStyle:
def fix_HouseStyle(val):
if val in {"1Story", "2Story", "1.5Fin"}:
return val
return "Other"
df.HouseStyle = df.HouseStyle.apply(fix_HouseStyle)
# addressing Neighborhood
def fix_Neighborhood(val):
if val in {"NAmes", "CollgCr", "OldTown", "Edwards", "Somerst"}:
return val
return "Other"
df.Neighborhood = df.Neighborhood.apply(fix_Neighborhood)
# addressing OverallCond:
df.OverallCond = df.OverallCond.astype(str)
def fix_OverallCond(val):
if val in {"1", "2", "3", "4"}:
return "Poor"
elif val in {"5", "6", "7"}:
return "Good"
else:
return "Excellent"
df.OverallCond = df.OverallCond.apply(fix_OverallCond)
# addressing SaleType
def fix_SaleType(val):
if val in {"WD", "New"}:
return val
return "Other"
df.SaleType = df.SaleType.apply(fix_SaleType)
# addressing PavedDrive
def fix_PavedDrive(val):
if val in {"Y"}:
return val
return "N"
df.PavedDrive = df.PavedDrive.apply(fix_PavedDrive)
lab_enc = preprocessing.LabelEncoder()
lab_enc.fit(df["PavedDrive"])
var = lab_enc.transform(df["PavedDrive"])
df["PavedDrive"] = var
# addressing RoofMatl
def fix_RoofMatl(val):
if val in {"CompShg"}:
return val
return "Other"
df.RoofMatl = df.RoofMatl.apply(fix_RoofMatl)
# addressing RoofStyle
def fix_RoofStyle(val):
if val in {"Gable", "Hip"}:
return val
return "Other"
df.RoofStyle = df.RoofStyle.apply(fix_RoofStyle)
# addressing SaleCond:
def fix_SaleCondition(val):
if val in {"Normal", "Partial", "Abnorml"}:
return val
return "Other"
    df.SaleCondition = df.SaleCondition.apply(fix_SaleCondition)
# some variables should be categorical and not numeric
# df.MoSold = df.MoSold.astype(str)
return df
df_train = clean_data(df_train)
df_test = clean_data(df_test)
df = clean_data(df)
# let's check that the datasets have the number of fields we expect
print("training dataset shape is: ", df_train.shape)
print("testing dataset shape is: ", df_test.shape)
print("original dataset shape is: ", df.shape)
# Let's confirm it worked by selecting a few fields we modified to see if we achieved a result we would expect.
df_train.Functional.value_counts()
df_test.Functional.value_counts()
df_train.OverallCond.value_counts()
df_test.OverallCond.value_counts()
# **Drop features**
# This is some preliminary dropping using information I have learned from looking at the data. We will have to do more aggressive feature selection later.
# * **MiscVal** -- value of a miscellaneous feature. There are only 21 entries (fewer than misc feature entries, so some data is missing), and the max is 15k. It feels that we can drop this.
# * **Utilities** -- There are only two values, and one of them (NoSeWa) appears just once in the training dataset. The test dataset doesn't even feature it. Let's delete the field as it carries very little significance.
# * **Street** -- there are 1454 'Paved' options and 6 'Grvl' options. Let's simply drop this field.
def preliminary_drop_features(df):
df.drop(["MiscVal"], axis=1, inplace=True)
df.drop(["Utilities"], axis=1, inplace=True)
df.drop(["Street"], axis=1, inplace=True)
return df
df_train = preliminary_drop_features(df_train)
df_test = preliminary_drop_features(df_test)
df = preliminary_drop_features(df)
# let's check that the datasets have the number of fields we expect
print("training dataset shape is: ", df_train.shape)
print("testing dataset shape is: ", df_test.shape)
print("original dataset shape is: ", df.shape)
# **SOME MORE VISUALIZATION / EXPLORATION**
# Let's start with a general heatmap of correlations (we are mostly interested in correlations with the sale price, but other correlations can be of interest, too). From there, we can zoom in on specific relationships by plotting dedicated scatter plots.
corr = df.corr()
f, ax = plt.subplots(figsize=(30, 15))
sns.heatmap(corr, vmax=0.8, annot_kws={"size": 10}, cmap="coolwarm", annot=True)
# We are interested in 2 things here: correlations between parameters in general that may indicate collinearity and correlation between SalePrice and other variables.
# I see some interesting pairings that indicate potential collinearity. Since it's inefficient to figure it out manually, let's identify strong correlation pairings (both positive and negatives) first.
s = corr.unstack()
s[(abs(s) > 0.6) & (abs(s) < 1)]
# *COLLINEARITY*
# I see some interesting pairings that indicate potential collinearity:
# *Positive correlations*
# * 2ndFLrSF and NoStories (0.91); we added NoStories ourselves, so let's see which one to keep. 2ndFlrSF corr to SalePrice is 0.32; NoStories corr to SalePrice is 0.14.
# * We may well drop NoStories, as our hypothesis that it has a strong correlation to the SalePrice is erroneous.
# * Several interesting variables pertaining to TotalLiveArea, which we added ourselves, so let's see which one to keep.
# * GrLiveArea and TotalLiveArea (0.88); corr GrLiveArea and SalePrice 0.71; corr TotalLiveArea and SalePrice 0.78
# * 1stFlrSF and TotalLiveArea (0.88); corr 1stFlrSF and SalePrice 0.61; corr TotalLiveArea and SalePrice 0.78
# * TotalLiveArea and TotalBsmtSF (0.82); corr TotalBsmtSF and SalePrice 0.61; corr TotalLiveArea and SalePrice 0.78
# * Here, it looks like in each case TotalLiveArea has a higher correlation than the other feature in the pair, so I would actually drop GrLiveArea, 1stFlrSF, and TotalBsmtSF.
# * GrLiveArea and TotalRmsAbvGrd (0.83); corr GrLiveArea and SalePrice 0.71; corr TotalRmsAbvGrd and SalePrice 0.53
# * Both of these features were in the dataset originally. I would suggest dropping TotalRmsAbvGrd, but we have just dropped GrLiveArea (see above), so let's keep it for now.
# * GarageCars and GarageArea (0.89); this makes sense as garages are generally sized in terms of how many cars can fit there
# * Which one do we keep? Corr GarageCars and SalePrice = 0.64; corr GarageArea and SalePrice = 0.62. Let's keep GarageCars, even though the correlation is only a bit higher.
# *Negative correlations*
# It's interesting that the absolute values of the negative correlations are much lower than those of the positive correlations (|-0.6| vs. |0.9|). I interpret this as: there are certain factors that boost the base price, but fewer factors that truly take away from it. That would explain the positive skew we saw in the price distribution above.
# * GarageCars and GarageAgeWhenSold (-0.6).
# * This means that older garages tend to fit fewer cars, which makes sense as American society has been increasingly reliant on single-occupancy vehicle trips. I am not going to drop one of these as the GarageAgeWhenSold is not that strongly negatively correlated to SalePrice (-0.26).
# None of the other negative correlations stand out right now, so we will move on with our analysis. First, let's drop the features we have identified as collinear.
def drop_features(df):
df.drop("NoStories", axis=1, inplace=True)
df.drop("GrLivArea", axis=1, inplace=True)
df.drop("TotalBsmtSF", axis=1, inplace=True)
df.drop("1stFlrSF", axis=1, inplace=True)
df.drop("GarageArea", axis=1, inplace=True)
return df
df_train = drop_features(df_train)
df_test = drop_features(df_test)
df = drop_features(df)
# let's check that the datasets have the number of fields we expect
print("training dataset shape is: ", df_train.shape)
print("testing dataset shape is: ", df_test.shape)
print("original dataset shape is: ", df.shape)
# The dataset is still quite multidimensional. Let's focus on the 10 variables most strongly correlated with SalePrice instead.
# saleprice correlation matrix
plt.figure(figsize=(20, 10))
corrmat = df.corr()
# picking the top 10 correlated features
cols = corrmat.nlargest(10, "SalePrice")["SalePrice"].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(
cm,
cbar=True,
annot=True,
square=True,
fmt=".2f",
annot_kws={"size": 10},
yticklabels=cols.values,
xticklabels=cols.values,
)
# Overall Quality still comes out on top in terms of correlation. Let's look into its relationship with the sale price.
# Now let's do the same for the other end of the scale: the 10 variables least correlated with SalePrice.
# saleprice correlation matrix
plt.figure(figsize=(20, 10))
corrmat = df.corr()
# picking the 10 least correlated features
cols = corrmat.nsmallest(10, "SalePrice")["SalePrice"].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(
cm,
cbar=True,
annot=True,
square=True,
fmt=".2f",
annot_kws={"size": 10},
yticklabels=cols.values,
xticklabels=cols.values,
)
fig, ax = plt.subplots()
ax.scatter(x=df.OverallQual, y=df.SalePrice)
plt.ylabel("Sale Price, $", fontsize=13)
plt.xlabel("overall material and finish of the house", fontsize=13)
plt.xticks(range(0, 10))
# adding random noise for a clearer visualization of the values
sns.lmplot(x="OverallQual", y="SalePrice", data=df, x_jitter=0.1)
# confidence interval
sns.lmplot(x="OverallQual", y="SalePrice", data=df, x_estimator=np.mean)
df.GarageCars.describe()
# -> min 0 cars, max 4
fig, ax = plt.subplots()
ax.scatter(x=df.GarageCars, y=df.SalePrice)
plt.ylabel("Sale Price, $", fontsize=13)
plt.xlabel("Number of cars in a garage", fontsize=13)
plt.xticks(range(0, 5))
plt.show()
df.GarageCars.corr(df.SalePrice)
sns.lmplot(x="GarageCars", y="SalePrice", data=df, x_jitter=0.05)
# just for fun, we don't really need a box plot here
with sns.color_palette("Dark2"):
with sns.plotting_context("poster"):
with sns.axes_style("dark", {"axes.facecolor": "pink"}):
fig, ax = plt.subplots(figsize=(20, 8))
sns.boxplot(x="GarageCars", y="SalePrice", data=df, ax=ax)
ax.tick_params(axis="x", labelrotation=45)
# fig.savefig('/tmp/box.png', dpi=300)
df.GarageCars.corr(df.TotalLiveArea)
# the number of cars that fit in a garage does correlate with the size of the house, although the fact that it correlates strongly with the price, as we saw separately above, indicates its independent value to buyers
df.shape
sns.lmplot(x="TotalLiveArea", y="SalePrice", data=df)
df.TotalLiveArea.corr(df.SalePrice)
# delete the two outliers in the bottom right of the charts above; the top right ones look to be on the best-fit line, although they are quite removed from the bulk of the data
df = df.drop(df[(df.TotalLiveArea > 6000) & (df.SalePrice < 200000)].index)
print(df.shape)
fig, ax = plt.subplots()
ax.scatter(x=df.AgeWhenSold, y=df.SalePrice)
plt.ylabel("Sale Price, $", fontsize=13)
plt.xlabel("Age When Sold", fontsize=13)
df.AgeWhenSold.corr(df.SalePrice)
# Curious whether there is a correlation between AgeWhenSold and GarageAgeWhenSold
fig, ax = plt.subplots()
ax.scatter(x=df.AgeWhenSold, y=df.GarageAgeWhenSold)
plt.ylabel("Garage Age When Sold", fontsize=13)
plt.xlabel("Age When Sold", fontsize=13)
df.AgeWhenSold.corr(df.GarageAgeWhenSold)
fig, ax = plt.subplots()
ax.scatter(x=df.YrsSinceRemodel, y=df.SalePrice)
plt.ylabel("Sale Price, $", fontsize=13)
plt.xlabel("Years Since Remodel", fontsize=13)
plt.show()
df.YrsSinceRemodel.corr(df.SalePrice)
fig, ax = plt.subplots()
ax.scatter(x=df.OverallCond, y=df.SalePrice)
plt.ylabel("Sale Price, $", fontsize=13)
plt.xlabel("Overall Condition", fontsize=13)
plt.show()
# Still not seeing much value in Overall Condition, but let's move on for now.
# # Dealing with data skews
# * Looking at the Pandas profiling, I noticed that a lot of data is skewed, at least visually.
# * Let's find out what those skews are.
# * We have already cleaned our data to contain numeric features only, so we don't have to worry about that.
# * See: https://www.geeksforgeeks.org/python-pandas-dataframe-skew/
# skewed_feats = df.apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
# pd.DataFrame({'Skew': skewed_feats}).head(20)
# * We can see that there is indeed a fair number of skewed features. Now we have to bring them back to normal distribution.
# * Some interesting thoughts on dealing with skewed data here: https://towardsdatascience.com/transforming-skewed-data-73da4c2d0d16
# * Specifically on Box-Cox: https://medium.com/@kangeugine/box-cox-transformation-ba01df7da884; statistics behind it: http://onlinestatbook.com/2/transformations/box-cox.html
# * We use the scipy function boxcox1p which computes the Box-Cox transformation of 1+x .
# * Note that setting λ=0 is equivalent to log1p used above for the target variable.
# skewness = skewed_feats[abs(skewed_feats) > 0.75]
# print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
# from scipy.special import boxcox1p
# skewed_features = skewness.index
# lam = 0.15
# for feat in skewed_features:
# df[feat] = boxcox1p(df[feat], lam)
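# A hedged, runnable version of the commented-out steps above (works on a copy so the
# rest of the pipeline is unchanged; the 0.75 threshold and lam=0.15 come from the notes):
from scipy.stats import skew
from scipy.special import boxcox1p
_num_cols = df.select_dtypes(include=[np.number]).columns.drop("SalePrice", errors="ignore")
_skewed_feats = df[_num_cols].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
_skewness = _skewed_feats[abs(_skewed_feats) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(_skewness.shape[0]))
_df_boxcox = df.copy()
for feat in _skewness.index:
    _df_boxcox[feat] = boxcox1p(_df_boxcox[feat], 0.15)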
# Use the get_dummies method to deal with categorical values
# let's check that the datasets have the number of fields we expect; original dataset (df) is expected to have N+1 number of fields
print("training dataset shape is: ", df_train.shape)
print("testing dataset shape is: ", df_test.shape)
print("original dataset shape is: ", df.shape)
df_test.dtypes
df = pd.get_dummies(df, prefix_sep="_", drop_first=True)
df_train = pd.get_dummies(df_train, prefix_sep="_", drop_first=True)
df_test = pd.get_dummies(df_test, prefix_sep="_", drop_first=True)
# let's check that the datasets have the number of fields we expect; original dataset (df) is expected to have N+1 number of fields
print("training dataset shape is: ", df_train.shape)
print("testing dataset shape is: ", df_test.shape)
print("original dataset shape is: ", df.shape)
df_test.head(10)
# # Split into training and testing datasets
X_train, X_test, y_train, y_test = train_test_split(
df.drop("SalePrice", axis=1), df["SalePrice"], test_size=0.3, random_state=42
)
# # Standardize the data
# We need this step to perform PCA
from sklearn.preprocessing import StandardScaler
x = StandardScaler().fit_transform(df_train)
# # PCA projection to 2D
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(
data=principalComponents, columns=["principal component 1", "principal component 2"]
)
finalDf = pd.concat([principalDf, df[["SalePrice"]]], axis=1)
pca.explained_variance_ratio_
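# A quick look at the 2D projection (a sketch; assumes finalDf from the cell above and
# drops any rows where the components and SalePrice did not align on the index):
_plot_df = finalDf.dropna()
plt.figure(figsize=(8, 6))
plt.scatter(
    _plot_df["principal component 1"],
    _plot_df["principal component 2"],
    c=_plot_df["SalePrice"],
    cmap="viridis",
    s=15,
)
plt.colorbar(label="SalePrice")
plt.xlabel("principal component 1")
plt.ylabel("principal component 2")
plt.show()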
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **AI class assignment submission 02323038 김병승**
# #
# # **Importing the modules needed for the Space Titanic data analysis**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
# # **Loading Dataset**
dataset_df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
dataset_df
dataset_df.info()
# # **Checking for missing data**
missingno.matrix(dataset_df)
# # **Numeric Features**
# Since the Transported phenomenon should only be related to the physical numeric variables,
# we restrict the analysis to the numeric columns. Therefore we convert
# Transported True -> 1
# Transported False -> 0 so that it becomes a numeric feature
#
dataset_df["Transported_num"] = dataset_df["Transported"] + 0
dataset_df
# # **Filling in the missing values**
# Missing values are conservatively assumed to be 0.
#
dataset_df.isnull()
dataset_df["Age"].fillna(0, inplace=True)
dataset_df["RoomService"].fillna(0, inplace=True)
dataset_df["FoodCourt"].fillna(0, inplace=True)
dataset_df["ShoppingMall"].fillna(0, inplace=True)
dataset_df["Spa"].fillna(0, inplace=True)
dataset_df["VRDeck"].fillna(0, inplace=True)
missingno.matrix(dataset_df)
# # **Correlation analysis**
num_cols = [
"Age",
"RoomService",
"FoodCourt",
"ShoppingMall",
"Spa",
"VRDeck",
"Transported_num",
]
dataset_df[num_cols].corr()
sns.heatmap(data=dataset_df[num_cols].corr(), annot=True, cmap="coolwarm")
|
import numpy as np
import pandas as pd
import glob
sub = pd.read_csv(
"/kaggle/input/abstraction-and-reasoning-challenge/sample_submission.csv"
)
tasks = pd.DataFrame(
glob.glob("/kaggle/input/abstraction-and-reasoning-challenge/**/**"),
columns=["path"],
)
tasks["tte"] = tasks["path"].map(lambda x: x.split("/")[-2])
tasks["output_id"] = tasks["path"].map(lambda x: x.split("/")[-1].split(".")[0])
tasks["file"] = tasks["path"].map(lambda x: eval(open(x).read()))
tasks["train"] = tasks["file"].map(lambda x: x["train"])
tasks["test"] = tasks["file"].map(lambda x: x["test"])
tasks.drop(columns=["file"], inplace=True)
tasks["l"] = tasks.apply(lambda r: (len(r["train"]), len(r["test"])), axis=1)
sub.shape, tasks.tte.value_counts()
import matplotlib.pyplot as plt
from matplotlib import colors
fig = plt.figure(figsize=(8.0, 6.0))
# https://www.kaggle.com/nagiss/manual-coding-for-the-first-10-tasks
cmap = colors.ListedColormap(
[
"#000000",
"#0074D9",
"#FF4136",
"#2ECC40",
"#FFDC00",
"#AAAAAA",
"#F012BE",
"#FF851B",
"#7FDBFF",
"#870C25",
]
)
norm = colors.Normalize(vmin=0, vmax=9)
def viz(path):
f = eval(open(path, "r").read())
train = f["train"]
test = f["test"]
f, ar = plt.subplots(3, len(train))
for i in range(len(train)):
ar[0, i].imshow(np.array(train[i]["input"]), cmap=cmap, norm=norm)
ar[1, i].imshow(np.array(train[i]["output"]), cmap=cmap, norm=norm)
if i < len(test):
ar[2, i].imshow(np.array(test[i]["input"]), cmap=cmap, norm=norm)
else:
ar[2, i].imshow(
np.zeros(np.array(test[0]["input"]).shape), cmap=cmap, norm=norm
)
plt.show()
for i in range(3):
viz(tasks["path"][i])
# [The Abstraction and Reasoning Corpus (ARC)](https://github.com/fchollet/ARC)
# * Download and use the testing_interface.html file to try a few yourself then pass it on to your neural network
# * You get up to 3 predictions per test task
def flattener(pred):
str_pred = "|" + "|".join(["".join([str(v) for v in row]) for row in pred]) + "|"
str_pred = " ".join([str_pred for i in range(2)]) # simulating 2 predictions
# Adding a blank prediction similar to the sample submission
str_pred += " |" + "|".join(["".join([str(0) for v in row]) for row in pred]) + "|"
return str_pred
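# Quick sanity check of the submission string format produced by flattener
# (a tiny hypothetical 2x2 grid, not one of the real tasks):
print(flattener([[0, 1], [2, 3]]))  # -> |01|23| |01|23| |00|00|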
test = tasks[tasks["tte"] == "test"].reset_index(drop=True)
sub = open("submission.csv", "w")
sub.write("output_id,output\n")
for i in range(len(test)):
for j in range(len(test["test"][i])):
        # Add your predictions here - just taking the first train output here for shape
sub.write(
test["output_id"][i]
+ "_"
+ str(j)
+ ","
+ flattener(np.array(test["train"][i][0]["output"]))
+ " \n"
)
sub.close()
|
# # Implementing a KNN classifier
# ## Importing our Data Set
# In order to use our dataset, we first need to read the contents of the .csv file and store them in lists for later use
import csv
data_set_X = []  # features (length_of_ears, weight)
data_set_y = [] # labels (0 (cat), 1 (dog))
# 1. reading our data set
with open("/kaggle/input/cats-dogs-csv-featurs/cats_dogs_dataset.csv", "r") as file:
skip_first_line = True
samples = csv.reader(file)
for sample in samples:
if skip_first_line == True:
skip_first_line = False
else:
data_set_X.append([float(feature) for feature in sample[:-1]])
data_set_y.append(sample[len(sample) - 1])
# ## Data Visualization
# To better understand our data distribution, we visualize the data as a scatter plot.
import matplotlib.pyplot as plt
# 2. visualizing our data
x, y = [], []
for sample in data_set_X:
x.append(sample[0])
y.append(sample[1])
plt.scatter(x, y, c=data_set_y, edgecolor="k", s=100)
plt.xlabel("Length of ears")
plt.ylabel("wieght")
plt.show()
# ## Data Splitting (Training / Testing)
def train_test_split(samples, labels):
test_size = int(len(samples) * 0.2) # 20% of original samples size
train_size = len(samples) - test_size # 80% of original samples size
X_train = samples[:train_size]
X_test = samples[train_size:]
y_train = labels[:train_size]
y_test = labels[train_size:]
return X_train, X_test, y_train, y_test
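# Note: the split above simply takes the first 80% of rows for training and the last
# 20% for testing, in file order. As a hedged sketch, a shuffled variant (hypothetical
# helper, not used below; fixed seed for reproducibility):
import random
def shuffled_train_test_split(samples, labels, test_ratio=0.2, seed=42):
    indices = list(range(len(samples)))
    random.Random(seed).shuffle(indices)
    test_size = int(len(samples) * test_ratio)
    test_idx, train_idx = indices[:test_size], indices[test_size:]
    X_tr = [samples[i] for i in train_idx]
    X_te = [samples[i] for i in test_idx]
    y_tr = [labels[i] for i in train_idx]
    y_te = [labels[i] for i in test_idx]
    return X_tr, X_te, y_tr, y_te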
X_train, X_test, y_train, y_test = train_test_split(data_set_X, data_set_y)
# print(len(X_train))
# print(len(X_test))
# print(y_train)
# print(y_test)
# ## Model Creation & Prediction
# KNN is a lazy algorithm and does not require a training step
import numpy as np
from collections import Counter
K = 3  # K parameter of KNN
# EXPERIMENTAL: using sklearn
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier(n_neighbors=K, p=2, metric="euclidean")
knn_classifier.fit(X_train, y_train)
y_pred = knn_classifier.predict(X_test)
print(y_pred)
# END EXPERIMENTAL
# helper functions
def euclidean_distance(x1, x2):
    return np.sqrt(np.sum((np.array(x1) - np.array(x2)) ** 2))
# run inference on a single input sample
def knn_run_single_prediction(x):
    # 1. Calculate the distances to every training sample
    distances = []
    for x_train in X_train:
        distances.append(euclidean_distance(x_train, x))
    # 2. Choose the top K closest samples and collect their labels
    top_k_idx = np.argsort(distances)[:K]
    top_k_labels = []
    for idx in top_k_idx:
        top_k_labels.append(y_train[idx])
    # 3. Get the dominant label (majority vote)
    dominant_label = Counter(top_k_labels).most_common()
    # 4. return the predicted label
    return dominant_label[0][0]
# run inference on a batch of input samples
def knn_run_batch_prediction(X):
    return [knn_run_single_prediction(x) for x in X]
predictions = knn_run_batch_prediction(X_test)
print(predictions)
# ## Evaluation
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
c_matrix = confusion_matrix(y_test, predictions)
accuracy = accuracy_score(y_test, predictions)
# MANUAL CALCULATION OF ACCURACY
# accuracy = np.sum(predictions == y_test) / len(y_test)
# print(accuracy)
print(c_matrix)
print(accuracy)
|
import pandas as pd
import numpy as np
# Breast cancer detection
url = "https://raw.githubusercontent.com/anvarnarz/praktikum_datasets/main/breast-cancer.csv"
df = pd.read_csv(url)
df.sample(10)
# The dataset contains breast tumor measurements. The diagnosis column indicates whether the tumor is cancerous (M, malignant) or non-cancerous (B, benign).
df["diagnosis"].value_counts()
# We change these values to 0 and 1. M->1, B->0
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
df["diagnosis"] = labelencoder.fit_transform(df["diagnosis"].values)
df["diagnosis"].value_counts()
# We randomly sample 212 rows with the value 0 in the diagnosis column, to balance the two classes.
df_0_212 = df[df["diagnosis"] == 0].sample(212)
df_1 = df[df["diagnosis"] == 1]
df_norm = pd.concat([df_0_212, df_1])
df_norm["diagnosis"].value_counts()
# Drop the ID column
df_norm.drop(columns=["id"], inplace=True)
# Correlation
corr_matrix = df_norm.corr().abs()
corr_matrix.style.background_gradient(cmap="coolwarm")
df_norm.corrwith(df_norm["diagnosis"]).abs().sort_values(ascending=False)
# As you can see from this object, we discard the columns whose correlation with the diagnosis column is less than 0.4.
cols = [
    "diagnosis",
    "symmetry_se",
    "texture_se",
    "fractal_dimension_mean",
    "smoothness_se",
    "fractal_dimension_worst",
    "smoothness_mean",
    "fractal_dimension_se",
    "compactness_se",
    "symmetry_mean",
    "concavity_se",
    "symmetry_worst",
]
X = df_norm.drop(cols, axis=1).values
y = df_norm["diagnosis"]
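# A hedged alternative to the hand-written list above: derive the low-correlation columns
# programmatically from the 0.4 threshold mentioned earlier (may differ slightly from the
# manual list; X_alt is not used further below).
corr_with_target = df_norm.corrwith(df_norm["diagnosis"]).abs()
low_corr_cols = corr_with_target[corr_with_target < 0.4].index.tolist()
X_alt = df_norm.drop(columns=low_corr_cols + ["diagnosis"]).values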
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)
# Machine Learning
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=12
)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_predict = knn.predict(X_test)
# Evaluation
# Jaccard index
from sklearn.metrics import jaccard_score
jaccard_score(y_test, y_predict)
# Confusion matrix
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
sns.heatmap(confusion_matrix(y_test, y_predict), annot=True)
plt.show()
confusion_matrix(y_test, y_predict)
# Precision, recall, F1
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
precision = precision_score(y_test, y_predict)
recall = recall_score(y_test, y_predict)
f1 = f1_score(y_test, y_predict)
accuracy = accuracy_score(y_test, y_predict)
print(f"precision={precision}\nrecall={recall}\nf1={f1}\naccuracy={accuracy}")
from sklearn.metrics import classification_report
print(classification_report(y_test, y_predict))
# Cross-validation
# Cross validation
from sklearn.model_selection import cross_val_predict
predict = cross_val_predict(estimator=knn, X=X, y=y, cv=5)
from sklearn.metrics import classification_report
print("Classification Report: \n", classification_report(y, predict))
# Finding the best k
# Using a for loop
f1 = []
for k in range(1, 25):
    knn = KNeighborsClassifier(n_neighbors=k)  # the value of k
knn.fit(X_train, y_train)
y_predict = knn.predict(X_test)
f1.append(f1_score(y_test, y_predict))
plt.figure(figsize=(10, 6))
plt.plot(range(1, 25), f1)
plt.xticks(range(1, 25))
plt.grid()
plt.show()
# Grid Search
from sklearn.model_selection import GridSearchCV
param_grid = {"n_neighbors": np.arange(1, 25)}
knn_gscv = GridSearchCV(knn, param_grid, cv=5)
knn_gscv.fit(X, y)
knn_gscv.cv_results_["rank_test_score"]
knn_gscv.best_params_
knn_gscv.best_score_
plt.figure(figsize=(10, 6))
plt.plot(param_grid["n_neighbors"], knn_gscv.cv_results_["rank_test_score"])
plt.xticks(param_grid["n_neighbors"])
plt.xlabel("k")
plt.ylabel("Xatolik reytingi")
plt.grid()
plt.show()
|
# # Principal Component Analysis (PCA / ACP)
# Principal component analysis (PCA, ACP in French) is a method from the family of data analysis and, more generally, multivariate statistics. It consists in transforming variables that are linked to each other (said to be "correlated" in statistics) into new variables that are decorrelated from one another. These new variables are called "principal components", or principal axes. It allows the practitioner to reduce the number of variables and make the information less redundant.
# It is used in two cases:
# - Multivariate analysis. In the exploratory data analysis stage, the usual methods are univariate statistics (min, max, etc.) and bivariate statistics (correlation, scatter plots, etc.). However, to study correlations between more than two variables, PCA is required.
# - In big data or high-dimensional settings, to reduce the number of variables and allow a Machine Learning model to be built quickly. The level of interpretability is, however, lower than when using the raw variables.
# # Part One: Multivariate Analysis
# Your task is to handle a high-volume database and extract relevant information from it. Not in the form of a plain table of percentages with a histogram and a mean, but through a multidimensional analysis that detects relationships, so that you can segment a customer base, adapt the variables of your marketing mix, boost your knowledge management, in short, move up a gear.
# PCA on the variables is a powerful tool for this, as long as the available variables are numeric. Here is an example with few variables, so that the pipeline of a Principal Component Analysis can be clearly understood.
# Since PCA is a dimensionality reduction, the statistician can usually visualize most of the variable space on one, two or even three planes, as we will see. Graphically, the points are projected onto normed axes on which the new coordinates are read. Taken two by two, these axes define planes. The vector space of the variables is the dual of that of the individuals.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# ### Question 1:
# Import pandas as pd, numpy as np, matplotlib.pyplot as plt, seaborn as sns, and PCA (https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)
## PCA
## pandas
## numpy
## matplotlib.pyplot
## seaborn
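# A minimal set of imports answering Question 1 (later cells in this notebook also
# import these again where needed):
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA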
# ### Question 2:
# Import the autos_acp file.
# - Import the file - Note that this is an Excel file, not a CSV. Use pd.read_excel() - Specify the following options: sheet_name=0, header=0, index_col=0
# - Display the number of rows and columns
# - Display the column names
# - Describe the dataset with .info()
# - Output the basic descriptive statistics
# - Display the first 5 rows
# - Display the last 5 rows
# - Store the FINITION variable in finition
# - Drop the FINITION variable
import pandas as pd
df = pd.read_excel(
"/kaggle/input/ml-training-vlib/autos_acp.xls", sheet_name=0, header=0, index_col=0
)
finition = df["FINITION"]
df = df.drop("FINITION", axis=1)
df.head(), df.info(), df.describe(), df.tail(), df.columns, df.shape, df.shape[
0
], df.shape[1]
# ### Question 3:
# Since the data are expressed in different units, it is best to perform a normed (standardized) PCA. For this we need to subtract the mean from each variable and then divide by its standard deviation.
# - Use StandardScaler() to standardize your data - https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# - Check that the mean of each variable is zero and that the standard deviation is 1.
# NB: This kind of transformation is always worth using, even within a Machine Learning pipeline.
# class for standardization
from sklearn.preprocessing import StandardScaler
# instantiation
sc = StandardScaler()
# transformation - centering and scaling
Z = sc.fit_transform(df)
# check with the numpy library
import numpy
# mean
print(numpy.mean(Z, axis=0))
# standard deviation
print(numpy.std(Z, axis=0))
print(Z)
# # Question 4:
# The parameter (svd_solver='full') indicates the algorithm used for the singular value decomposition. We choose the 'exact' method, which is selected by default anyway for small datasets. Other approaches are available for handling large datasets. Since the number of components (K) is not specified (n_components=None), it defaults to the number of variables (K = p).
# - Instantiate PCA() in a variable called acp
# - Apply fit_transform() to your data and store the result in the variable coord
# - Display the number of components with .n_components_
# class for PCA
from sklearn.decomposition import PCA
# instantiation
acp = PCA()
# computations
coord = acp.fit_transform(Z)
# number of computed components
print(acp.n_components_)
# # Question 5:
# The percentage of explained variance, i.e. the sum of the percentages of variance explained by the axes of the representation, tells us the share of the information restored by the PCA. The closer it is to 100%, the better our analysis. We usually look for a representation that restores at least 50% of the variance, that is at least half of the total information. This quantifies the percentage of information kept or lost by our dimensionality reduction.
# - Display the percentage of variance explained by our axes. See the scikit-learn PCA documentation
# - Show this as a bar chart. To do so: create a list with the names of the principal components (CP1 to CP8 here), then build a DataFrame from that list. Name the column "ACP". In a second column named "explained_variance", store the explained variance. Then use a seaborn barplot() - https://seaborn.pydata.org/examples/horizontal_barplot.html
# - What do you notice?
# explained variance
print(acp.explained_variance_)
list_acp = ["CP1", "CP2", "CP3", "CP4", "CP5", "CP6", "CP7", "CP8"]
df_acp = pd.DataFrame(list_acp, columns=["ACP"])
df_acp["explained_variance"] = acp.explained_variance_
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
# Initialize the matplotlib figure
f, ax = plt.subplots(figsize=(6, 8))
# Plot the explained variance per component
sns.set_color_codes("pastel")
sns.barplot(x="explained_variance", y="ACP", data=df_acp, label="Total", color="b")
# The largest amount of information is always carried by the first component.
# # Question 6:
# The chart above shows the amount of information carried by each component. We now need to display that amount of information cumulatively. From there we will be able to choose the number of components to keep for our statistical analysis.
# - Use the .cumsum() function to get the explained variance cumulatively
# - Show it in a chart - A simple matplotlib plot is enough. Use list_acp for the x axis
# cumulative explained variance
plt.plot(list_acp, numpy.cumsum(acp.explained_variance_ratio_))
plt.title("Explained variance vs. # of factors")
plt.ylabel("Cumsum explained variance ratio")
plt.xlabel("Factor number")
plt.show()
# How much explained variance do we have for the CP1 & CP2 plane?
# Answer: 85%
# # Question 7:
# Although the first component represents more than 50% of the information, by convention we will keep 2 components so that we can build a projection and an analysis in this plane.
# Factor coordinates. The factor coordinates (Fik) of the individuals were collected in the variable coord (Section 3.3.1). We position them in the first factorial plane with their labels, to locate and understand the proximities between the vehicles.
# Two preliminary remarks:
# 1. Adding a label to a scatter plot is not very convenient in Python (Matplotlib); my solution has the merit of working, and I do not know whether there is a simpler way (I did look).
# 2. Graphing tools often compute the scales automatically from the value ranges. That is not a good idea for PCA: the axes do not have the same importance (% of variance restored). To avoid distorting the perception of proximities, it is very important to make sure the scales are identical on the x and y axes. Following this rule also means we do not need to display the percentages of variance carried by the axes. We can see directly in our chart that the dispersion of the individuals is clearly larger on the first axis, along the x axis.
#
# position the individuals in the first factorial plane
fig, axes = plt.subplots(figsize=(12, 12))
axes.set_xlim(-6, 6)  # same limits on the x axis
axes.set_ylim(-6, 6)  # and on the y axis
# place the observation labels
for i in range(18):
plt.annotate(df.index[i], (coord[i, 0], coord[i, 1]))
# add the axes
plt.plot([-6, 6], [0, 0], color="silver", linestyle="-", linewidth=1)
plt.plot([0, 0], [-6, 6], color="silver", linestyle="-", linewidth=1)
# display
plt.show()
# # Question 8:
# Before we can interpret the results, two things are needed: identify the interpretable individuals, and identify the variables behind our principal components. We first look at the contribution of each individual to the construction of the principal components. Only individuals with an above-average contribution are interpretable; the others may be interpretable on other planes (CP3, etc.). Quality of representation - the COS2 (squared cosine): to compute the quality of representation of the individuals on the axes, we must first compute the squared distances of the individuals to the origin, which also correspond to their contribution to the total inertia.
# - Filter the individuals with d_i greater than the mean of d_i
# - Display the IDs of these individuals
# - How many individuals can be used? Where are these individuals located in our plane?
di = np.sum(Z**2, axis=1)
df_ctr_ind = pd.DataFrame({"ID": df.index, "d_i": di})
df_ctr_ind
df_ctr_ind[df_ctr_ind["d_i"] > df_ctr_ind.d_i.mean()]
# # Question 9:
# What we have just seen is the contribution of each individual to the PCA as a whole. We have restricted ourselves to two components, so we now compute the contribution of each individual in our CP1/CP2 plane and deduce the interpretable individuals in this plane.
# - Filter the individuals with an above-average contribution for CP1
# - Same for CP2
# - Display these individuals
# contributions to the axes
eigval = (18 - 1) / 18 * acp.explained_variance_
ctr = coord**2
for j in range(8):
ctr[:, j] = ctr[:, j] / (18 * eigval[j])
df_ctr_cp1cp2 = pd.DataFrame({"id": df.index, "CTR_1": ctr[:, 0], "CTR_2": ctr[:, 1]})
df_ctr_cp1cp2[df_ctr_cp1cp2["CTR_1"] > df_ctr_cp1cp2.CTR_1.mean()]
df_ctr_cp1cp2[df_ctr_cp1cp2["CTR_2"] > df_ctr_cp1cp2.CTR_2.mean()]
# # Question 10:
# Not all individuals are well represented on our principal components. At first we displayed all individuals on the CP1 & CP2 axes; we only want to display those that are interpretable. To do this we compute the quality of representation of each individual in the CP1 & CP2 plane.
# - Filter the individuals that are well represented on axis 1
# - Filter the individuals that are well represented on axis 2
# - Display the individuals that are well represented on axis 1 and axis 2.
# quality of representation of the individuals - COS2
cos2 = coord**2
for j in range(8):
cos2[:, j] = cos2[:, j] / di
df_ctr_12 = pd.DataFrame(
{"id": df.index, "COS2_1": cos2[:, 0], "COS2_2": cos2[:, 1]}
)
df_ctr_12
df_ctr_12[df_ctr_12["COS2_1"] > df_ctr_12.COS2_1.mean()]
df_ctr_12[df_ctr_12["COS2_2"] > df_ctr_12.COS2_2.mean()]
df_ctr_12[df_ctr_12["COS2_1"] > df_ctr_12.COS2_1.mean()].id.append(
df_ctr_12[df_ctr_12["COS2_2"] > df_ctr_12.COS2_2.mean()].id
).drop_duplicates().reset_index(drop=True)
# # Question 11:
# We now have an idea of the individuals we will be able to study. The next step is to characterize our principal components. As a reminder, each component is a linear combination of the correlated variables of our input dataset, and the components are decorrelated from one another.
# - Analyze the correlation circle and deduce which individuals will be on the right of axis 1 and which on the left
# - Analyze the correlation circle and deduce which individuals will be at the top of axis 2 and which at the bottom
# square root of the eigenvalues
sqrt_eigval = np.sqrt(eigval)
# correlation of the variables with the axes
corvar = np.zeros((8, 8))
for k in range(8):
corvar[:, k] = acp.components_[k, :] * sqrt_eigval[k]
# display the variables x factors correlation matrix
print(pd.DataFrame(corvar))
# display it for the first two axes
print(pd.DataFrame({"id": df.columns, "COR_1": corvar[:, 0], "COR_2": corvar[:, 1]}))
# correlation circle
fig, axes = plt.subplots(figsize=(8, 8))
axes.set_xlim(-1, 1)
axes.set_ylim(-1, 1)
# display the labels (variable names)
for j in range(8):
plt.annotate(df.columns[j], (corvar[j, 0], corvar[j, 1]))
# add the axes
plt.plot([-1, 1], [0, 0], color="silver", linestyle="-", linewidth=1)
plt.plot([0, 0], [-1, 1], color="silver", linestyle="-", linewidth=1)
# add a circle
cercle = plt.Circle((0, 0), 1, color="blue", fill=False)
axes.add_artist(cercle)
# display
plt.show()
# Answer: On the right we find individuals with a high price, displacement, weight, power, width and length; on the left, individuals with low values for those same variables. Individuals with a high power-to-weight ratio (R-Poid.puis) are at the bottom and those with a low one at the top.
# The size effect on the first axis is clear: powerful, fast cars are also the heaviest and largest; the overall relationship between the variables is in fact driven by the engine displacement (CYL).
# # Question 12:
# We will now study the quality of representation of each variable.
# - Filter the variables with an above-average representation on axis 1
# - Filter the variables with an above-average representation on axis 2
# - Display the variables
# - Refine the analysis from question 11
# squared cosine of the variables
cos2var = corvar**2
df_ctr_variables = pd.DataFrame(
{"id": df.columns, "COS2_1": cos2var[:, 0], "COS2_2": cos2var[:, 1]}
)
df_ctr_variables[df_ctr_variables["COS2_1"] > df_ctr_variables["COS2_1"].mean()]
df_ctr_variables[df_ctr_variables["COS2_2"] > df_ctr_variables["COS2_2"].mean()]
# Answer: CP1 is associated with CYL, PUISS, LONG, POIDS & PRIX. CP2 is associated with R-POID.PUIS and, to a lesser extent, LONG, LARG, V-MAX.
# # Question 13:
# It is possible to add variables or individuals as "illustrative" elements. They are not taken into account in the computation of the principal components. For example, we can cross our PCA with categorical variables, such as the "FINITION" variable we dropped, in order to deduce certain correlations.
# - Display finition
# - Analyze the new projection with the associated colors. What can we deduce?
#
print(finition)
# categories of the qualitative variable
modalites = numpy.unique(finition)
print(modalites)
# We plot the individuals in the factorial plane, colored by the associated category of the illustrative variable.
# list of colors
couleurs = ["red", "green", "blue"]
# draw the chart, coloring the points
fig, axes = plt.subplots(figsize=(12, 12))
axes.set_xlim(-6, 6)
axes.set_ylim(-6, 6)
# for each category of the illustrative variable
for c in range(len(modalites)):
# indices of the individuals concerned
numero = numpy.where(finition == modalites[c])
# loop over them for display
for i in numero[0]:
plt.annotate(df.index[i], (coord[i, 0], coord[i, 1]), color=couleurs[c])
# add the axes
plt.plot([-6, 6], [0, 0], color="silver", linestyle="-", linewidth=1)
plt.plot([0, 0], [-6, 6], color="silver", linestyle="-", linewidth=1)
# display
plt.show()
# Answer: The finition (trim level) variable appears to be correlated with the variables behind our principal components. With the colors {1_M: red, 2_B: green, 3_TB: blue}, individuals with a TB trim tend to be on the right and individuals with an M trim on the left. The trim level seems to be correlated with the size of the vehicle (size effect on axis 1) rather than with raw performance (sportiness effect on axis 2).
# # Part Two: Dimensionality Reduction
# No interpretation will be carried out in this second part. We will focus on dimensionality reduction and its implementation in a Machine Learning pipeline. The goal here is above all to build a predictive model that uses fewer dimensions than are found in the raw data. With fewer dimensions and fewer variables, the model needs less computation.
# # Question 1:
# We will tackle a classification problem using the principal component variables. In our example we want to classify images representing digits from zero to nine. This could be done without any transformation using Deep Learning, but here we will use a Machine Learning method combined with a PCA to address the problem.
# Here is the input data:
# Each image is made of 8*8 = 64 pixels. To use a Machine Learning method, each individual must correspond to one row of our table. We therefore flatten the image, which gives 64 variables, 64 descriptors corresponding to the darkness intensity of a given pixel.
# - Import datasets from sklearn
# - Load the data with load_digits() and store it in the variable digits
# - Display the contents of digits
from sklearn import datasets
digits = datasets.load_digits()
digits
# # Question 2:
# - Store the data attribute of digits in X_digits. These are our explanatory variables
# - Store the labels (target) of digits in y_digits. This is the variable to predict
# - Display the shape of X_digits & y_digits
X_digits = digits.data
y_digits = digits.target
X_digits.shape
y_digits.shape
# # Question 3:
# - Perform a PCA on X_digits
# - Display the variance explained by the axes (explained_variance_ratio_)
# - Display the cumulative variance explained by the axes
# - Formulate a hypothesis about the number of axes that best represent the information
pca = PCA()
pca.fit(X_digits)
pca.explained_variance_ratio_
plt.figure(1, figsize=(8, 5))
plt.clf()
plt.axes([0.2, 0.2, 0.7, 0.7])
plt.plot(pca.explained_variance_ratio_.cumsum(), linewidth=2)
plt.axis("tight")
plt.xlabel("n_components")
plt.ylabel("explained_variance_")
# Answer: 40 seems right. We keep roughly 100% of the information; the remaining axes carry almost nothing and therefore contribute little to comparing our individuals.
# # Question 4:
# We will use a classification model: logistic regression.
# - Import logistic regression from sklearn https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
# - Import Pipeline from sklearn https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
# - Instantiate a PCA and a logistic regression
# - Create a Pipeline with a 'pca' step and an 'LR' step
# The idea of the Pipeline is to chain several tasks through one single object.
# 
from sklearn.pipeline import Pipeline
from sklearn import linear_model
# Create the pipeline and find the best parameters
logistic = linear_model.LogisticRegression()
pca = PCA()
pipe = Pipeline(steps=[("pca", pca), ("logistic", logistic)])
# # Question 5:
# By adding this step we add a parameter to our Machine Learning model. To find the right number of components to keep, we will use a GridSearchCV.
# - Import GridSearchCV
# - Create a list n_components with the values 20, 40, 64
# - Fill in the GridSearchCV and fit it
from sklearn.model_selection import GridSearchCV
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
penalties = ["l1", "l2"]
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(
pipe,
dict(pca__n_components=n_components, logistic__C=Cs, logistic__penalty=penalties),
scoring="accuracy",
)
estimator.fit(X_digits, y_digits)
# # Question 6:
# - Display the GridSearchCV results - the table with all the results
# - Display the hyperparameters
# - In your opinion, what are the next steps?
print(estimator.best_estimator_)
print(X_digits.shape, y_digits.shape)
pd.DataFrame(estimator.cv_results_)
print("Best parameters set found on development set:")
print()
print(estimator.best_params_)
print()
print("Grid scores on development set:")
print()
means = estimator.cv_results_["mean_test_score"]
stds = estimator.cv_results_["std_test_score"]
for mean, std, params in zip(means, stds, estimator.cv_results_["params"]):
print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Reading combined_data_1 file
df = pd.read_csv(
"/kaggle/input/netflix-prize-data/combined_data_1.txt",
header=None,
names=["Cust_Id", "Rating"],
usecols=[0, 1],
)
# Convert Ratings column to a float datatype
df["Rating"] = df["Rating"].astype(float)
df.head()
df.shape
df.info()
# checking null values-- total number of movies
no_of_movie = df["Rating"].isnull().sum()
no_of_movie
# checking percentage of null values
(df.isnull().sum() / len(df)) * 100
# ### From the above we can see the percentage of null values. These are not truly missing ratings that we need to handle; they come from the structure of the file, where each movie id appears as its own row with a NaN rating.
# let's check all unique items present in rating column
df["Rating"].unique()
# To find the distribution of different ratings in the dataset
d = df.groupby("Rating")["Rating"].agg(["count"])
d = pd.DataFrame(d)
d
# get customer count
cust_count = df["Cust_Id"].nunique() - no_of_movie
cust_count
# get rating count
rating_count = df["Cust_Id"].count() - no_of_movie
rating_count
import matplotlib.pyplot as plt
ax = d.plot(kind="barh", legend=False, figsize=(15, 10))
plt.title(
f"Total pool: {no_of_movie} Movies, {cust_count} customers, {rating_count} ratings given",
fontsize=20,
)
plt.axis("On")
for i in range(1, 6):
ax.text(
d.iloc[i - 1][0] / 4,
i - 1,
"Rating {}: {:.0f}%".format(i, d.iloc[i - 1][0] * 100 / d.sum()[0]),
color="white",
weight="bold",
)
df.head()
# To count all the 'nan' values in the Ratings column in the 'ratings' dataset
df_nan = pd.DataFrame(
pd.isnull(df.Rating),
)
df_nan.head()
df1 = pd.isnull(df["Rating"])
df2 = pd.DataFrame(df1)
df3 = df2[df2["Rating"] == True]
df3
df3 = df3.reset_index()
df_nan = df3.copy()
df_nan.head()
# To create a numpy array containing movie ids according to the 'ratings' dataset
movie_np = []
movie_id = 1
for i, j in zip(df_nan["index"][1:], df_nan["index"][:-1]):
# numpy approach
temp = np.full((1, i - j - 1), movie_id)
movie_np = np.append(movie_np, temp)
movie_id += 1
# Account for last record and corresponding length
# numpy approach
last_record = np.full((1, len(df) - df_nan.iloc[-1, 0] - 1), movie_id)
movie_np = np.append(movie_np, last_record)
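# Quick sanity check (a sketch): the number of generated movie ids should match the
# number of actual (non-null) ratings still present in df at this point.
print(len(movie_np), df["Rating"].notnull().sum())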
# To append the above created array to the datset after removing the 'nan' rows
df = df[pd.notnull(df["Rating"])]
df["Movie_Id"] = movie_np.astype(int)
df["Cust_Id"] = df["Cust_Id"].astype(int)
print("-Dataset examples-")
df.head()
f = ["count", "mean"]
# To create a list of all the movies rated less often (keep only the top 30% most-rated movies)
dataset_movie_summary = df.groupby("Movie_Id")["Rating"].agg(f)
dataset_movie_summary.index = dataset_movie_summary.index.map(int)
# dataset_movie_summary
movie_benchmark = round(dataset_movie_summary["count"].quantile(0.7), 0)
drop_movie_list = dataset_movie_summary[
dataset_movie_summary["count"] < movie_benchmark
].index
print("Movie minimum times of review: {}".format(movie_benchmark))
# To create a list of all the inactive users (users who rate less often)
dataset_cust_summary = df.groupby("Cust_Id")["Rating"].agg(f)
dataset_cust_summary.index = dataset_cust_summary.index.map(int)
cust_benchmark = round(dataset_cust_summary["count"].quantile(0.7), 0)
drop_cust_list = dataset_cust_summary[
dataset_cust_summary["count"] < cust_benchmark
].index
print(f"Customer minimum times of review: {cust_benchmark}")
print(f"Original Shape: {df.shape}")
df = df[~df["Movie_Id"].isin(drop_movie_list)]
df = df[~df["Cust_Id"].isin(drop_cust_list)]
print("After Trim Shape: {}".format(df.shape))
df_p = pd.pivot_table(df, values="Rating", index="Cust_Id", columns="Movie_Id")
print(df_p.shape)
df_p
df_title = pd.read_csv(
"/kaggle/input/netflix-prize-data/movie_titles.csv",
encoding="ISO-8859-1",
header=None,
names=["Movie_Id", "Year", "Name"],
)
df_title.set_index("Movie_Id", inplace=True)
print(df_title.head(10))
# Import required libraries
import math
import re
import matplotlib.pyplot as plt
from surprise import Reader, Dataset, SVD
from surprise.model_selection import cross_validate
# Load Reader library
reader = Reader()
# get just top 100K rows for faster run time
data = Dataset.load_from_df(df[["Cust_Id", "Movie_Id", "Rating"]][:100000], reader)
# Use the SVD algorithm.
svd = SVD()
# Compute the RMSE of the SVD algorithm
cross_validate(svd, data, measures=["RMSE", "MAE"], cv=3, verbose=True)
df.head(4)
dataset_712664 = df[(df["Cust_Id"] == 712664) & (df["Rating"] == 5)]
dataset_712664 = dataset_712664.set_index("Movie_Id")
dataset_712664 = dataset_712664.join(df_title)["Name"]
dataset_712664.head(10)
dataset_1331154 = df[(df["Cust_Id"] == 1331154) & (df["Rating"] == 4)]
dataset_1331154
dataset_1331154 = dataset_1331154.set_index("Movie_Id")
dataset_1331154
dataset_1331154 = dataset_1331154.join(df_title)["Name"]
dataset_1331154.head(10)
# Create a copy of the movies dataset
dataset_1331154 = df_title.copy()
dataset_1331154.head()
dataset_1331154 = dataset_1331154.reset_index()
dataset_1331154.head()
# To remove all the movies rated less often
dataset_1331154 = dataset_1331154[~dataset_1331154["Movie_Id"].isin(drop_movie_list)]
dataset_1331154
trainset = data.build_full_trainset()
svd.fit(trainset)
# Predict the ratings for user_1331154
dataset_1331154["Estimate_Score"] = dataset_1331154["Movie_Id"].apply(
lambda x: svd.predict(1331154, x).est
)
# Drop extra columns from the user_1331154 data frame
dataset_1331154 = dataset_1331154.drop("Movie_Id", axis=1)
dataset_1331154.head()
# Sort predicted ratings for user_1331154 in descending order
dataset_1331154 = dataset_1331154.sort_values("Estimate_Score", ascending=False)
# Print top 10 recommendations
print(dataset_1331154.head(10))
|
import numpy as np
import pandas as pd
import seaborn as sns
import missingno as msno
from matplotlib import pyplot as plt
from datetime import date
from sklearn.metrics import accuracy_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import (
MinMaxScaler,
LabelEncoder,
StandardScaler,
RobustScaler,
)
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
pd.set_option("display.width", 500)
df = pd.read_csv("/kaggle/input/heart-failure-prediction/heart.csv")
def check_df(dataframe, head=5):
print("#################### Head ####################")
print(dataframe.head(head))
print("################### Shape ####################")
print(dataframe.shape)
print("#################### Info #####################")
print(dataframe.info())
print("################### Nunique ###################")
print(dataframe.nunique())
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("################## Quantiles #################")
print(dataframe.describe([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
print("################# Duplicated ###################")
print(dataframe.duplicated().sum())
check_df(df)
# We need to identify the numerical and categorical variables in the data.
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
print(num_cols)
print(cat_cols)
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("##########################################")
if plot:
        sns.kdeplot(x=col_name, data=dataframe, shade=True, bw=0.1, color="red")
sns.set(rc={"figure.dpi": 100})
plt.show(block=True)
# We are analyzing the numeric variables.
for col in num_cols:
cat_summary(df, col, plot=True)
# We are analyzing the categorical variables.
for col in cat_cols:
print(df[col].value_counts())
sns.countplot(x=df[col], data=df)
sns.set(rc={"figure.dpi": 90})
plt.show(block=True)
# We are analyzing the target variable.
for col in num_cols:
print(df.groupby("HeartDisease").agg({col: "mean"}))
sns.violinplot(x=df["HeartDisease"], y=df[col])
plt.show(block=True)
# We are analyzing the outliers.
# To detect outliers, we need to set threshold values.
def outlier_thresholds(dataframe, col_name, q1=0.04, q3=0.96):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
# We are checking the variables that have outliers.
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
print(col, check_outlier(df, col))
# We replace the outliers with the threshold values we determined.
def replace_with_thresholds(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
for col in num_cols:
replace_with_thresholds(df, col)
for col in num_cols:
    print(col, check_outlier(df, col))
# We generate our scores with LOF.
df_num_cols = df[num_cols]
clf = LocalOutlierFactor(n_neighbors=20)
clf.fit_predict(df_num_cols)
df_scores = clf.negative_outlier_factor_
# We are examining the scores through a graph.
scores = pd.DataFrame(np.sort(df_scores))
scores.plot(stacked=True, xlim=[0, 70], style=".-")
plt.show()
# We set the 8th lowest LOF score as the threshold.
th = np.sort(df_scores)[7]
# We are looking at the outlier observation units that fall below this threshold.
df[df_scores < th]
# We remove these outlier observations from the dataset.
df = df[~(df_scores < th)]
# We examine the correlation matrix.
corr = df[df.columns].corr()
sns.set(rc={"figure.figsize": (12, 12)})
sns.heatmap(
corr,
cmap="RdBu",
vmin=-1,
vmax=1,
center=0,
annot=True,
linewidth=0.5,
square=False,
)
# sns.heatmap(corr, cmap="RdBu")
plt.show()
# We are analyzing the missing values.
df.isnull().any()
df.describe().T
# There appear to be no missing values.
# However, the summary above shows implausible values.
# For example, a variable such as Cholesterol cannot be 0.
# We will treat these zero values as NaN.
# We set 0 values to NaN
df["Cholesterol"] = np.where(df["Cholesterol"] == 0, np.NaN, df["Cholesterol"])
df.isnull().sum()
# We have identified missing values and we need to fill them in.
# We do this with KNN, which imputes values based on neighboring observations.
dff = pd.get_dummies(df, drop_first=True)
scaler = MinMaxScaler()
dff = pd.DataFrame(scaler.fit_transform(dff), columns=dff.columns)
from sklearn.impute import KNNImputer
imputer = KNNImputer(n_neighbors=5)
dff = pd.DataFrame(imputer.fit_transform(dff), columns=dff.columns)
dff.head()
# The variable is now imputed; we transform the data back to its original scale.
dff = pd.DataFrame(scaler.inverse_transform(dff), columns=dff.columns)
df["Cholesterol"] = dff["Cholesterol"]
df.isnull().sum()
df.dropna(inplace=True)
df.describe().T
# Now we are creating new variables.
df.groupby("Oldpeak")["HeartDisease"].mean()
|
# **Implement the 10 most important binary classification algorithms & check their performance**
# * Naive Bayes
# * Logistic Regression
# * K-Nearest Neighbours
# * Support Vector Machine
# * Decision Tree
# * Bagging Decision Tree (Ensemble Learning I)
# * Boosted Decision Tree (Ensemble Learning II)
# * Random Forest (Ensemble Learning III)
# * Voting Classification (Ensemble Learning IV)
# * Deep Learning with a neuronal network
# ### Standard Libraries
# standard libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os, warnings
warnings.simplefilter("ignore")
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import (
StandardScaler,
MinMaxScaler,
MaxAbsScaler,
RobustScaler,
)
from tabulate import tabulate
import datetime
Table = []
# ### Load Data
# paths
TRAIN_PATH = "/kaggle/input/playground-series-s3e12/train.csv"
TEST_PATH = "/kaggle/input/playground-series-s3e12/test.csv"
SUBMISSION_PATH = "/kaggle/input/playground-series-s3e12/sample_submission.csv"
# read data
train_df = pd.read_csv(TRAIN_PATH)
test_df = pd.read_csv(TEST_PATH)
submit_df = pd.read_csv(SUBMISSION_PATH)
# drop column = id
train_df.drop("id", axis=1, inplace=True)
test_df.drop("id", axis=1, inplace=True)
# drop NULLs
train_df.dropna(inplace=True)
test_df.dropna(inplace=True)
# reset index
train_df.reset_index(drop=True, inplace=True)
test_df.reset_index(drop=True, inplace=True)
# scaling the data
scaler = MinMaxScaler()
for col in train_df.columns:
train_df[col] = scaler.fit_transform(train_df[[col]])
for col in test_df.columns:
test_df[col] = scaler.fit_transform(test_df[[col]])
# view
print(f"Training data shape: {train_df.shape}")
# select features and target
X = train_df.iloc[:, :6]
Y = train_df[["target"]]
# split data
x_train, x_test, y_train, y_test = train_test_split(X, Y, random_state=0)
print(f"x_train shape: {x_train.shape} | x_test shape: {x_test.shape}")
# ### Naive Bayes
# import the library
from sklearn.naive_bayes import MultinomialNB
# instantiate & fit
mnb = MultinomialNB().fit(x_train, y_train)
print("score on test: " + str(mnb.score(x_test, y_test)))
Table.append(["Naive Bayes", mnb.score(x_test, y_test)])
# ### Logistic Regression
#
# import the library
from sklearn.linear_model import LogisticRegression
# instantiate & fit
lr = LogisticRegression(max_iter=5000)
lr.fit(x_train, y_train)
print("score on test: " + str(lr.score(x_test, y_test)))
Table.append(["Logistic Regression", lr.score(x_test, y_test)])
# import the library
from sklearn.linear_model import SGDClassifier
# instantiate & fit
sgd = SGDClassifier()
sgd.fit(x_train, y_train)
print("score on test: " + str(sgd.score(x_test, y_test)))
Table.append(["SGDClassifier", sgd.score(x_test, y_test)])
# ### K-Nearest Neighbours
# import the library
from sklearn.neighbors import KNeighborsClassifier
# instantiate & fit
knn = KNeighborsClassifier(algorithm="brute", n_jobs=-1)
knn.fit(x_train, y_train)
print("score on test: " + str(knn.score(x_test, y_test)))
Table.append(["KNN", knn.score(x_test, y_test)])
# ### Support Vector Machine
# import the library
from sklearn.svm import LinearSVC
# instantiate & fit
svm = LinearSVC(C=0.0001)
svm.fit(x_train, y_train)
print("score on test: " + str(svm.score(x_test, y_test)))
Table.append(["SVM", svm.score(x_test, y_test)])
# ### Decision Tree
# import the library
from sklearn.tree import DecisionTreeClassifier
# instantiate & fit
clf = DecisionTreeClassifier(min_samples_split=10, max_depth=3)
clf.fit(x_train, y_train)
print("score on test: " + str(clf.score(x_test, y_test)))
Table.append(["Decision Tree", clf.score(x_test, y_test)])
# ### Bagging Decision Tree
# import the library
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
# instantiate & fit
bg = BaggingClassifier(
DecisionTreeClassifier(min_samples_split=10, max_depth=3),
max_samples=0.5,
max_features=1.0,
n_estimators=10,
)
bg.fit(x_train, y_train)
print("score on test: " + str(bg.score(x_test, y_test)))
Table.append(["Bagging Decision Tree", bg.score(x_test, y_test)])
# ### Boosting Decision Tree
# import the library
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
# instantiate & fit
adb = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2), n_estimators=100, learning_rate=0.5
)
adb.fit(x_train, y_train)
print("score on test: " + str(adb.score(x_test, y_test)))
Table.append(["AdaBoost Classifier", adb.score(x_test, y_test)])
# import the library
from sklearn.ensemble import GradientBoostingClassifier
# instantiate & fit
gbc = GradientBoostingClassifier(n_estimators=100)
gbc.fit(x_train, y_train)
print("score on test: " + str(gbc.score(x_test, y_test)))
Table.append(["Gradient Boost Classifier", gbc.score(x_test, y_test)])
# ### Random Forest
# import the library
from sklearn.ensemble import RandomForestClassifier
# instantiate & fit
rf = RandomForestClassifier(n_estimators=300, max_depth=3)
rf.fit(x_train, y_train)
print("score on test: " + str(rf.score(x_test, y_test)))
Table.append(["Random Forest", rf.score(x_test, y_test)])
# ### Voting Classifier
# import the library
from sklearn.ensemble import VotingClassifier
# 1) naive bayes = mnb
mnb = MultinomialNB().fit(x_train, y_train)
# 2) logistic regression = lr
lr = LogisticRegression(max_iter=5000)
# 3) random forest = rf
rf = RandomForestClassifier(n_estimators=30, max_depth=3)
# 4) support vector machine = svm
svm = LinearSVC(max_iter=5000)
evc = VotingClassifier(estimators=[("mnb", mnb), ("lr", lr), ("rf", rf), ("svm", svm)])
evc.fit(x_train, y_train)
print("score on test: " + str(evc.score(x_test, y_test)))
Table.append(["Voting Classifier", evc.score(x_test, y_test)])
# ### Deep Learning
# import the library
from keras import layers
from keras import models
from keras import optimizers
from keras import losses
from keras import regularizers
from keras import metrics
from tqdm.keras import TqdmCallback
# add validation dataset
validation_split = 100
x_validation = X[:validation_split]
x_partial_train = X[validation_split:]
y_validation = Y[:validation_split]
y_partial_train = Y[validation_split:]
# build & compile model
model = models.Sequential()
model.add(
layers.Dense(
4,
kernel_regularizer=regularizers.l2(0.003),
activation="relu",
input_shape=(6,),
)
)
model.add(layers.Dropout(0.7))
model.add(layers.Dense(4, kernel_regularizer=regularizers.l2(0.003), activation="relu"))
model.add(layers.Dropout(0.7))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
# fit the model
model.fit(
x_partial_train,
y_partial_train,
epochs=100,
batch_size=512,
validation_data=(x_validation, y_validation),
callbacks=[TqdmCallback(verbose=0)],
verbose=0,
)
print("")
print("score on test: " + str(model.evaluate(x_test, y_test)[1]))
Table.append(["Neural Network", model.evaluate(x_test, y_test)[1]])
# view
print(tabulate(Table, headers=["Model", "Score"], tablefmt="fancy_outline"))
# **Decision Tree Classifier seems to be giving the best result.**
# ### Fine-Tune SGD Classifier -- updated
# fine-tuning the SGD Classifier
from sklearn.model_selection import GridSearchCV
# instantiate
sgdc = SGDClassifier()
# define the parameter grid
param_grid = [
{
"alpha": [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3], # learning rate
"n_iter_no_change": [5, 10, 15, 20, 30, 40, 50], # number of epochs
"penalty": ["l2", "l1", "elasticnet"],
"n_jobs": [-1],
"max_iter": [500],
"l1_ratio": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
"tol": [1e-3, 1e-4, 1e-5],
}
]
# define grid-search
grid_search = GridSearchCV(
estimator=sgdc,
param_grid=param_grid,
scoring="top_k_accuracy",
cv=5,
return_train_score=True,
)
# fit the grid search
grid_search.fit(x_train, y_train)
# get the best estimator
sgdc_tuned = grid_search.best_estimator_
# fit the estimator
sgdc_tuned.fit(x_train, y_train)
print("score on test: " + str(sgdc_tuned.score(x_test, y_test)))
# make prediction on TEST data
result = sgdc_tuned.predict(test_df)
# add the prediction to the submission
submit_df["target"] = result
# get the datestamp
datestamp = "{:%Y_%m_%d}".format(datetime.date.today())
# save the submission
submit_df.to_csv(str(datestamp) + "_submission.csv", index=False)
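# The grid above spans a few thousand parameter combinations; a randomized search
# over the same space is a cheaper alternative (a sketch under the same assumptions
# as the grid search above, not part of the original run):
from sklearn.model_selection import RandomizedSearchCV
random_search = RandomizedSearchCV(
    estimator=SGDClassifier(),
    param_distributions=param_grid[0],
    n_iter=50,
    scoring="accuracy",
    cv=5,
    random_state=0,
)
random_search.fit(x_train, y_train)
print("best params:", random_search.best_params_)
print("score on test: " + str(random_search.best_estimator_.score(x_test, y_test)))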
|
import numpy as np
a = np.array([True, False, True])
b = np.array([1, 2, 3])
np.any(a)
np.all(a)
np.all(b > 0)
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
z = np.array([7, 8, 9])
# x<y<z
res1 = x < y
res1
res2 = y < z
res2
res = res1 & res2
np.all(res)
# **Broadcasting**
# **Tile Function**
a = np.array([1, 2, 3])
b = np.tile(a, (2, 2))
b
b.ndim
c = np.eye(2)
c
d = np.tile(c, (2, 2)) # (2,2) (Row, Column)
d
# convert 1D Array to 2D Array
a = np.array([1, 2, 3])
a
b = a[0:3, np.newaxis]
b.shape
# **Broadcasting Example**
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
c = a + b  # the (3, 1) column in b is broadcast across the columns of a
c
b = np.array([1, 2, 0])
b
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([[1], [2], [0]])
b
c = a + b
c
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a
b = np.array([1])
b
c = a + b
c
h = np.array([1, 1, 0])
g = np.array([[1], [2], [1]])
i = h + g
i
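# A quick way to check whether two shapes are broadcast-compatible and what the
# result shape will be (np.broadcast_shapes is available in NumPy >= 1.20):
np.broadcast_shapes((3,), (3, 1))  # -> (3, 3), as in h + g above
np.broadcast_shapes((3, 3), (1,))  # -> (3, 3), as in a + b with b = np.array([1])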
# **Flattening**
a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.ravel(a)
# **Transpose**
a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.transpose(a)
# **Reshape**
a = np.array([[1, 2, 3], [4, 5, 6]])
a
np.reshape(a, (3, 2))
b = np.arange(18).reshape(2, 3, 3)
b.ndim
# **Sort**
a = np.random.randint(1, 30, 10)
a
a = np.sort(a)
a
a = a[::-1]
a
x = np.array([[1, 3, 6, 8], [2, 5, 3, 9]])
x
np.sort(x, axis=1)
b = np.random.randint(1, 20, 10)
b
ind = np.argsort(b)  # indices that would sort b
ind
b[ind]
|
import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from transformers import BertTokenizer
sys.path.append("../input/kagglerig")
import krig
sys.path.append("../input/googlequestchallenge/kaggle-google-quest-challenge-1.0")
import googlequestchallenge as gqc
krig.seed_everything()
# Treat numpy.inf values as NA values (use_inf_as_na option)
pd.set_option("use_inf_as_na", True)
pd.set_option("display.max_columns", 999)
pd.set_option("display.max_rows", 999)
MAX_SEQUENCE_LENGTH = 10000
WINDOW_LENGTH = 512
STRIDE = 256
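# A rough sketch of how a long token sequence can be split into overlapping windows
# with the WINDOW_LENGTH / STRIDE values above (this is only an assumption about what
# gqc.Dataset.preprocess does internally; the real logic lives in the
# googlequestchallenge package):
def sliding_windows(token_ids, window_length=WINDOW_LENGTH, stride=STRIDE):
    windows = []
    for start in range(0, max(len(token_ids), 1), stride):
        windows.append(token_ids[start:start + window_length])
        if start + window_length >= len(token_ids):
            break
    return windows
# e.g. a 1000-token sequence with window 512 / stride 256 yields windows
# starting at offsets 0, 256 and 512, covering the full sequence.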
TARGET_COLUMNS = [
"question_asker_intent_understanding",
"question_body_critical",
"question_conversational",
"question_expect_short_answer",
"question_fact_seeking",
"question_has_commonly_accepted_answer",
"question_interestingness_others",
"question_interestingness_self",
"question_multi_intent",
"question_not_really_a_question",
"question_opinion_seeking",
"question_type_choice",
"question_type_compare",
"question_type_consequence",
"question_type_definition",
"question_type_entity",
"question_type_instructions",
"question_type_procedure",
"question_type_reason_explanation",
"question_type_spelling",
"question_well_written",
"answer_helpful",
"answer_level_of_information",
"answer_plausible",
"answer_relevance",
"answer_satisfaction",
"answer_type_instructions",
"answer_type_procedure",
"answer_type_reason_explanation",
"answer_well_written",
]
QUESTION_COLUMNS = ["question_title", "question_body"]
ANSWER_COLUMNS = ["answer"]
# # Eval corrs from training
corrs = pd.read_csv(
"../input/googlequestchallenge/kaggle-google-quest-challenge-1.0/resources/bert-base-uncased/finetuned/corrs.csv"
)
corrs.head(len(TARGET_COLUMNS))
test = pd.read_csv("../input/google-quest-challenge/test.csv")
test.info()
# tokenizer = BertTokenizer.from_pretrained('../resources/bert-base-uncased/finetuned/')
tokenizer = BertTokenizer.from_pretrained(
"../input/googlequestchallenge/kaggle-google-quest-challenge-1.0/resources/bert-base-uncased/finetuned/"
)
ds = gqc.Dataset(key_column="qa_id")
ds.preprocess(
test,
tokenizer,
question_columns=QUESTION_COLUMNS,
answer_columns=ANSWER_COLUMNS,
max_sequence_length=MAX_SEQUENCE_LENGTH,
window_length=WINDOW_LENGTH,
stride=STRIDE,
)
x_test = ds.inputs()
print(f"x_test.shape={np.shape(x_test)}")
ds.df.head()
q_input_ids = pd.DataFrame(x_test[0])
q_input_ids.head()
a_input_ids = pd.DataFrame(x_test[3])
a_input_ids.head()
# path = '../resources/bert-base-uncased/finetuned/'
path = "../input/googlequestchallenge/kaggle-google-quest-challenge-1.0/resources/bert-base-uncased/finetuned/"
model = tf.keras.models.load_model(path)
model.summary(line_length=100)
y_pred = model.predict(x_test)
print(f"y_pred.shape={np.shape(y_pred)}")
df = pd.DataFrame(y_pred, columns=TARGET_COLUMNS)
df["qa_id"] = ds.df["qa_id"].values
df = df.groupby(["qa_id"], as_index=False)[TARGET_COLUMNS].median()
df.info()
df.head()
sub = pd.read_csv("../input/google-quest-challenge/sample_submission.csv")
sub.iloc[:, 1:] = df[TARGET_COLUMNS].values
gqc.check_submission(sub, shape=(476, 31), exclude={"qa_id"})
sub.head()
sub.to_csv("submission.csv", index=False)
# # Debug
print("\n".join(krig.file_paths(".")))
print("\n".join(krig.file_paths("../input")))
pd.show_versions()
|
# # **MASK FILTERING**
import pandas as pd
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a
a.head()
a.info()
# convert the column type from object to datetime64
a["ObservationDate"] = pd.to_datetime(a["ObservationDate"])
a["Last Update"] = pd.to_datetime(a["Last Update"])
a.info() # here the type of observation date and last update has been changed
# filter rows matching a single value in a column
a["Country/Region"] == "India"
mask = a["Country/Region"] == "India"
a[mask]
# filter rows using multiple conditions
mask1 = a["Country/Region"] == "India"
mask2 = a["ObservationDate"] == "2021-05-29"
b = a[mask1 & mask2]
b
b.sort_values("Confirmed", ascending=False).head()
# # **ISIN ( ) METHOD**
import pandas as pd
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
mask = a["Country/Region"].isin(["India", "Japan"])
a[mask]
# **WITHOUT ISIN METHOD**
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
mask1 = a["Country/Region"] == "India"
mask2 = a["Country/Region"] == "Japan"
a[mask1 | mask2]
# # **NULL FILTERING**
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
a["ObservationDate"] = pd.to_datetime(a["ObservationDate"])
a["Last Update"] = pd.to_datetime(a["Last Update"])
a.head()
a.info()
a["Province/State"].isnull()
mask = a["Province/State"].isnull()
a[mask]
# **NOTNULL ( ) METHOD**
mask2 = a["Province/State"].notnull()
a[mask2]
# # **BETWEEN ( ) METHOD**
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
a["Confirmed"].between(19, 20)
mask = a["Confirmed"].between(19, 20)
a[mask]
a["ObservationDate"].between("01/22/2020", "01/23/2020")
mask2 = a["ObservationDate"].between("01/22/2020", "01/23/2020")
a[mask2]
# # **FIND DUPLICATES**
import pandas as pd
d = {
"students": ["aman", "biswa", "aman", "disha", "dhruvika", "aman"],
"marks": [23, 44, 33, 54, 78, 23],
"age": [18, 19, 17, 18, 18, 18],
}
a = pd.DataFrame(d)
a
a["students"].duplicated()
mask = a["students"].duplicated()
a[mask]
# **KEEP ARGUMENT**
a["students"].duplicated(keep="last") # keep default is first
mask1 = a["students"].duplicated(keep="last")
a[mask1]
# **SUBSET ARGUMENT**
a
a.duplicated(
    subset=["students", "marks", "age"]
)  # flags rows that are duplicated across all the columns listed in subset
m1 = a.duplicated(subset=["students", "marks", "age"])
a[m1]
# **testing on covid data**
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
m2 = a.duplicated(
["ObservationDate", "Province/State", "Country/Region", "Last Update"]
)
m2
a[m2]
a.loc[49:55]
# # **TREATING DUPLICATES**
d = {
"students": ["aman", "biswa", "aman", "disha", "dhruvika", "aman"],
"marks": [23, 44, 33, 54, 78, 23],
"age": [18, 19, 17, 18, 18, 18],
}
a = pd.DataFrame(d)
a
a["students"].drop_duplicates()  # drop duplicate values appearing in the selected column
a["marks"].drop_duplicates()
a.drop_duplicates(
subset=["students"]
) # another way of using drop duplicates using subset
a.drop_duplicates(subset=["students"], ignore_index=True)
a.drop_duplicates(subset=["students", "marks", "age"], ignore_index=True, inplace=True)
a
# # **DISTINCT COUNT**
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
a["Country/Region"].unique()
a["Country/Region"].unique().ndim
a["Country/Region"].nunique()
a["Province/State"].nunique()
a["Confirmed"].sum() / a[
"Country/Region"
].nunique() # no of confirmed case per country on average
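# The same value can be obtained with a groupby: sum the confirmed cases per country
# and then take the mean of those per-country totals (a sketch of the equivalent form):
a.groupby("Country/Region")["Confirmed"].sum().mean()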
|
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
df = pd.read_csv("/kaggle/input/maven-churn-dataset/telecom_customer_churn.csv")
# # **Basic steps**
df
df.info()
df.shape
df.columns
df.describe()
df.duplicated().value_counts()
df.isnull().sum()
# # **Handling null values for required columns**
# ## **Value before handling null values**
mean = df["Avg Monthly GB Download"].mean()
mean
mode = df["Streaming TV"].mode()
mode
mode1 = df["Streaming Movies"].mode()
mode1
mode2 = df["Streaming Music"].mode()
mode2
mode3 = df["Unlimited Data"].mode()
mode3
# handling null values (mode() returns a Series, so take its first value)
df["Avg Monthly GB Download"].fillna(value=mean, inplace=True)
df["Streaming TV"].fillna(value=mode[0], inplace=True)
df["Streaming Movies"].fillna(value=mode1[0], inplace=True)
df["Streaming Music"].fillna(value=mode2[0], inplace=True)
df["Unlimited Data"].fillna(value=mode3[0], inplace=True)
# ## **Value after handling null values**
df["Avg Monthly GB Download"].mean()
# # **Adding required columns**
bins = [10, 20, 30, 40, 50, 60, 70]
names = ["<20", "20-30", "30-40", "40-50", "50-60", "60-70", "70-80"]
d = dict(enumerate(names, 1))
df["Age_Range"] = np.vectorize(d.get)(np.digitize(df["Age"], bins))
df
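# pd.cut offers a more direct way to build the same kind of age bucket
# (a sketch with illustrative edges and labels, not used by the analysis below):
df["Age_Range_cut"] = pd.cut(
    df["Age"],
    bins=[0, 20, 30, 40, 50, 60, 70, 120],
    labels=["<20", "20-30", "30-40", "40-50", "50-60", "60-70", "70+"],
)
df["Age_Range_cut"].value_counts()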
# # **1. How many customers joined the company during the last quarter?**
# Based on
# * Gender
# * Age Group
# * Cities
# * Internet Service
# * Average Revenue
# * Average GB Consumed
# * Unlimited Data
# * Specific Streaming Service
# * All Streaming Services
# ## **Gender count of customers joined the company during the last quarter**
df1 = (
df.groupby(["Customer Status", "Gender"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.head(10)
.reset_index()
)
df1
dfl = df1.loc[df1["Customer Status"] == "Joined"]
dfl
fig = px.histogram(dfl, x="Gender", y=0, color="Gender", text_auto="0.05")
fig.update_layout(
title="Gender count of customers joined the company during the last quarter",
xaxis_title="Gender",
yaxis_title="Count",
)
fig.show()
# ## **Age group of customers joined the company during the last quarter**
df2 = (
df.groupby(["Customer Status", "Age_Range"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
df2
df3 = df2.loc[df2["Customer Status"] == "Joined"]
df3
fig = px.histogram(df3, x="Age_Range", y=0, color="Age_Range", text_auto="0.05")
fig.update_layout(
title="Age group of customers joined the company during the last quarter",
xaxis_title="Age_Range",
yaxis_title="Count",
)
fig.show()
# ## **Top 10 City of customers joined the company during the last quarter**
df4 = (
df.groupby(["Customer Status", "City"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
df4
df5 = df4.loc[df4["Customer Status"] == "Joined"].head(10)
df5
fig = px.histogram(df5, x="City", y=0, color="City", text_auto="0.05")
fig.update_layout(
title="Top 10 City of customers joined the company during the last quarter",
xaxis_title="City",
yaxis_title="Count",
)
fig.show()
# ## **Internet Service of customers joined the company during the last quarter**
df6 = (
df.groupby(["Customer Status", "Internet Service"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
df6
df7 = df6.loc[df6["Customer Status"] == "Joined"]
df7
fig = px.histogram(
df7, x="Internet Service", y=0, color="Internet Service", text_auto="0.05"
)
fig.update_layout(
title="Internet Service of customers joined the company during the last quarter",
xaxis_title="Joined",
yaxis_title="Count",
)
fig.show()
# ## **Average Revenue generated of customers joined the company during the last quarter**
#
da1 = df.reindex(columns=["Customer Status", "Total Revenue"])
da1 = da1.loc[da1["Customer Status"] == "Joined"]
da1
print(
"Average revenue generated of customers joined the company during last quarter is:",
da1["Total Revenue"].mean(),
)
# ## **Average GB Consumed by customers joined the company during the last quarter**
#
dgb1 = df.reindex(columns=["Customer Status", "Avg Monthly GB Download"])
dgb1 = dgb1.loc[dgb1["Customer Status"] == "Joined"]
dgb1
print(
"Overall Average GB Consumed by customers joined the company during the last quarter is:",
dgb1["Avg Monthly GB Download"].mean(),
)
# ## **Unlimited Data customers joined the company during the last quarter**
#
dfu1 = (
df.groupby(["Customer Status", "Unlimited Data"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
dfu1
dfu2 = dfu1.loc[dfu1["Customer Status"] == "Joined"]
dfu2
fig = px.histogram(
dfu2, x="Unlimited Data", y=0, color="Unlimited Data", text_auto="0.05"
)
fig.update_layout(
title="Unlimited Data customers joined the company during the last quarter",
xaxis_title="Unlimited Data",
yaxis_title="Count",
)
fig.show()
# ## **Specific Streaming Service of customers joined the company during the last quarter**
#
dsa1 = df.reindex(columns=["Customer Status", "Streaming Music"])
ds1 = dsa1.loc[dsa1["Customer Status"] == "Joined"]
ds1.value_counts()
dsa2 = df.reindex(columns=["Customer Status", "Streaming TV"])
ds2 = dsa2.loc[dsa2["Customer Status"] == "Joined"]
ds2.value_counts()
dsa3 = df.reindex(columns=["Customer Status", "Streaming Movies"])
ds3 = dsa3.loc[dsa3["Customer Status"] == "Joined"]
ds3.value_counts()
# ## **All Streaming Services of customers joined the company during the last quarter**
dsf1 = df.reindex(
columns=["Customer Status", "Streaming Music", "Streaming TV", "Streaming Movies"]
)
dsf1 = dsf1.loc[dsf1["Customer Status"] == "Joined"]
dsf1 = dsf1.loc[dsf1["Streaming Music"] == "Yes"]
dsf1 = dsf1.loc[dsf1["Streaming TV"] == "Yes"]
dsf1 = dsf1.loc[dsf1["Streaming Movies"] == "Yes"]
dsf1.value_counts()
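# A more compact way to count the customers who joined and use all three streaming
# services (a sketch; same column names as above):
streaming_cols = ["Streaming Music", "Streaming TV", "Streaming Movies"]
joined_all = df[(df["Customer Status"] == "Joined") & df[streaming_cols].eq("Yes").all(axis=1)]
len(joined_all)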
# # **2.What is the customer profile for a customer that churned, joined, and stayed? Are they different?**
# Based on
# * Gender
# * Age Group
# * Cities
# * Internet Service
# * Average Revenue
# * Average GB Consumed
# * Unlimited Data
# * Specific Streaming Service
# * All Streaming Services
# ## **Gender count of customers based on the customer status during the last quarter**
df1
fig = px.histogram(df1, x="Customer Status", y=0, color="Gender", text_auto="0.05")
fig.update_layout(
title="Gender of customers based on the customer status during the last quarter",
xaxis_title="Gender",
yaxis_title="Count",
)
fig.show()
# ## **Age range of customers based on the customer status during the last quarter**
df2
fig = px.histogram(
df2,
x="Customer Status",
y=0,
color="Age_Range",
text_auto="0.05",
width=1400,
height=800,
)
fig.update_layout(
title="Age range of customers based on the customer status during the last quarter",
xaxis_title="Customer Status",
yaxis_title="Count",
)
fig.show()
# ## **Top 10 City of customers based on the customer status during the last quarter**
df41 = df4.loc[df4["Customer Status"] == "Joined"].head(10)
df41
df42 = df4.loc[df4["Customer Status"] == "Stayed"].head(10)
df42
df43 = df4.loc[df4["Customer Status"] == "Churned"].head(10)
df43
# ## **Internet Service of customers based on the customer status during the last quarter**
df6
fig = px.histogram(
df6, x="Customer Status", y=0, color="Internet Service", text_auto="0.05"
)
fig.update_layout(
title="Internet Service of customers based on the customer status during the last quarter",
xaxis_title="Customer Status",
yaxis_title="Count",
)
fig.show()
# ## **Average Revenue generated of customers based on the customer status during the last quarter**
da2 = df.reindex(columns=["Customer Status", "Total Revenue"])
da2
dfa2 = (
    df.groupby("Customer Status")["Total Revenue"]
    .mean()
    .sort_values(ascending=False)
    .to_frame()
    .reset_index()
)
print(
"Average Revenue generated of customers based on the customer status during the last quarter"
)
dfa2
# ## **Average GB Consumed by customers based on the customer status during the last quarter**
#
dgb2 = (
    df.groupby("Customer Status")["Avg Monthly GB Download"]
    .mean()
    .sort_values(ascending=False)
    .to_frame()
    .reset_index()
)
print(
"Overall Average GB Consumed by customers based on the customer status during the last quarte"
)
dgb2
# ## **Unlimited Data customers based on the customer status during the last quarter**
dfu1
fig = px.histogram(
dfu1, x="Customer Status", y=0, color="Unlimited Data", text_auto="0.05"
)
fig.update_layout(
title="Unlimited Data customers based on the customer status during the last quarter",
xaxis_title="Customer Status",
yaxis_title="Count",
)
fig.show()
# ## **Specific Streaming Service of customers based on the customer status during the last quarter**
#
dre1 = df.reindex(columns=["Customer Status", "Streaming Music"])
dre1 = dre1.loc[dre1["Streaming Music"] == "Yes"]
dre1.value_counts()
dre2 = df.reindex(columns=["Customer Status", "Streaming TV"])
dre2 = dre2.loc[dre2["Streaming TV"] == "Yes"]
dre2.value_counts()
dre3 = df.reindex(columns=["Customer Status", "Streaming Movies"])
dre3 = dre3.loc[dre3["Streaming Movies"] == "Yes"]
dre3.value_counts()
# ## **All Streaming Services of customers based on the customer status during the last quarter**
dsf2 = df.reindex(
columns=["Customer Status", "Streaming Music", "Streaming TV", "Streaming Movies"]
)
dsf2 = dsf2.loc[dsf2["Streaming Music"] == "Yes"]
dsf2 = dsf2.loc[dsf2["Streaming TV"] == "Yes"]
dsf2 = dsf2.loc[dsf2["Streaming Movies"] == "Yes"]
dsf2.value_counts()
# # **3.What seem to be the key drivers of customer churn?**
dq31 = (
df.groupby(["Churn Reason"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
.head(10)
)
dq31
px.pie(
values=dq31[0],
names=dq31["Churn Reason"],
title="Key Churn Reason of customer during the last quarter",
)
dq32 = (
df.groupby(["Churn Category"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
dq32
px.pie(
values=dq32[0],
names=dq32["Churn Category"],
title="Key Churn Category of customer during the last quarter",
)
# # **4.Is the company losing high value customers? If so, how can they retain them?**
d4 = df.reindex(
columns=[
"Customer Status",
"Tenure in Months",
"Total Revenue",
"Churn Reason",
"Churn Category",
]
)
dq41 = d4.loc[d4["Customer Status"] == "Churned"]
dq41
dq41.sort_values(by="Total Revenue", ascending=False).head(30)
# ## **The 30 highest-revenue customers the company has lost, with their loyalty (Tenure in Months), Total Revenue, and churn reason**
# # **5.Out of the 3 customer status, stayed, churned and joined, which has the highest %?**
dfq5 = (
df.groupby(["Customer Status"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
dfq5
px.pie(
values=dfq5[0],
names=dfq5["Customer Status"],
title="Total % of customer status during the last quarter",
)
# # **6.What payment method was preferred by churned users?**
dfuse = (
df.groupby(["Customer Status", "Payment Method"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
dfuse
dfus = dfuse.loc[dfuse["Customer Status"] == "Churned"]
dfus
fig = px.histogram(
dfus, x="Payment Method", y=0, color="Payment Method", text_auto="0.05"
)
fig.update_layout(
title="payment method was preferred by churned users during the last quarter",
xaxis_title="Payment Method",
yaxis_title="Count",
)
fig.show()
# # **7.What are the top 12 cities that churned?**
dfch = (
df.groupby(["Customer Status", "City"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
dfch
dfto = dfch.loc[dfch["Customer Status"] == "Churned"].head(12)
dfto
fig = px.histogram(dfto, x="City", y=0, color="City", text_auto="0.05")
fig.update_layout(
title="Top 12 City of customers Churned the company during the last quarter",
xaxis_title="City",
yaxis_title="Count",
)
fig.show()
# # **8.What churn offers were more preferable by the customers?**
dfq8 = (
df.groupby(["Customer Status", "Offer"])
.size()
.to_frame()
.sort_values([0], ascending=False)
.reset_index()
)
dfq8
dfq81 = dfq8.loc[dfq8["Customer Status"] == "Churned"]
dfq81
fig = px.histogram(dfq81, x="Offer", y=0, color="Offer", text_auto="0.05")
fig.update_layout(
title="Offers more preferable by the Churn customers during the last quarter",
xaxis_title="Offer",
yaxis_title="Count",
)
fig.show()
|