# ---
# ## Import the packages we will use
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
print(f"pandas version ={pd.__version__}")
print(f"seaborn version ={pd.__version__}")
print(f"numpy version ={pd.__version__}")
# ---
# ### Prepare the data
url = "/kaggle/input/the-best-cities-for-a-workation/best cities for a workation.csv"
df = pd.read_csv(url)
df.head()
df.info()
cols = [
"Remote connection: Average WiFi speed (Mbps per second)",
"Co-working spaces: Number of co-working spaces",
"Caffeine: Average price of buying a coffee",
"Travel: Average price of taxi (per km)",
"After-work drinks: Average price for 2 beers in a bar",
"Accommodation: Average price of 1 bedroom apartment per month",
"Food: Average cost of a meal at a local, mid-level restaurant",
"Climate: Average number of sunshine hours",
"Tourist attractions: Number of ‘Things to do’ on Tripadvisor",
"Instagramability: Number of photos with #",
]
sns.pairplot(df, vars=cols, plot_kws={"alpha": 0.4})
# ---
# ## Scikit-learn: KMeans Clustering
from sklearn import preprocessing
from sklearn.cluster import KMeans
# ---
# ## Z-score
scaler = preprocessing.StandardScaler()
z = scaler.fit_transform(df[cols])
z[:5].round(5)
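# A quick sanity check (an addition, not in the original notebook): the StandardScaler output should match
# the manual z-score formula z = (x - mean) / std, computed column-wise with the population std.
manual_z = (df[cols] - df[cols].mean()) / df[cols].std(ddof=0)
print(np.allclose(z, manual_z.to_numpy()))  # expected: True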
# ---
# ## Yeo-Johnson transformation
pt = preprocessing.PowerTransformer(method="yeo-johnson", standardize=True)
mat = pt.fit_transform(df[cols])
mat[:5].round(5)
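# For reference (an addition, not in the original notebook), the fitted Yeo-Johnson lambda per column
# can be inspected on the PowerTransformer:
pd.Series(pt.lambdas_, index=cols).round(3)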
z_cols = [f"z_{c}" for c in cols]
bc_cols = [f"bc_{c}" for c in cols]
ds = pd.concat(
[df, pd.DataFrame(mat, columns=bc_cols), pd.DataFrame(z, columns=z_cols)],
axis="columns",
)
ds[bc_cols].hist(
layout=(1, len(cols)), figsize=(3 * len(cols), 3.5), color="orange", alpha=0.5
)
X = pd.DataFrame(mat, columns=cols)
X.head()
# ---
# ## Scatter matrix of the transformed data
sns.pairplot(X, plot_kws={"alpha": 0.4})
# ---
# # KMeans Clustering
from sklearn.cluster import KMeans
# ---
# ## Elbow method
ssd = []
for k in range(2, 10):
m = KMeans(n_clusters=k)
m.fit(X)
ssd.append([k, m.inertia_])
ssd
dd = pd.DataFrame(ssd, columns=["k", "ssd"])
dd
dd["pct_chg"] = dd["ssd"].pct_change() * 100
plt.plot(dd["k"], dd["ssd"], linestyle="--", marker="o")
for index, row in dd.iterrows():
plt.text(row["k"] + 0.02, row["ssd"] + 0.02, f'{row["pct_chg"]:.2f}', fontsize=10)
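# As a complementary check to the elbow method (an addition, not in the original notebook),
# the silhouette score can be computed for the same range of k; higher values indicate better-separated clusters.
from sklearn.metrics import silhouette_score

for k in range(2, 10):
    labels = KMeans(n_clusters=k, random_state=0).fit_predict(X)
    print(k, round(silhouette_score(X, labels), 3))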
# ---
# ## fit the model
# ## Use 3 clusters
model = KMeans(n_clusters=3)
model.fit(X)
model.cluster_centers_.round(4)
model.labels_
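# To read the centroids in the original feature units (an addition, not in the original notebook),
# the Yeo-Johnson transform can be inverted on the cluster centers:
pd.DataFrame(pt.inverse_transform(model.cluster_centers_), columns=cols).round(2)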
# ## Making sense of each cluster
df["cluster"] = model.labels_
df.head()
sns.countplot(x="cluster", data=df)
fig, ax = plt.subplots(nrows=4, ncols=3, figsize=(20, 9))
ax = ax.ravel()
for i, col in enumerate(cols):
sns.violinplot(x="cluster", y=col, data=df, ax=ax[i])
dx = X
dx["cluster"] = model.labels_
dx.head()
dx.groupby("cluster").median()
sns.heatmap(dx.groupby("cluster").median(), cmap="Blues", linewidths=1)
|
# # Taxes and Partisanship
# ## Isaac Liu
# Objective: Analyze state personal income, corporate, and sales tax rates based on the identity of the party in control of state government.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load state tax data
state_tax_data = pd.read_csv(
"/kaggle/input/us-state-dc-tax-rates-2010-2020/State_Taxes.csv"
)
state_tax_data
# For this exercise, we will make use of the adjusted rate values
# These versions of the variables generally contain useful edits for corporate and sales taxes
# For personal income taxes, the adjusted version is more consistent, relying on a single data source
state_tax_data_limited = state_tax_data[
["year", "State", "Corp_Rate_Adj", "Pers_Rate_Adj", "Sales_Rate_Adj"]
]
state_tax_data_limited
# Set values of NaN to 0
state_tax_data_no_NaN = state_tax_data_limited.fillna(value=0)
state_tax_data_no_NaN
# Load State and Territory control data
list_of_dfs = []
for year in range(2010, 2021):
print(year)
year_control_data = pd.read_excel(
"/kaggle/input/us-state-and-territory-control-ncsl/US State and Territory Control - NCSL.xlsx",
sheet_name=str(year),
)
year_control_data["year"] = year
list_of_dfs.append(year_control_data)
# Combine dataframes
combined_control_frame = pd.concat(list_of_dfs)
combined_control_frame
# Limit and rename variables
# Fill NaN with 'Unknown'
control_frame_lim = combined_control_frame.rename(
columns={"StateOrTerritory": "State", "State\nControl": "Control"}
)[["State", "year", "Control"]].fillna(value="Unknown")
control_frame_lim
# Unique values of control
pd.unique(control_frame_lim["Control"])
# Recode Dem* to Dem
control_frame_for_join = control_frame_lim.copy()
control_frame_for_join.loc[
control_frame_for_join["Control"] == "Dem*", "Control"
] = "Dem"
pd.unique(control_frame_for_join["Control"])
# Join together on State and year
joined_df = state_tax_data_no_NaN.merge(
    control_frame_for_join, on=["State", "year"], how="outer"
)
joined_df
# For each value of Control, compute the mean of all taxes
control_taxes = (
joined_df[["Control", "Corp_Rate_Adj", "Pers_Rate_Adj", "Sales_Rate_Adj"]]
.groupby("Control")
.mean()
)
control_taxes
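# A simple visualization of the result (an addition, not part of the original notebook); assumes
# matplotlib is available in the environment.
import matplotlib.pyplot as plt

control_taxes.plot(kind="bar", figsize=(8, 5))
plt.ylabel("Mean adjusted tax rate")
plt.title("Mean state tax rates by party in control")
plt.tight_layout()
plt.show()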
|
# Basic libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import missingno as msno
# Visualization libraries
import altair as alt
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
# Plotly visualization
import chart_studio.plotly as py
import plotly.tools as tls
# Any results you write to the current directory are saved as output.
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Display markdown formatted output like bold, italic bold etc.
from IPython.display import Markdown, display
def bold(string):
display(Markdown(string))
#
# ### Introduction to 2019-nCoV Dataset
# We will be performing exploratory data analysis
data = pd.read_csv(
"../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv"
) # Loading Day level information on 2019-nCoV affected cases to "data"
head = data.head()
tail = data.tail()
conc_data_row = pd.concat([head, tail], axis=0, ignore_index=True)
conc_data_row
# Simple statistics on this dataset
data.describe()
# This is information about the dataset itself
data.info()
# Converting Date and Last Update objects to datetime
data["Last Update"] = data["Last Update"].apply(pd.to_datetime)
data["Date"] = data["Date"].apply(pd.to_datetime)
data.drop(["Sno"], axis=1, inplace=True)
data = data.replace("nan", np.nan)
data.head()
# Creating a data-dense display to visualize patterns in data completion
# As you can see, this dataset is very well put together with complete fields
msno.matrix(data)
bold("**Areas where deaths occurred**")
from datetime import date
data_3_feb = data[data["Date"] > pd.Timestamp(date(2020, 2, 4))]
data_deaths = data_3_feb[data_3_feb["Deaths"] > 1]
data_deaths
data_deaths.groupby(["Country", "Province/State"]).sum()
#
# ### Modeling the Potential Spread
# Using RandomForestRegressor and cross validation, I will predict the potential spread of the disease
from sklearn.model_selection import train_test_split
# Assigning X as our data
X = data.copy()
# Remove rows with missing target, separate target from predictors
X.dropna(axis=0, subset=["Confirmed"], inplace=True)
y = X.Confirmed
X.drop(["Confirmed"], axis=1, inplace=True)
# Create validation set from training data
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.8, test_size=0.2, random_state=0
)
# Select categorical (object-dtype) columns
categorical_cols = [
cname for cname in X_train.columns if X_train[cname].dtype == "object"
]
# Select numerical columns
numerical_cols = [
cname for cname in X_train.columns if X_train[cname].dtype in ["int64", "float64"]
]
# Keep selected columns only
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_valid = X_valid[my_cols].copy()
print(categorical_cols)
print(numerical_cols)
print("")
X_train.head()
bold("**Setting up pipeline and getting scores**")
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
# preprocessing for numerical data
numerical_transformer = SimpleImputer(strategy="constant")
# preprocessing for categorical data
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(
transformers=[
("num", numerical_transformer, numerical_cols),
("cat", categorical_transformer, categorical_cols),
]
)
# Run cross_validation since this is a small dataset
# Multiply by -1 since sklearn calculates *negative* MAE
def get_score(n_estimators):
my_pipeline = Pipeline(
steps=[
("preprocessor", preprocessor),
("model", RandomForestRegressor(n_estimators, random_state=0)),
]
)
scores = -1 * cross_val_score(
my_pipeline, X, y, cv=3, scoring="neg_mean_absolute_error"
)
return scores.mean()
results = {}
for i in range(1, 20):
results[50 * i] = get_score(50 * i)
print(results)
bold("**Visualizing best model to use**")
plt.plot(list(results.keys()), list(results.values()))
plt.show()
bold("**This is our best model yet for predicting the spread of 2019-nCoV**")
print(get_score(700))
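# A small helper (an addition, not in the original notebook) to pick the n_estimators value with the
# lowest cross-validated MAE from the results dict above:
best_n = min(results, key=results.get)
print(f"Best n_estimators: {best_n} (MAE = {results[best_n]:.2f})")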
|
# ### This notebook uses the Global Terrorism - START data CSV dataset to analyse the behaviour of terrorism in the world from 1970 to 2015 and the behaviour of the top terrorist groups (preferred weapons, preferred target types, etc.).
# The Questions this notebook answers:
# 1 - Number of Attacks Per Region
# 2 - Global Terrorism - Number Of Attacks Per country (All Over The World)
# 3 - Number of People Killed by Their Nationality
# 4 - Number of Attacks Per Provstate (Primary Subnational Administrative Division)
# 5 - Number of Attacks & Suicides Per Year (1970 - 2015)
# 6 - Number Of Attacks Per Target Types (1970-2017)
# 7 - Number Of Attacks By Country (Countries That Have More Than 500 Attacks)
# 8 - Terrorist Attacks by Type (1970-2017)
# 9 - Suicide by Country (1970-2017) "Countries that have more than 1000 Suicides"
# 10 - Cases over Time (Total , Suicide , Extended , Succeeded ) Attacks Per Year.
# 11 - Number of Attacks For Each Group Per Year
# 12 - Number of Attacks Per Region Per Year
# 13 - Number of Attacks For Top Groups Per Year & Their Success Rate & Rate For Different Behaviour & Rate For Multiple Attacks
# 14 - Number of Attack Per Each Target Type For Each Group (Top Groups)
# 15 - Number of Attack Per Each Weapon Type For Each Group (Top Groups)
# 16 - Number of Citizens Killed & Terrorist Killed For Top Groups Per Group (Top Groups)
# 17 - Number of Attacks Per Nationality For Each Group
# # Import Libraries And Read The Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import seaborn as sns
data = pd.read_csv(
"/kaggle/input/gtd/globalterrorismdb_0718dist.csv", encoding="iso-8859-1"
)
# # Little Data Preprocessing
data.head()
cols = []
for col in data.columns:
print(col, data[col].isna().sum())
if data[col].isna().sum() > (
data.shape[0] / 2
): # columns that have more empty rows than half of the dataset "Useless cols"
print("####### Drop " + col)
cols.append(col)
cols, len(cols) # We got 77 columns that are empty enough to drop
data["approxdate"].isna().sum()
# 🛑 There are 77 columns that we should avoid using, so we will drop them.
data.drop(cols, axis=1, inplace=True)
data.shape  # We reduced our columns from 135 to 58
data.head()
"""
Make sure that every column has a suitable number of missing values. There are some columns that still have a large number of
missing values, like "Summary , corp1 , nperps , weapdetail ,weapsubtype1" But we will not use them in our analysis.
"""
for col in data.columns:
print(col, data[col].isna().sum())
"""Generated By GPT
cols_to_drop = ['eventid', 'approxdate', 'extended', 'resolution', 'country', 'region', 'provstate', 'city', 'latitude',
'longitude', 'specificity', 'vicinity', 'location', 'summary', 'crit1', 'crit2', 'crit3', 'doubtterr',
'alternative', 'alternative_txt', 'multiple', 'success', 'suicide', 'attacktype2', 'attacktype2_txt',
'attacktype3', 'attacktype3_txt', 'targtype2', 'targtype2_txt', 'targsubtype2', 'targsubtype2_txt',
'corp2', 'target2', 'natlty2', 'natlty2_txt', 'targtype3', 'targtype3_txt', 'targsubtype3',
'targsubtype3_txt', 'corp3', 'target3', 'natlty3', 'natlty3_txt', 'gname', 'gsubname', 'guncertain1',
'guncertain2', 'guncertain3', 'claimed', 'claimmode', 'claimmode_txt', 'claim2', 'claimmode2',
'claimmode2_txt', 'claim3', 'claimmode3', 'claimmode3_txt', 'compclaim', 'weaptype2', 'weaptype2_txt',
'weapsubtype2', 'weapsubtype2_txt', 'weaptype3', 'weaptype3_txt', 'weapsubtype3', 'weapsubtype3_txt',
'weaptype4', 'weaptype4_txt', 'weapsubtype4', 'weapsubtype4_txt', 'propextent', 'propextent_txt', 'propvalue',
'propcomment', 'ishostkid', 'nhostkid', 'nhostkidus', 'nhours', 'ndays', 'divert', 'kidhijcountry', 'ransom',
'ransomamt', 'ransomamtus', 'ransompaid', 'ransompaidus', 'ransomnote', 'hostkidoutcome', 'hostkidoutcome_txt',
'nreleased', 'addnotes', 'scite1', 'scite2', 'scite3', 'dbsource', 'INT_LOG', 'INT_IDEO', 'INT_MISC', 'INT_ANY']
"""
# # Some Basic Analysis
# 🛑 Some columns are not clear in terms of their meaning. Let's use ChatGPT 🤖 to clarify these columns and understand what they mean.
# 🛑 There are a huge number of columns to investigate, so let's use ChatGPT 🤖 to help us identify what we should expect from this dataset.
# All attacks per year (we will need this a lot in our analysis).
all_attacks = data.groupby("iyear")["eventid"].count().reset_index()
all_attacks.head()
# ### Q1 - Number of Attacks Per Region
fig = px.bar(
data.groupby("region_txt")["eventid"].count().reset_index(),
x="region_txt",
y="eventid",
color="eventid",
title="Number of Attacks Per Region",
)
fig.update_layout(xaxis_title="Region", yaxis_title="Number Of Attacks")
fig.show()
# ### Q2 - Global Terrorism - Number Of Attacks Per country (All Over The World)
attacks_per_country = data.groupby("country_txt")["eventid"].count().reset_index()
attacks_per_country.columns = ["country_txt", "Number Of Attacks"]
fig1 = px.choropleth(
attacks_per_country,
locations="country_txt",
locationmode="country names",
color="Number Of Attacks",
hover_name="country_txt",
projection="natural earth",
)
fig1.update_layout(title_text="Global Terrorism - Number Of Attacks Per country")
fig1.show()
# ### Q3 - Number of People Killed by Their Nationality
Nationality = (
data.groupby(["natlty1_txt"])["eventid"]
.count()
.reset_index()
.sort_values(by="eventid", ascending=False)[:40]
)
fig2 = px.bar(
Nationality,
x="natlty1_txt",
y="eventid",
title="Number of People Killed by Their Nationality",
)
fig2.update_layout(xaxis_title="Nationality", yaxis_title="Number Of Kills")
fig2.show()
# ### Q4 - Number of Attacks Per Provstate (Primary Subnational Administrative Division)
prov = (
data.groupby("provstate")["eventid"]
.count()
.reset_index()
.sort_values(by="eventid", ascending=False)[:50]
)
fig3 = px.bar(
prov,
x="provstate",
y="eventid",
title="Number of Attacks Per Provstate (Primary Subnational Administrative Division)",
)
fig3.update_layout(xaxis_title="Provstate", yaxis_title="Number Of Attacks")
fig3.show()
# ### Q5 - Number of Attacks & Suicides Per Year (1970 - 2015)
# Create subplots with 1 row and 1 column
suicide_year = data.groupby("iyear")["suicide"].sum().reset_index()
fig4 = make_subplots(
    rows=1, cols=2, subplot_titles=("Number Of Attacks", "Number Of Suicides")
)
attacks_per_year = go.Bar(
x=all_attacks["iyear"], y=all_attacks["eventid"], name="Attacks"
)
suicide_per_year = go.Bar(
x=suicide_year["iyear"], y=suicide_year["suicide"], name="Sucides"
)
fig4.add_trace(attacks_per_year, row=1, col=1)
fig4.add_trace(suicide_per_year, row=1, col=2)
fig4.update_layout(
barmode="group", title_text="Number of Attacks & Suicides Per Year (1970 - 2015)"
)
fig4.update_xaxes(title_text="Year")
fig4.update_yaxes(title_text="Number of Cases", secondary_y=False)
fig4.show()
# 🛑 The number of terrorist attacks peaked in 2014 (interesting), drops to almost nothing around 1993, and the number of suicide attacks peaked around 2016.
# ### Q6 - Number Of Attacks Per Target Types (1970-2017)
target_type = data.groupby("targtype1_txt")["eventid"].count()
attack_region = data.groupby("region_txt")["eventid"].count()
fig5 = px.bar(target_type, x=target_type.index, y=target_type)
fig5.update_layout(
title="Number Of Attacks Per Target Types (1970-2017)",
xaxis_title="Target Type Of Attack",
yaxis_title="Number Of Attacks",
)
fig5.show()
# ### Q7 - Number Of Attacks By Country (Countries That Have More Than 500 Attacks)
attacks_per_country.columns = ["Country", "Attacks"]
attacks_per_country = attacks_per_country[attacks_per_country["Attacks"] > 500]
fig6 = px.scatter(
attacks_per_country,
x="Country",
y="Attacks",
size="Attacks",
color="Attacks",
color_continuous_scale=px.colors.sequential.Plasma,
hover_name="Country",
labels={"Attacks": "Number of Attacks", "Country": "Country"},
template="plotly_white",
size_max=80,
)
# Add text labels to the plot
for i, row in attacks_per_country.iterrows():
fig6.add_annotation(
x=row["Country"],
y=row["Attacks"],
text=row["Country"],
font=dict(size=10, color="black"),
showarrow=False,
)
# Update the plot layout
fig6.update_layout(
title="Number Of Attacks By Country (Countries That Have More Than 500 Attacks)",
xaxis_title="",
yaxis_title="Number Of Attacks",
plot_bgcolor="white",
)
# Hide the x-axis tick labels
fig6.update_xaxes(showticklabels=False)
# Set the size of the plot
fig6.update_layout(width=1000, height=800)
# Show the plot
fig6.show()
# ### Q8 - Terrorist Attacks by Type (1970-2017)
attacks_type = data.groupby("attacktype1_txt")["eventid"].count().reset_index()
fig7 = px.scatter(
attacks_type,
x="attacktype1_txt",
y="eventid",
size="eventid",
color="eventid",
color_continuous_scale=px.colors.sequential.Plasma,
hover_name="attacktype1_txt",
labels={"eventid": "Number of Attacks", "attacktype1_txt": "Country"},
template="plotly_white",
size_max=80,
)
# Add text labels to the plot
for i, row in attacks_type.iterrows():
fig7.add_annotation(
x=row["attacktype1_txt"],
y=row["eventid"],
text=row["attacktype1_txt"],
font=dict(size=10, color="black"),
showarrow=False,
)
# Update the plot layout
fig7.update_layout(
title="Terrorist Attacks by Type (1970-2017)",
xaxis_title="",
yaxis_title="Number Of Attacks",
plot_bgcolor="white",
)
# Hide the x-axis tick labels
fig7.update_xaxes(showticklabels=False)
# Set the size of the plot
fig7.update_layout(width=900, height=800)
# Show the plot
fig7.show()
# ### Q9 - Suicide by Country (1970-2017) "Countries that have more than 1000 Suicides"
suicide_country = data.groupby("country_txt")["suicide"].sum().reset_index()
suicide_country.columns = ["Country", "Suicide"]
suicide_country = suicide_country[suicide_country["Suicide"] > 1000]
fig8 = px.scatter(
suicide_country,
x="Country",
y="Suicde",
size="Suicde",
color="Suicde",
color_continuous_scale=px.colors.sequential.Plasma,
hover_name="Country",
labels={"Suicide": "Number of suicides", "Country": "Country"},
template="plotly_white",
size_max=80,
)
# Add text labels to the plot
for i, row in suicide_country.iterrows():
fig8.add_annotation(
x=row["Country"],
y=row["Suicde"],
text=row["Country"],
font=dict(size=10, color="black"),
showarrow=False,
)
# Update the plot layout
fig8.update_layout(
title='Suicide by Country (1970-2017) "Countries that have more than 1000 Suicides"',
xaxis_title="",
yaxis_title="Number of suicides",
plot_bgcolor="white",
)
# Hide the x-axis tick labels
fig8.update_xaxes(showticklabels=False)
# Set the size of the plot
fig8.update_layout(width=1000, height=800)
# Show the plot
fig8.show()
# ### Q10 - Cases over Time (Total , Suicide , Extended , Succeeded ) Attacks Per Year
suicide = data.groupby("iyear")["suicide"].sum().reset_index()
extended = data.groupby("iyear")["extended"].sum().reset_index()
success = data.groupby("iyear")["success"].sum().reset_index()
total_cases = go.Scatter(
x=all_attacks["iyear"], y=all_attacks["eventid"], mode="lines", name="Total cases"
)
suicide_cases = go.Scatter(
x=suicide["iyear"], y=suicide["suicide"], mode="lines", name="Success cases"
)
extended_cases = go.Scatter(
x=extended["iyear"], y=extended["extended"], mode="lines", name="Extended cases"
)
success_cases = go.Scatter(
x=success["iyear"], y=success["success"], mode="lines", name="Suicide cases"
)
# Add traces to a single Figure object
fig9 = go.Figure(data=[total_cases, success_cases, extended_cases, suicide_cases])
fig9.update_layout(title="Cases over Time", xaxis_title="Year", yaxis_title="Cases")
fig9.show()
# # Deeper Analysis (This Analysis Focuses More On The Top Terrorist Groups' Attacks)
# ### Q11 - Number of Attacks For Each Group Per Year
groups = (
data.groupby("gname")["eventid"]
.count()
.reset_index()
.sort_values(by="eventid", ascending=False)["gname"][:20]
)
columns = ["iyear", "gname", "eventid"]
filtered_data = data[columns][data["gname"].isin(groups)]
# Group the data by year and group name, and count the number of attacks
grouped_data = filtered_data.groupby(["iyear", "gname"]).count().reset_index()
# Plot the data using Plotly
fig10 = px.line(grouped_data, x="iyear", y="eventid", color="gname")
fig10.update_layout(
xaxis_title="Year",
yaxis_title="Number Of Attacks",
legend_title="Name Of The Group",
legend_traceorder="reversed",
title_text="Number of Attacks For Each Group Per Year",
width=900,
height=600,
paper_bgcolor="LightSteelBlue",
)
fig10.show()
# ### Q12 - Number of Attacks Per Region Per Year
# 🛑 We saw that most attacks are in the Middle East & North Africa (Figure 1), but was that the case a long time ago? Let's See.
region = (
data.groupby("region_txt")["eventid"]
.count()
.reset_index()
.sort_values(by="eventid", ascending=False)["region_txt"]
)
columns_region = ["iyear", "region_txt", "eventid"]
filtered_data_region = data[columns_region][data["region_txt"].isin(region)]
# Group the data by year and group name, and count the number of attacks
grouped_data_region = (
filtered_data_region.groupby(["iyear", "region_txt"]).count().reset_index()
)
# Plot the data using Plotly
fig11 = px.bar(grouped_data_region, x="iyear", y="eventid", color="region_txt")
fig11.update_layout(
xaxis_title="Year",
yaxis_title="Number Of Attacks",
legend_title="Regions",
title_text="Number of Attacks Per Region Per Year",
width=900,
height=600,
paper_bgcolor="LightSteelBlue",
)
fig11.show()
# 🛑 From 1980 to 1990, most attacks were in South America or the Central America & Caribbean region. That pattern then fades out, and another trend appears from 2005 to 2017: attacks were most common in South Asia and the Middle East & North Africa.
# ### Q13 - Number of Attacks For Top Groups Per Year & Their Success Rate & Rate For Different Behaviour & Rate For Multiple Attacks
# All And Succeeded Attacks
all_attacks = (
data.groupby("gname")["eventid"]
.count()
.reset_index()
.sort_values(by="eventid", ascending=False)[1:15]
)
succeeded_attacks = data.groupby("gname")["success"].sum().reset_index()
merged_s_attacks = pd.merge(all_attacks, succeeded_attacks, on="gname")
merged_s_attacks["success_rate"] = (
merged_s_attacks["success"] * 100 / merged_s_attacks["eventid"]
)
# Different behaviour attacks ("If the attack used behaviour different from the group's main behaviour")
differenet_attacks = data.groupby("gname")["crit3"].sum().reset_index()
merged_d_attacks = pd.merge(all_attacks, differenet_attacks, on="gname")
merged_d_attacks["differ_rate"] = (
merged_d_attacks["crit3"] * 100 / merged_d_attacks["eventid"]
)
# Multiple Attacks "If the attack is part of a multiple attack"
multiple_attacks = data.groupby("gname")["multiple"].sum().reset_index()
merged_m_attacks = pd.merge(all_attacks, multiple_attacks, on="gname")
merged_m_attacks["multiple_rate"] = (
merged_m_attacks["multiple"] * 100 / merged_m_attacks["eventid"]
)
# Create subplots with 1 row and 1 column
fig12 = make_subplots(rows=1, cols=1)
success = go.Bar(
x=merged_s_attacks["gname"], y=merged_s_attacks["success"], name="Success"
)
all_atk = go.Bar(x=all_attacks["gname"], y=all_attacks["eventid"], name="Total Cases")
dif = go.Bar(
x=merged_d_attacks["gname"], y=merged_d_attacks["crit3"], name="Different Behaviour"
)
mult = go.Bar(
x=merged_m_attacks["gname"], y=merged_m_attacks["multiple"], name="Multiple"
)
line_success_rate = go.Scatter(
x=merged_s_attacks["gname"],
y=merged_s_attacks["success_rate"],
mode="lines",
name="Success rate",
)
line_differ_rate = go.Scatter(
x=merged_d_attacks["gname"],
y=merged_d_attacks["differ_rate"],
mode="lines",
name="Differ rate",
)
line_multi_rate = go.Scatter(
x=merged_m_attacks["gname"],
y=merged_m_attacks["multiple_rate"],
mode="lines",
name="Multi rate",
)
fig12 = make_subplots(specs=[[{"secondary_y": True}]])
fig12.add_trace(all_atk)
fig12.add_trace(success)
fig12.add_trace(dif)
fig12.add_trace(mult)
fig12.add_trace(line_success_rate, secondary_y=True)
fig12.add_trace(line_differ_rate, secondary_y=True)
fig12.add_trace(line_multi_rate, secondary_y=True)
fig12.update_layout(
barmode="group",
title_text="Number of Attacks For Top Groups Per Year & Their Success Rate"
" & Rate For Different Behaviour & <br>Rate For Multiple Attacks",
)
fig12.update_xaxes(title_text="Group Name")
fig12.update_yaxes(title_text="Number of Cases", secondary_y=False)
fig12.update_yaxes(title_text="Success Rate", secondary_y=True, range=[0, 100])
fig12.show()
# 🛑 We can see that the Taliban has the highest number of cases, FMLN has the highest success rate, CPI-Maoist has the highest different-behaviour rate (presumably they use different behaviours nearly every time), and Boko Haram has the highest multiple-attack rate (presumably their attacks are often part of coordinated multiple incidents).
# ### Q14 - Number of Attack Per Each Target Type For Each Group (Top Groups)
Group_Target = data.groupby(["gname", "targtype1_txt"])["eventid"].count().reset_index()
Group_Target_Chart = Group_Target.loc[
Group_Target["gname"].isin(list(all_attacks["gname"]))
]
temp = Group_Target.groupby("gname")["eventid"].sum().reset_index()
Group_Target_Chart = pd.merge(Group_Target_Chart, temp, on="gname")
Group_Target_Chart["Percentage"] = (
Group_Target_Chart["eventid_x"] * 100 / Group_Target_Chart["eventid_y"]
)
Group_Target_Chart["Percentage"] = Group_Target_Chart["Percentage"].round(decimals=3)
# Plot the data using Plotly
fig13 = px.bar(
Group_Target_Chart,
x="gname",
y="eventid_x",
custom_data=["targtype1_txt", "eventid_x", "Percentage"],
color="targtype1_txt",
)
fig13.update_layout(
xaxis_title="Group Name",
yaxis_title="Number Of Attacks",
legend_title="Type Of Attack",
title_text="Number of Attack Per Each Type For Each Group",
width=900,
height=600,
paper_bgcolor="LightSteelBlue",
)
fig13.update_traces(
hovertemplate="<br>".join(
[
"targtype1_txt: %{customdata[0]}",
"# Of Attacks: %{customdata[1]}",
"Percentage: %{customdata[2]}",
]
)
)
fig13.show()
# 🛑 We see that the largest share of the Taliban's attacks target the police, ISIL's target private citizens and property, and FMLN's target the military.
# ### Q15 - Number of Attack Per Each Weapon Type For Each Group (Top Groups)
Group_Weapon = data.groupby(["gname", "weaptype1_txt"])["eventid"].count().reset_index()
Group_Weapon_Chart = Group_Weapon.loc[
Group_Weapon["gname"].isin(list(all_attacks["gname"]))
]
temp = Group_Weapon.groupby("gname")["eventid"].sum().reset_index()
Group_Weapon_Chart = pd.merge(Group_Weapon_Chart, temp, on="gname")
Group_Weapon_Chart["Percentage"] = (
Group_Weapon_Chart["eventid_x"] * 100 / Group_Weapon_Chart["eventid_y"]
)
Group_Weapon_Chart["Percentage"] = Group_Weapon_Chart["Percentage"].round(decimals=3)
# Plot the data using Plotly
fig14 = px.bar(
Group_Weapon_Chart,
x="gname",
y="eventid_x",
custom_data=["weaptype1_txt", "eventid_x", "Percentage"],
color="weaptype1_txt",
)
fig14.update_layout(
xaxis_title="Group Name",
yaxis_title="Number Of Attacks",
legend_title="Type Of Attack",
title_text="Number of Attack Per Each Weapon Type For Each Group",
paper_bgcolor="LightSteelBlue",
)
fig14.update_traces(
hovertemplate="<br>".join(
[
"weaptype1_txt: %{customdata[0]}",
"# Of Attacks: %{customdata[1]}",
"Percentage: %{customdata[2]}",
]
)
)
fig14.show()
# 🛑 We see that the Taliban's most-used weapons are explosives and FMLN's are firearms, and nearly all of the groups use explosives most often.
# ### Q16 - Number of Citizens Killed & Terrorist Killed For Top Groups Per Group (Top Groups)
n_kills = (
data.groupby("gname")["nkill"]
.sum()
.reset_index()
.sort_values(by="nkill", ascending=False)[1:15]
)
n_citizens_killed = (
data.groupby("gname")["nkillus"]
.sum()
.reset_index()
.sort_values(by="nkillus", ascending=False)[1:15]
)
n_terrorists_killed = (
data.groupby("gname")["nkillter"]
.sum()
.reset_index()
.sort_values(by="nkillter", ascending=False)[1:15]
)
n_kills = n_kills.loc[n_kills["gname"].isin(list(all_attacks["gname"]))]
n_terrorists_killed = n_terrorists_killed.loc[
n_terrorists_killed["gname"].isin(list(all_attacks["gname"]))
]
# Create subplots with 1 row and 1 column
fig15 = make_subplots(rows=1, cols=3)
n_kills_chart = go.Bar(x=n_kills["gname"], y=n_kills["nkill"], name="nkill")
n_citizens_killed_chart = go.Bar(
x=n_citizens_killed["gname"], y=n_citizens_killed["nkillus"], name="nkillus"
)
n_terrorists_killed_chart = go.Bar(
x=n_terrorists_killed["gname"], y=n_terrorists_killed["nkillter"], name="nkillter"
)
fig15.add_trace(n_kills_chart, row=1, col=1)
fig15.add_trace(n_citizens_killed_chart, row=1, col=2)
fig15.add_trace(n_terrorists_killed_chart, row=1, col=3)
fig15.update_layout(
barmode="group",
title_text="Number of All Killed & Citizens Killed & Terrorist Killed For Top "
"Groups Per Group",
height=800,
)
fig15.update_xaxes(title_text="Group Name")
fig15.update_yaxes(title_text="Number of Cases", secondary_y=False)
fig15.show()
# 🛑 We see that ISIL has the highest number of kills (nearly 10K more than the second group) even though they did not carry out the most attacks, which means their attacks are unusually lethal. Hezbollah has the highest number of citizens killed even though they rank #38 by number of attacks, which suggests they tend to kill more citizens per attack than other groups. ISIL also has the highest number of terrorists killed, which suggests they use a large number of individuals in their attacks.
# ### Q17 - Number of Attacks Per Nationality For Each Group
Group_Natilnality = (
data.groupby(["gname", "natlty1_txt"])["eventid"].count().reset_index()
)
Group_Natilnality_Chart = Group_Natilnality.loc[
Group_Natilnality["gname"].isin(list(all_attacks["gname"]))
]
temp = Group_Natilnality.groupby("gname")["eventid"].sum().reset_index()
Group_Natilnality_Chart = pd.merge(Group_Natilnality_Chart, temp, on="gname")
Group_Natilnality_Chart["Percentage"] = (
Group_Natilnality_Chart["eventid_x"] * 100 / Group_Natilnality_Chart["eventid_y"]
)
Group_Natilnality_Chart["Percentage"] = Group_Natilnality_Chart["Percentage"].round(
decimals=3
)
# Plot the data using Plotly
fig16 = px.bar(
Group_Natilnality_Chart,
x="gname",
y="eventid_x",
custom_data=["natlty1_txt", "eventid_x", "Percentage"],
color="natlty1_txt",
)
fig16.update_layout(
xaxis_title="Group Name",
yaxis_title="Number Of Attacks",
legend_title="Nationality",
title_text="Number of Attacks Per Nationality For Each Group",
paper_bgcolor="LightSteelBlue",
)
fig16.update_traces(
hovertemplate="<br>".join(
[
"natlty1_txt: %{customdata[0]}",
"# Of Attacks: %{customdata[1]}",
"Percentage: %{customdata[2]}",
]
)
)
fig16.show()
# 🛑 We see that for nearly every group, most of the victims are of the same nationality as the group itself (which makes sense).
# Check how many figure objects exist in the notebook
fig_list = [f for f in globals() if "Figure" in str(type(globals()[f]))]
print(fig_list)
# # Summary
Data_Summary = {
"Group_Name": all_attacks["gname"],
"Number_Of_Attacks": [
7478,
5613,
4555,
3351,
3288,
2772,
2671,
2487,
2418,
2310,
2024,
1878,
1630,
1606,
],
"Number_Of_Succeeded_Attacks": [
6680,
4759,
4337,
3317,
3016,
2457,
2282,
2302,
2247,
2175,
1712,
1774,
1309,
1530,
],
"Number_Of_Different_Attacks": [
6401,
4466,
4145,
2131,
2103,
1901,
1930,
2115,
2174,
1604,
1914,
1863,
1559,
1166,
],
"Number_Of_Multiple_Attacks": [
1300,
1849,
932,
926,
475,
313,
233,
301,
965,
273,
194,
265,
203,
77,
],
"Success_Percentage %": [
89.3,
84.7,
95.2,
98.9,
91.7,
88.6,
85.4,
92.5,
92.9,
94.1,
85.0,
94.4,
80.3,
95.2,
],
"Differ_Percentage %": [
85.5,
79.5,
90.9,
63.5,
63.9,
68.5,
72.2,
85.0,
89.9,
69.4,
94.5,
99.2,
95.6,
72.6,
],
"Multiple_Percentage %": [
17.3,
32.9,
20.4,
27.6,
14.4,
11.2,
8.7,
12.1,
39.9,
11.8,
9.5,
14.1,
12.4,
4.7,
],
"Prefered_Weapon": [
"Explosives",
"Explosives ",
"Explosives ",
"Firearms ",
"Explosives ",
"Firearms ",
"Explosives ",
"Explosives ",
"Explosives ",
"Firearms ",
"Explosives ",
"Firearms ",
"Explosives ",
"Firearms ",
],
"Percentage_Of_Used_Of_Prefered_Weapon %": [
42.5,
67.7,
46.3,
53.5,
46.6,
60.6,
46.8,
38.9,
42.7,
41.2,
62.6,
36.3,
42.7,
46.7,
],
"Prefered_Target_Type": [
"Police",
"Private Citizens & Property",
"Private Citizens & Property ",
"Military ",
"Military ",
"Military ",
"Military ",
"Private Citizens & Property ",
"Private Citizens & Property ",
"Military ",
"Business ",
"Private Citizens & Property ",
"Police ",
"Military ",
],
"Percentage_Of_Target_Type_Prefered %": [
33.6,
39.9,
18.8,
36.7,
42.9,
35.8,
29.3,
20.2,
46.5,
34.2,
31.8,
28.4,
26.1,
34.1,
],
"Prefered_Nationlaity": [
"Afghanistan",
"Iraq",
" Peru ",
"El-salvador ",
"Somalia ",
"Philipins ",
"Northen Irelan ",
"Colombia",
"Nigeria",
"Turkey ",
"Spain ",
"India",
"India",
"Sri Lanka",
],
"Percentage_Nationlaity %": [
90.6,
85.2,
97.3,
98.5,
66.2,
97.4,
56.8,
96.0,
85.4,
97.3,
98.9,
99.5,
85.7,
92.4,
],
}
Data_Summary = pd.DataFrame(Data_Summary).reset_index(drop=True)
Data_Summary
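# A sketch (an addition, not in the original notebook) of how the attack counts and rates in the summary
# could be derived programmatically from the merged frames built above, instead of being hard-coded:
auto_summary = (
    merged_s_attacks[["gname", "eventid", "success", "success_rate"]]
    .merge(merged_d_attacks[["gname", "crit3", "differ_rate"]], on="gname")
    .merge(merged_m_attacks[["gname", "multiple", "multiple_rate"]], on="gname")
    .round(1)
)
auto_summary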
# # Useful Resources For Further Information
# 🛑 Notebooks I referred to in order to understand the data, or read to get intuition and ideas about how to analyse it:
# https://www.kaggle.com/code/linhvuu/terrorism-eda/notebook
# https://www.kaggle.com/code/ash316/terrorism-around-the-world
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
"""Set the color palette"""
sns.set_style(style="darkgrid")
sns.set_context(context="poster", font_scale=0.8)
sns.set_palette(sns.color_palette("muted"))
df_all = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
"""Data Cleaning"""
df_all = df_all.fillna(0)
df_all = df_all.drop(columns=["Sno"])
df_all["Last Update"] = df_all["Last Update"].str.split(" ").str[0]
df_all["Country"] = df_all["Country"].where(df_all["Country"] != "China")
df_all["Country"] = df_all["Country"].fillna("Mainland China")
print(df_all)
"""The total number cases in China over time included Hong Kong, Taiwan and Macau"""
# lineplot
df = df_all.copy()
df = df[df["Province/State"] != 0][
df["Country"].isin(["Mainland China", "China", "Hong Kong", "Taiwan", "Macau"])
]
df = df.sort_values(by=["Last Update"])
df_group_sum = pd.DataFrame(df.groupby("Last Update").sum())
plt.figure(figsize=(20, 10))
sns.lineplot(data=df_group_sum)
plt.xticks(rotation=15)
plt.xlabel("Time")
plt.ylabel("Number")
plt.title("Accumulation of Virus Cases Over Time(In China)")
x_date = df_group_sum.index
y_num = ["Confirmed", "Recovered", "Deaths"]
temp = 0
for i in y_num:  # Display the text on the line
for x, y in zip(x_date, df_group_sum[i]):
h_pos = ["center", "right", "left"]
v_pos = ["center", "top", "bottom"]
plt.text(x, y, y, ha=h_pos[temp], va=v_pos[temp])
temp += 1
"""The motality and survival rate in China over time included Hong Kong, Taiwan and Macau"""
df_group_sum["Motality"] = df_group_sum["Deaths"] / df_group_sum["Confirmed"]
df_group_sum["Survival Rate"] = df_group_sum["Recovered"] / df_group_sum["Confirmed"]
df_MS = df_group_sum[["Motality", "Survival Rate"]]
plt.figure(figsize=(15, 5))
sns.lineplot(data=df_MS)
plt.xlabel("Time")
plt.ylabel("Rate")
plt.title("Motality & Survival Rate Over Time(In China)")
"""The new cases in China over time included Hong Kong, Taiwan and Macau"""
df_group_sum = pd.DataFrame(df.groupby("Last Update").sum())
rate = ["New Confirmed", "New Deaths", "New Recovered"]
col = ["Confirmed", "Deaths", "Recovered"]
df_group_sum[rate] = df_group_sum[["Confirmed", "Deaths", "Recovered"]]
df_group_sum = df_group_sum.reset_index()
for i in range(len(rate)):
for j in range(len(df_group_sum["Last Update"])):
if j == 0:
df_group_sum.at[j, rate[i]] = 0
else:
df_group_sum.at[j, rate[i]] = (
df_group_sum.at[j, col[i]] - df_group_sum.at[j - 1, col[i]]
)
df_group_sum = df_group_sum[
["Last Update", "New Confirmed", "New Deaths", "New Recovered"]
].set_index("Last Update")
plt.figure(figsize=(15, 5))
sns.lineplot(data=df_group_sum)
plt.xlabel("Time")
plt.ylabel("New Number")
plt.title("The new cases over the time(In China)")
"""Country explorations"""
df = df_all.copy()
unique_date = list(df["Last Update"].unique())
type_list = ["Confirmed", "Recovered", "Deaths"]
df = (
df[df["Last Update"] == unique_date[-1]]
.groupby("Country")
.sum()
.sort_values(by="Confirmed", ascending=False)
.reset_index()
)
for i in type_list:
plt.figure(figsize=(20, 10))
sns.barplot(x=i, y="Country", data=df)
plt.xlabel(i)
plt.ylabel("Country")
plt.title(i + " Number(Over Country)")
"""Province/State explorations"""
df = df_all.copy()
unique_date = list(df["Last Update"].unique())
type_list = ["Confirmed", "Recovered", "Deaths"]
df = df[df["Province/State"] != 0][
df["Country"].isin(["Mainland China", "China", "Hong Kong", "Taiwan", "Macau"])
]
df = (
df[df["Last Update"] == unique_date[-1]]
.groupby("Province/State")
.sum()
.sort_values(by="Confirmed", ascending=False)
.reset_index()
)
for i in type_list:
plt.figure(figsize=(20, 10))
sns.barplot(x=i, y="Province/State", data=df)
plt.xlabel(i)
plt.ylabel("Province/State")
plt.title(i + " Number(In China)")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train = pd.read_csv("../input/cat-in-the-dat-ii/train.csv")
test = pd.read_csv("../input/cat-in-the-dat-ii/test.csv")
train.sort_index(inplace=True)
train_y = train["target"]
test_id = test["id"]
train.drop(["target", "id"], axis=1, inplace=True)
test.drop("id", axis=1, inplace=True)
from sklearn.metrics import roc_auc_score
cat_feat_to_encode = train.columns.tolist()
smoothing = 0.20
import category_encoders as ce
oof = pd.DataFrame([])
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
for tr_idx, oof_idx in StratifiedKFold(
n_splits=5, random_state=1600, shuffle=True
).split(train, train_y):
ce_target_encoder = ce.TargetEncoder(cols=cat_feat_to_encode, smoothing=smoothing)
ce_target_encoder.fit(train.iloc[tr_idx, :], train_y.iloc[tr_idx])
    oof = pd.concat(
        [oof, ce_target_encoder.transform(train.iloc[oof_idx, :])], ignore_index=False
    )
ce_target_encoder = ce.TargetEncoder(cols=cat_feat_to_encode, smoothing=smoothing)
ce_target_encoder.fit(train, train_y)
train = oof.sort_index()
test = ce_target_encoder.transform(test)
glm = LogisticRegression(
random_state=None,
solver="lbfgs",
max_iter=1600,
fit_intercept=True,
penalty="l2",
verbose=0,
)
glm.fit(train, train_y)
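# roc_auc_score was imported above but never used; as a rough sanity check (an addition, not in the
# original notebook), the AUC on the out-of-fold target-encoded training data can be computed:
print("Train AUC:", roc_auc_score(train_y, glm.predict_proba(train)[:, 1]))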
pd.DataFrame({"id": test_id, "target": glm.predict_proba(test)[:, 1]}).to_csv(
"submission.csv", index=False
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Corona Virus - Analysis
# Exploratory Data Analysis about the Corona Virus spread using data supplied by the World Health Organization.
# importing magic functions
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# importing visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
# importing the combined data
df = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
df.head()
df.info()
# ### Preprocessing the Data
# dropping columns with redundant data
df = df.drop(["Last Update", "Sno"], axis=1)
# changing the date type to datetime
df["Date"] = df["Date"].astype("datetime64[D]")
# changing data type float to int (there are no half people :-))
df[["Confirmed", "Deaths", "Recovered"]] = df[
["Confirmed", "Deaths", "Recovered"]
].astype(int)
# checking for missing values
df.isna().sum()
# fill NA with new string: 'Unknown'
df[["Province/State"]] = df[["Province/State"]].fillna("Unknown")
# combine China and Mainland China
df["Country"] = df["Country"].replace({"Mainland China": "China"})
# combine Cruise Ship and Diamond Princess cruise ship
df["Province/State"] = df["Province/State"].replace(
{"Cruise Ship": "Diamond Princess cruise ship"}
)
# replace 'Other' country for the cruise with 'Japan'
df.loc[df["Province/State"] == "Diamond Princess cruise ship", "Country"] = "Japan"
# ### Development over time
# visualize development of Corona Cases over time
f, ax = plt.subplots(figsize=(12, 6))
sns.lineplot(x="Date", y="Confirmed", data=df, ci=None, label="Confirmed", color="B")
sns.lineplot(x="Date", y="Deaths", data=df, label="Deaths", ci=None, color="R")
sns.lineplot(x="Date", y="Recovered", data=df, label="Recovered", ci=None, color="G")
plt.legend(loc="upper left")
plt.xticks(rotation=45)
plt.ylabel("Corona Cases")
plt.xlim("2020-01-22", "2020-02-15")
plt.tight_layout()
plt.box(False)
plt.title("Corona Cases over Time", fontweight="bold")
plt.show()
# ### Current Situation - Summary
# get the data for the most recent date
df["Date"].max()
df_now = df[df["Date"] == "2020-02-15"]
print(
"As of February 15, 2020 there are",
df_now["Confirmed"].sum(),
"confirmed Corona cases worldwide.",
df_now["Deaths"].sum(),
"people have died from the virus and",
df_now["Recovered"].sum(),
"have recovered.",
)
# ### Development over time
# Corona Cases by Country
df_now = (
df_now.groupby("Country", as_index=False)
.agg({"Confirmed": "sum", "Deaths": "sum", "Recovered": "sum"})
.sort_values(by=["Confirmed"], ascending=False)
)
df_now = df_now.reset_index(drop=True)
df_now
# ## Visualizing on Map
# loading world coordinates map (found on Kaggle)
df_geo = pd.read_csv("../input/world-coordinates/world_coordinates.csv")
df_geo = df_geo.drop(["Code"], axis=1)
df_geo.head()
df_geo.shape
df_now.head()
df_now.shape
# Merging the 2 dataframes on Country to get the long- and latitude values
df_comb = pd.merge(df_now, df_geo, on="Country", how="left")
df_comb
# Geomapping with Folium
import folium
world_map = folium.Map(location=[10, -20], zoom_start=0.5, tiles="cartodbdark_matter")
for lat, lon, value, name in zip(
df_comb["latitude"], df_comb["longitude"], df_comb["Confirmed"], df_comb["Country"]
):
folium.CircleMarker(
[lat, lon],
radius=7,
popup=(
"<strong>Country</strong>: " + str(name).capitalize() + "<br>"
"<strong>Confirmed Cases</strong>: " + str(value) + "<br>"
),
color="red",
fill_color="red",
fill_opacity=0.5,
).add_to(world_map)
world_map
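# The interactive map can also be exported to a standalone HTML file (an addition, not in the original notebook):
world_map.save("corona_world_map.html")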
|
# ### Dataset Download
import urllib
urllib.request.urlretrieve("https://sc.link/AO5l", "subsample.zip")
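# The archive has to be extracted before the dataset paths used below exist. A minimal sketch, assuming
# the zip unpacks into the subsample/ and ann_subsample/ folders (the exact archive layout is an assumption):
import zipfile

with zipfile.ZipFile("subsample.zip") as zf:
    zf.extractall("/kaggle/working/dataset")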
import os
import json
import logging
import random
from tqdm import tqdm
from collections import defaultdict
from typing import Tuple
from glob import glob
import pandas as pd
import numpy as np
from PIL import Image, ImageOps
import os
from ipywidgets import interact
from IPython.display import Image as DImage
import cv2
import torch
from torch import nn, Tensor
from torchvision import models
from torchvision.transforms import Compose
from torchvision.transforms import functional as F
from torchvision import transforms as T
from torchmetrics.detection.mean_ap import MeanAveragePrecision
import warnings
warnings.filterwarnings("ignore")
class_names = [
"call",
"dislike",
"fist",
"four",
"like",
"mute",
"ok",
"one",
"palm",
"peace_inverted",
"peace",
"rock",
"stop_inverted",
"stop",
"three",
"three2",
"two_up",
"two_up_inverted",
"no_gesture",
]
FORMATS = (".jpeg", ".jpg", ".jp2", ".png", ".tiff", ".jfif", ".bmp", ".webp", ".heic")
# ### Dataset loader class
transform = T.ToTensor()
class GestureDataset(torch.utils.data.Dataset):
@staticmethod
def __get_files_from_dir(pth: str, extns: Tuple):
if not os.path.exists(pth):
print(f"Dataset directory doesn't exist {pth}")
return []
files = [f for f in os.listdir(pth) if f.endswith(extns)]
return files
def __read_annotations(self, path):
annotations_all = None
exists_images = []
for target in class_names:
path_to_csv = os.path.join(path, f"{target}.json")
if os.path.exists(path_to_csv):
json_annotation = json.load(open(os.path.join(path, f"{target}.json")))
json_annotation = [
dict(annotation, **{"name": f"{name}.jpg"})
for name, annotation in zip(
json_annotation, json_annotation.values()
)
]
annotation = pd.DataFrame(json_annotation)
annotation["target"] = target
annotations_all = pd.concat(
[annotations_all, annotation], ignore_index=True
)
exists_images.extend(
self.__get_files_from_dir(
os.path.join(self.path_images, target), FORMATS
)
)
else:
if target != "no_gesture":
print(f"Database for {target} not found")
annotations_all["exists"] = annotations_all["name"].isin(exists_images)
annotations_all = annotations_all[annotations_all["exists"]]
users = annotations_all["user_id"].unique()
users = sorted(users)
random.Random(42).shuffle(users)
train_users = users[: int(len(users) * 0.8)]
val_users = users[int(len(users) * 0.8) :]
annotations_all = annotations_all.copy()
if self.is_train:
annotations_all = annotations_all[
annotations_all["user_id"].isin(train_users)
]
else:
annotations_all = annotations_all[
annotations_all["user_id"].isin(val_users)
]
return annotations_all
def __init__(self, path_annotation, path_images, is_train, transform=None):
self.is_train = is_train
self.transform = transform
self.path_annotation = path_annotation
self.path_images = path_images
self.transform = transform
self.labels = {
label: num for (label, num) in zip(class_names, range(len(class_names)))
}
self.annotations = self.__read_annotations(self.path_annotation)
def __len__(self):
return self.annotations.shape[0]
def get_sample(self, index: int):
row = self.annotations.iloc[[index]].to_dict("records")[0]
image_pth = os.path.join(self.path_images, row["target"], row["name"])
image = Image.open(image_pth).convert("RGB")
labels = torch.LongTensor([self.labels[label] for label in row["labels"]])
target = {}
width, height = image.size
bboxes = []
for bbox in row["bboxes"]:
x1, y1, w, h = bbox
bbox_abs = [x1 * width, y1 * height, (x1 + w) * width, (y1 + h) * height]
bboxes.append(bbox_abs)
target["labels"] = labels
target["boxes"] = torch.as_tensor(bboxes, dtype=torch.float32)
target["orig_size"] = torch.as_tensor([int(height), int(width)])
return image, target
def __getitem__(self, index: int):
image, target = self.get_sample(index)
if self.transform:
image = self.transform(image)
return image, target
# ## Setting some constants for training
random_seed = 42
num_classes = len(class_names)
batch_size = 16
num_epoch = 15
torch.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_data = GestureDataset(
path_images="/kaggle/working/dataset/subsample",
path_annotation="/kaggle/working/dataset/ann_subsample",
is_train=True,
transform=transform,
)
test_data = GestureDataset(
path_images="/kaggle/working/dataset/subsample",
path_annotation="/kaggle/working/dataset/ann_subsample",
is_train=False,
transform=transform,
)
def collate_fn(batch):
batch_targets = list()
images = list()
for b in batch:
images.append(b[0])
batch_targets.append({"boxes": b[1]["boxes"], "labels": b[1]["labels"]})
return images, batch_targets
train_dataloader = torch.utils.data.DataLoader(
train_data,
batch_size=batch_size,
collate_fn=collate_fn,
shuffle=True,
num_workers=4,
)
test_dataloader = torch.utils.data.DataLoader(
test_data, batch_size=batch_size, collate_fn=collate_fn, shuffle=True, num_workers=4
)
# ## Loading a pretrained framework and creating a model class
lr = 0.005
momentum = 0.9
weight_decay = 5e-4
model = models.detection.ssdlite320_mobilenet_v3_large(
num_classes=len(class_names) + 1, pretrained_backbone=True
)
model.to(device)
optimizer = torch.optim.SGD(
model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay
)
warmup_factor = 1.0 / 1000
warmup_iters = min(1000, len(train_data) - 1)
lr_scheduler_warmup = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=warmup_factor, total_iters=warmup_iters
)
# ## Evaluation function for calculating the mAP metric
def eval(model, test_dataloader, epoch):
model.eval()
with torch.no_grad():
mapmetric = MeanAveragePrecision()
for images, targets in test_dataloader:
images = list(image.to(device) for image in images)
output = model(images)
for pred in output:
for key, value in pred.items():
pred[key] = value.cpu()
mapmetric.update(output, targets)
metrics = mapmetric.compute()
return metrics
# ## Training loop
os.makedirs("checkpoints", exist_ok=True)  # ensure the checkpoint directory exists
for epoch in range(num_epoch):
model.train()
total = 0
sum_loss = 0
for images, targets in tqdm(train_dataloader):
batch = len(images)
images = list(image.to(device) for image in images)
for target in targets:
for key, value in target.items():
target[key] = value.to(device)
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
loss = losses.item()
optimizer.zero_grad()
losses.backward()
optimizer.step()
lr_scheduler_warmup.step()
total = total + batch
sum_loss = sum_loss + loss
metrics = eval(model, test_dataloader, epoch)
print(f"epoch : {epoch} ||| loss : {sum_loss / total} ||| MAP : {metrics['map']}")
torch.save(model.state_dict(), f"checkpoints/{epoch}.pth")
# ## Test model
images = []
for gesture in class_names[:-1]:
image_path = glob(f"/kaggle/working/dataset/subsample/{gesture}/*.jpg")[0]
images.append(Image.open(image_path))
images_tensors = images.copy()
images_tensors_input = list(transform(image).to(device) for image in images_tensors)
with torch.no_grad():
model.eval()
out = model(images_tensors_input)
# ### Set the score **threshold = 0.2** because a model trained on this small subsample will be of limited quality
bboxes = []
scores = []
labels = []
for pred in out:
ids = pred["scores"] >= 0.2
    bboxes.append(pred["boxes"][ids][:2].cpu().numpy().astype(int))
scores.append(pred["scores"][ids][:2].cpu().numpy())
labels.append(pred["labels"][ids][:2].cpu().numpy())
short_class_names = []
for name in class_names:
if name == "stop_inverted":
short_class_names.append("stop inv.")
elif name == "peace_inverted":
short_class_names.append("peace inv.")
elif name == "two_up":
short_class_names.append("two up")
elif name == "two_up_inverted":
short_class_names.append("two up inv.")
elif name == "no_gesture":
short_class_names.append("no gesture")
else:
short_class_names.append(name)
final_images = []
for bbox, score, label, image in zip(bboxes, scores, labels, images):
image = np.array(image)
for i, box in enumerate(bbox):
_, width, _ = image.shape
image = cv2.rectangle(image, box[:2], box[2:], thickness=3, color=[255, 0, 255])
cv2.putText(
image,
f"{short_class_names[label[i]]}: {score[i]:0.2f}",
(box[0], box[1]),
cv2.FONT_HERSHEY_SIMPLEX,
width / 780,
(0, 0, 255),
2,
)
final_images.append(Image.fromarray(image))
os.makedirs("out_images", exist_ok=True)  # ensure the output directory exists
out_images = []
for i, image in enumerate(final_images):
out_name = f"out_images/{i}.png"
out_images.append(out_name)
image.save(out_name)
# ### Now we can look at the results of the model.
out_dir = "out_images/"
@interact
def show_images(file=os.listdir(out_dir)):
display(DImage(out_dir + file, width=600, height=300))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn import preprocessing
keras = tf.keras
layers = keras.layers
dataset_size = 891
batch_size = 16
epoch = 30
print(f"tensorflow version : {tf.__version__}")
titanic = pd.read_csv(r"/kaggle/input/titanic/train.csv")
titanic = titanic.drop("Name", axis=1)
titanic = titanic.drop("Cabin", axis=1)
titanic = titanic.drop("Ticket", axis=1)
titanic = titanic.drop("PassengerId", axis=1)
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
titanic["Embarked"] = titanic["Embarked"].fillna("N")
titanic["Sex"] = titanic["Sex"].map({"male": 0, "female": 1}).astype(int)
titanic["Embarked"] = titanic["Embarked"].map({"N": 0, "C": 1, "Q": 2, "S": 3})
titanic = np.array(titanic.values.tolist())
label = titanic[:, 0]
label = tf.cast(label, tf.int64)
data = titanic[:, 1:]
data = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit_transform(data)
dataset = tf.data.Dataset.from_tensor_slices((data, label))
dataset = dataset.shuffle(dataset_size).repeat().batch(batch_size)
network = keras.Sequential()
network.add(layers.Dense(32, input_shape=(7,)))
network.add(layers.BatchNormalization())
network.add(layers.ReLU())
network.add(layers.Dense(64))
network.add(layers.BatchNormalization())
network.add(layers.ReLU())
network.add(layers.Dense(128))
network.add(layers.BatchNormalization())
network.add(layers.ReLU())
network.add(layers.Dense(64))
network.add(layers.BatchNormalization())
network.add(layers.ReLU())
network.add(layers.Dense(32))
network.add(layers.BatchNormalization())
network.add(layers.ReLU())
network.add(layers.Dense(1, activation="sigmoid"))
network.summary()
network.compile(
optimizer=keras.optimizers.Adam(1e-3),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy()],
)
network.fit(dataset, epochs=epoch, steps_per_epoch=dataset_size // batch_size)
network.save(r"./FNN.h5")
titanic_test = pd.read_csv(r"/kaggle/input/titanic/test.csv")
titanic_test = titanic_test.drop("Name", axis=1)
titanic_test = titanic_test.drop("Cabin", axis=1)
titanic_test = titanic_test.drop("Ticket", axis=1)
titanic_test["Age"] = (
titanic_test["Age"].fillna(titanic_test["Age"].median()).astype(int)
)
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].mean())
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("N")
titanic_test["Sex"] = titanic_test["Sex"].map({"male": 0, "female": 1}).astype(int)
titanic_test["Embarked"] = (
titanic_test["Embarked"].map({"N": 0, "C": 1, "Q": 2, "S": 3}).astype(int)
)
titanic_test = np.array(titanic_test.values.tolist())
ID = titanic_test[:, 0]
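# Note that the MinMaxScaler below is fit on the test set itself (separately from the training data),
# and the PassengerId column is scaled along with the features and then restored from ID afterwards.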
titanic_test = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit_transform(
titanic_test
)
titanic_test[:, 0] = ID
predict_csv = []
for input_data in titanic_test:
index = input_data[0]
input_data = tf.reshape(input_data[1:], [1, 7])
predict = network.predict(input_data)
if predict[0, 0] > 0.5:
predict = 1
else:
predict = 0
predict_csv.append([int(index), int(predict)])
csv_name = ["PassengerId", "Survived"]
csv_data = pd.DataFrame(columns=csv_name, data=predict_csv)
csv_data.to_csv(r"./predict.csv", index=False)
print("Done")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, accuracy_score
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
# Label encoder
lable_encoder = LabelEncoder()
plt.style.use("ggplot")
df = pd.read_csv("/kaggle/input/titanic/train.csv")
df.head()
# Checking the null values in the data sets.
plt.figure()
plt.title("Total null values")
plt.bar(df.isna().sum().index, df.isna().sum().values)
plt.xlabel("Columns")
plt.ylabel("Null value counts")
plt.xticks(rotation=90)
plt.show()
len(df)
# Name of the columns.
df.columns
# Creating a copy of the data set with required columns.
tmp_df = df[["Survived", "Pclass", "Sex", "Age", "SibSp", "Parch", "Embarked"]].copy()
tmp_df.head()
# Filling the null values of age with median and zero.
def impute_na(df_, variable, median):
df_[variable + "_median"] = df_[variable].fillna(median)
df_[variable + "_zero"] = df_[variable].fillna(0)
# Calculating the median of age.
median = tmp_df.Age.median()
median
# Applying the function to fill the null values.
impute_na(tmp_df, "Age", median)
tmp_df.head()
# creating a dataframe with median filled null values.
median_df = tmp_df[
["Survived", "Pclass", "Sex", "Age_median", "SibSp", "Parch", "Embarked"]
]
median_df.dropna(inplace=True)
median_df.isnull().sum()
# Encoding the categorical column
median_df["Sex"] = lable_encoder.fit_transform(median_df["Sex"])
median_df["Embarked"] = lable_encoder.fit_transform(median_df["Embarked"])
forest = ExtraTreesClassifier(n_estimators=250, random_state=0)
# Defining x and y values median filled dataframe
x = median_df[["Pclass", "Sex", "Age_median", "SibSp", "Parch", "Embarked"]]
y = median_df["Survived"]
forest.fit(x, y)
# Using an extra-trees ensemble to extract the important features from the median-filled dataframe
importances = forest.feature_importances_
print(importances)
std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
print(std)
indices = np.argsort(importances)[::-1]
print(indices)
plt.figure()
plt.title("Feature importances - (median_df)")
plt.bar(range(x.shape[1]), importances[indices], yerr=std[indices], align="center")
plt.xlabel("Index")
plt.ylabel("Importance")
plt.xticks(range(x.shape[1]), indices)
plt.xlim([-1, x.shape[1]])
plt.show()
# Taking the important features extracted from the median-filled dataframe.
x_m = median_df[["Pclass", "Sex", "Age_median"]]
y_m = median_df["Survived"]
# Train / test split.
x_train_m, x_test_m, y_train_m, y_test_m = train_test_split(
x_m, y_m, test_size=0.25, random_state=0
)
# Scaling the data.
sc = StandardScaler()
x_train_m = sc.fit_transform(x_train_m)
x_test_m = sc.transform(x_test_m)
# We are using the three algorithms mentioned below and will compare the model
# outcomes one by one.
models = {
"logistic_regression": LogisticRegression(random_state=0),
"svm": SVC(random_state=0),
"random_forest": RandomForestClassifier(
n_estimators=250, criterion="entropy", random_state=0
),
}
# We will use a for loop to apply each model one by one.
for name, model in models.items():
model.fit(x_train_m, y_train_m)
y_pred_m = model.predict(x_test_m)
print(name)
print("Precision - {}".format(precision_score(y_test_m, y_pred_m, average="macro")))
print("Recall - {}".format(recall_score(y_test_m, y_pred_m, average="macro")))
print("Accuracy - {}".format(accuracy_score(y_test_m, y_pred_m)))
cm = confusion_matrix(y_test_m, y_pred_m)
print()
print("Confusion matrix")
print(cm)
print()
print("----------------------------------------")
print()
xgboost = xgb.XGBClassifier(n_estimators=50, colsample_bytree=0.7, gamma=0.3)
xgboost.fit(x_train_m, y_train_m)
y_pred_m = xgboost.predict(x_test_m)
print("Precision - {}".format(precision_score(y_test_m, y_pred_m, average="macro")))
print("Recall - {}".format(recall_score(y_test_m, y_pred_m, average="macro")))
print("Accuracy - {}".format(accuracy_score(y_test_m, y_pred_m)))
cm = confusion_matrix(y_test_m, y_pred_m)
print()
print("Confusion matrix")
print(cm)
print()
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
test_df.isna().sum()
len(test_df)
# Calculating the median of age.
median = test_df.Age.median()
median
# Applying the function to fill the null values.
impute_na(test_df, "Age", median)
tmp_df.head()
test_tmp = test_df[["Pclass", "Sex", "Age_median"]]
# Encoding the categorical column
test_tmp["Sex"] = lable_encoder.fit_transform(test_tmp["Sex"])
# Scaling the data.
test_tmp = sc.transform(test_tmp)
test_predict = xgboost.predict(test_tmp)
test_predict
rdc = RandomForestClassifier(n_estimators=250, criterion="entropy", random_state=0)
rdc.fit(x_train_m, y_train_m)
y_pred_m = rdc.predict(x_test_m)
print("Precision - {}".format(precision_score(y_test_m, y_pred_m, average="macro")))
print("Recall - {}".format(recall_score(y_test_m, y_pred_m, average="macro")))
print("Accuracy - {}".format(accuracy_score(y_test_m, y_pred_m)))
cm = confusion_matrix(y_test_m, y_pred_m)
print()
print("Confusion matrix")
print(cm)
print()
pred_rdc = rdc.predict(test_tmp)
len(pred_rdc)
pred_rdc
len(test_df[["PassengerId"]])
submission = test_df[["PassengerId"]].copy()  # copy to avoid SettingWithCopyWarning when adding the prediction column
submission["Survived"] = pred_rdc
# submission.to_csv('submission/submission_rdc.csv')
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Data Preprocessing - Data import
os.chdir(r"/kaggle/input/telco-customer-churn")
My_data = pd.read_csv("WA_Fn-UseC_-Telco-Customer-Churn.csv")
My_data.head()
print(My_data.describe())
My_data.dtypes
# Data Cleansing and Analysis
M_Values = My_data.isnull()
M_Values.head()
# Visualize the missing data
import seaborn as sns
sns.heatmap(data=M_Values, yticklabels=False, cbar=False, cmap="viridis")
# replace values for SeniorCitizen as a categorical feature
My_data["SeniorCitizen"] = My_data["SeniorCitizen"].replace({1: "Yes", 0: "No"})
num_cols = ["tenure", "MonthlyCharges", "TotalCharges"]
My_data[num_cols].describe()
def categorical_segment(column_name: str) -> pd.DataFrame:
segmented_df = My_data[[column_name, "Churn"]]
segmented_churn_df = segmented_df[segmented_df["Churn"] == "Yes"]
grouped_df = (
segmented_churn_df.groupby(column_name)
.count()
.reset_index()
.rename(columns={"Churn": "Churned"})
)
total_count_df = (
segmented_df.groupby(column_name)
.count()
.reset_index()
.rename(columns={"Churn": "Total"})
)
merged_df = pd.merge(grouped_df, total_count_df, how="inner", on=column_name)
merged_df["Percent_Churned"] = merged_df[["Churned", "Total"]].apply(
lambda x: (x[0] / x[1]) * 100, axis=1
)
return merged_df
categorical_columns_list = list(My_data.columns)[1:5] + list(My_data.columns)[6:18]
grouped_df_list = []
for column in categorical_columns_list:
grouped_df_list.append(categorical_segment(column))
grouped_df_list[0]
# Churn by categorical features
import matplotlib.pyplot as plt
for i, column in enumerate(categorical_columns_list):
fig, ax = plt.subplots(figsize=(13, 5))
plt.bar(
grouped_df_list[i][column],
        [100 - p for p in grouped_df_list[i]["Percent_Churned"]],
width=0.1,
color="g",
)
plt.bar(
grouped_df_list[i][column],
grouped_df_list[i]["Percent_Churned"],
        bottom=[100 - p for p in grouped_df_list[i]["Percent_Churned"]],
width=0.1,
color="r",
)
plt.title("Percent Churn by " + column)
plt.xlabel(column)
plt.ylabel("Percent Churned")
plt.legend(("Retained", "Churned"))
plt.show()
# Churn by numerical features
def continous_var_segment(column_name: str) -> pd.DataFrame:
segmented_df = My_data[[column_name, "Churn"]]
segmented_df = segmented_df.replace({"Churn": {"No": "Retained", "Yes": "Churned"}})
segmented_df["Customer"] = ""
return segmented_df
continous_columns_list = [list(My_data.columns)[18]] + [list(My_data.columns)[5]]
continous_segment_list = []
for var in continous_columns_list:
continous_segment_list.append(continous_var_segment(var))
import seaborn as sns
sns.set("talk")
for i, column in enumerate(continous_columns_list):
fig, ax = plt.subplots(figsize=(8, 11))
sns.violinplot(
x="Customer", y=column, data=continous_segment_list[i], hue="Churn", split=True
)
plt.title("Churn by " + column)
plt.show()
# Normalizing tenure and monthly charges and using K-means clustering to cluster churned customers based on them.
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
monthlyp_and_tenure = My_data[["MonthlyCharges", "tenure"]][My_data.Churn == "Yes"]
scaler = MinMaxScaler()
monthly_and_tenure_standardized = pd.DataFrame(
scaler.fit_transform(monthlyp_and_tenure)
)
monthly_and_tenure_standardized.columns = ["MonthlyCharges", "tenure"]
kmeans = KMeans(n_clusters=3, random_state=42).fit(monthly_and_tenure_standardized)
monthly_and_tenure_standardized["cluster"] = kmeans.labels_
fig, ax = plt.subplots(figsize=(13, 8))
plt.scatter(
monthly_and_tenure_standardized["MonthlyCharges"],
monthly_and_tenure_standardized["tenure"],
c=monthly_and_tenure_standardized["cluster"],
cmap="Spectral",
)
plt.title("Clustering churned users by monthly Charges and tenure")
plt.xlabel("Monthly Charges")
plt.ylabel("Tenure")
plt.show()
# Pre-processing the data using label encoding and one hot encoding to get it ready for ML Model
My_data_filtered = My_data.drop(["TotalCharges", "customerID"], axis=1)
def encode_binary(column_name: str):
global My_data_filtered
My_data_filtered = My_data_filtered.replace({column_name: {"Yes": 1, "No": 0}})
binary_feature_list = (
list(My_data_filtered.columns)[1:4]
+ [list(My_data_filtered.columns)[5]]
+ [list(My_data_filtered.columns)[15]]
+ [list(My_data_filtered.columns)[18]]
)
for binary_feature in binary_feature_list:
encode_binary(binary_feature)
My_data_processed = pd.get_dummies(My_data_filtered, drop_first=True)
My_data_processed.head(10)
My_data.Churn.value_counts()
# Importing all the necessary libraries
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import sklearn.metrics as metrics
X = np.array(My_data_processed.drop(["Churn"], axis=1))
y = np.array(My_data_processed["Churn"])
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
def get_metrics(model):
y_pred = model.predict(X_test)
y_prob = model.predict_proba(X_test)
y_actual = y_test
print()
print("-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
print()
print(
"Accuracy on unseen hold out set:",
metrics.accuracy_score(y_actual, y_pred) * 100,
"%",
)
print()
f1_score = metrics.f1_score(y_actual, y_pred)
precision = metrics.precision_score(y_actual, y_pred)
recall = metrics.recall_score(y_actual, y_pred)
score_dict = {"f1_score": [f1_score], "precision": [precision], "recall": [recall]}
score_frame = pd.DataFrame(score_dict)
print(score_frame)
print()
fpr, tpr, thresholds = metrics.roc_curve(y_actual, y_prob[:, 1])
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(fpr, tpr, "b-", alpha=0.5, label="(AUC = %.2f)" % metrics.auc(fpr, tpr))
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc="lower right")
plt.show()
# Model 1: Random Forest
rf = RandomForestClassifier(
n_estimators=20, n_jobs=-1, max_features="sqrt", random_state=42
)
param_grid1 = {
"min_samples_split": np.arange(2, 11),
"min_samples_leaf": np.arange(1, 11),
}
rf_cv = GridSearchCV(rf, param_grid1, cv=5)
rf_cv.fit(X_train, y_train)
print(rf_cv.best_params_)
print(rf_cv.best_score_)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
# time_series_2019_ncov_confirmed = pd.read_csv("../input/novel-corona-virus-2019-dataset/time_series_2019_ncov_confirmed.csv")
# time_series_2019_ncov_deaths = pd.read_csv("../input/novel-corona-virus-2019-dataset/time_series_2019_ncov_deaths.csv")
# time_series_2019_ncov_recovered = pd.read_csv("../input/novel-corona-virus-2019-dataset/time_series_2019_ncov_recovered.csv")
data.columns
data.Country.unique()
data.Country = data.Country.replace("Mainland China", "China")
data = data.drop(["Sno", "Date"], axis=1)
data[["Province/State", "Country"]] = data[["Province/State", "Country"]].fillna(
"Unknown"
)
data[["Confirmed", "Deaths", "Recovered"]] = data[
["Confirmed", "Deaths", "Recovered"]
].fillna(0.0)
latest_data = (
data.groupby("Country")["Last Update", "Confirmed", "Deaths", "Recovered"]
.max()
.reset_index()
)
latest_data
max_death = latest_data.sort_values(by="Deaths", ascending=False).reset_index()
max_death = max_death.head(5)
max_death
colors = ["#E13F29", "#D69A80", "#D63B59", "#AE5552", "#CB5C3B"]
plt.bar(latest_data["Country"], latest_data["Deaths"], align="center", alpha=0.5)
plt.show()
no_china_data = (
latest_data.groupby(["Country"])["Confirmed", "Recovered", "Deaths"]
.sum()
.reset_index()
)
no_china_data = no_china_data[
(no_china_data["Country"] != "China") & (grouped_cnf_df["Country"] != "Others")
]
import plotly.graph_objects as go
fig = go.Figure(
go.Bar(
x=no_china_data["Confirmed"],
y=no_china_data["Country"],
name="Confirmed",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=no_china_data["Deaths"],
y=no_china_data["Country"],
name="Deaths",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=no_china_data["Recovered"],
y=no_china_data["Country"],
name="Recovered",
orientation="h",
)
)
fig.update_layout(
barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000
)
fig.show()
only_china_data = data[data["Country"] == "China"]
only_china_data
only_china_data = (
    only_china_data.groupby(["Province/State"])[["Confirmed", "Deaths", "Recovered"]]
.sum()
.reset_index()
)
only_china_data
import plotly.graph_objects as go
fig = go.Figure(
go.Bar(
x=only_china_data["Confirmed"],
y=only_china_data["Province/State"],
name="Confirmed",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=only_china_data["Deaths"],
y=only_china_data["Province/State"],
name="Deaths",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=only_china_data["Recovered"],
y=only_china_data["Province/State"],
name="Recovered",
orientation="h",
)
)
fig.update_layout(
barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000
)
fig.show()
hubei_data = only_china_data[only_china_data["Province/State"] == "Hubei"]
hubei_data = hubei_data.drop(["Province/State"], axis=1)
hubei_data
hubei_data.plot(kind="pie", subplots=True, figsize=(8, 8))
import seaborn as sns
heat_data = only_china_data.set_index("Province/State")  # province names as row labels for the heatmap
sns.heatmap(heat_data, annot=True, fmt=".0f")
plt.show()
import plotly.express as px
df = px.data.gapminder().query("year == 2007")  # assumed single-year filter for this demo map
fig = px.scatter_geo(
df,
locations="iso_alpha",
color="continent",
hover_name="country",
size="pop",
projection="natural earth",
)
fig.show()
|
import gc
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
import torch
import torch.utils.data as data_utils
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch import nn
from torchsummary import summary
from tqdm.auto import tqdm
from sklearn.model_selection import train_test_split
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
full_train_df = pd.read_feather(
"/kaggle/input/full-bengali-graphemes-normalized/full_train_df.feather"
)
target_cols = ["grapheme_root", "vowel_diacritic", "consonant_diacritic"]
X_train = full_train_df.drop(target_cols, axis=1)
Y_train = full_train_df[target_cols]
del full_train_df
gc.collect()
X_train, X_val, Y_train, Y_val = train_test_split(
X_train, Y_train, test_size=0.002, random_state=666
)
gc.collect()
IMG_SIZE = 64
CHANNELS = 1
W, H = IMG_SIZE, IMG_SIZE
BATCH_SIZE = 512
# Convert to PyTorch tensors
X_train = torch.from_numpy(X_train.values.reshape(-1, CHANNELS, IMG_SIZE, IMG_SIZE))
X_val = torch.from_numpy(X_val.values.reshape(-1, CHANNELS, IMG_SIZE, IMG_SIZE))
Y_train = torch.from_numpy(Y_train.values)
Y_val = torch.from_numpy(Y_val.values)
print(f"Size of X_train: {X_train.shape}")
print(f"Size of Y_train: {Y_train.shape}")
print(f"Size of X_val: {X_val.shape}")
print(f"Size of Y_val: {Y_val.shape}")
# Visualize few samples of training dataset
fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(16, 8))
count = 0
for row in ax:
for col in row:
col.imshow(
X_train[count]
.reshape(IMG_SIZE, IMG_SIZE)
.cpu()
.detach()
.numpy()
.astype(np.float64)
)
col.set_title(str(Y_train[count].cpu().detach().numpy()))
count += 1
plt.show()
# ## Creating custom PyTorch data generator and Data Loader
class GraphemesDataset(Dataset):
"""
Custom Graphemes dataset
"""
def __init__(self, X, Y):
self.X = X
self.Y = Y
def __len__(self):
return len(self.X)
def __getitem__(self, index):
return self.X[index], self.Y[index]
train_dataset = GraphemesDataset(X_train, Y_train)
train_loader = data_utils.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.BatchNorm2d(num_features=32),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1),
nn.BatchNorm2d(num_features=64),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.layer3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.BatchNorm2d(num_features=128),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.layer4 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.BatchNorm2d(num_features=256),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.flatten = nn.Flatten()
self.fc1 = nn.Linear(in_features=4096, out_features=1024)
self.fc1_dropout = nn.Dropout2d(p=0.3)
self.fc2 = nn.Linear(in_features=1024, out_features=512)
self.fc3 = nn.Linear(in_features=512, out_features=168)
self.fc4 = nn.Linear(in_features=512, out_features=11)
self.fc5 = nn.Linear(in_features=512, out_features=7)
def forward(self, X):
output = self.layer1(X)
output = self.layer2(output)
output = self.layer3(output)
output = self.layer4(output)
output = self.flatten(output)
output = self.fc1(output)
output = self.fc1_dropout(output)
output = self.fc2(output)
output_root = self.fc3(output)
output_vowel = self.fc4(output)
output_consonant = self.fc5(output)
return output_root, output_vowel, output_consonant
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Model().to(device)
# Print summary of our model
summary(model, input_size=(CHANNELS, IMG_SIZE, IMG_SIZE))
LEARNING_RATE = 0.02
EPOCHS = 40
CUTMIX_ALPHA = 1
model = nn.DataParallel(model)
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
criterion = nn.CrossEntropyLoss()
def get_accuracy(
root_preds, target_root, vowel_pred, target_vowel, consonant_pred, target_consonant
):
assert (
len(root_preds) == len(target_root)
and len(vowel_pred) == len(target_vowel)
and len(consonant_pred) == len(target_consonant)
)
total = len(target_root) + len(target_vowel) + len(target_consonant)
_, predicted_root = torch.max(root_preds.data, axis=1)
_, predicted_vowel = torch.max(vowel_pred.data, axis=1)
_, predicted_consonant = torch.max(consonant_pred.data, axis=1)
del root_preds
del vowel_pred
del consonant_pred
torch.cuda.empty_cache()
correct = (
(predicted_root == target_root).sum().item()
+ (predicted_vowel == target_vowel).sum().item()
+ (predicted_consonant == target_consonant).sum().item()
)
del target_root
del target_vowel
del target_consonant
torch.cuda.empty_cache()
return correct / total
def shuffle_minibatch(x, y):
assert x.size(0) == y.size(0) # Size should be equal
indices = torch.randperm(x.size(0))
return x[indices], y[indices]
del X_train
del Y_train
gc.collect()
def clear_cache():
gc.collect()
torch.cuda.empty_cache()
X_val = X_val.to(device)
Y_val = Y_val.to(device)
# Split validation's Y into 3 separate targets
target_val_root, target_val_vowel, target_val_consonant = (
Y_val[:, 0],
Y_val[:, 1],
Y_val[:, 2],
)
del Y_val
clear_cache()
total_steps = len(train_loader)
val_acc_list = []
for epoch in range(EPOCHS):
for i, (x_train, y_train) in tqdm(enumerate(train_loader)):
x_train = x_train.to(device)
target_root = y_train[:, 0].to(device, dtype=torch.long)
target_vowel = y_train[:, 1].to(device, dtype=torch.long)
target_consonant = y_train[:, 2].to(device, dtype=torch.long)
# Forward pass
root_preds, vowel_pred, consonant_pred = model(x_train)
del x_train
clear_cache()
# Calculate loss
loss = (
criterion(root_preds, target_root)
+ criterion(vowel_pred, target_vowel)
+ criterion(consonant_pred, target_consonant)
)
# Backpropagate
optimizer.zero_grad() # Reason: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch
loss.backward()
optimizer.step()
del root_preds
del target_root
del vowel_pred
del target_vowel
del consonant_pred
del target_consonant
clear_cache()
# Calculate validation accuracy after each epoch
# Predict on validation set
root_val_preds, vowel_val_pred, consonant_val_pred = model(X_val)
val_acc = get_accuracy(
root_val_preds,
target_val_root,
vowel_val_pred,
target_val_vowel,
consonant_val_pred,
target_val_consonant,
)
val_acc_list.append(val_acc)
del root_val_preds
del vowel_val_pred
del consonant_val_pred
clear_cache()
print(
"Epoch [{}/{}], Loss: {:.4f}, Validation accuracy: {:.2f}%".format(
epoch + 1, EPOCHS, loss.item(), val_acc * 100
)
)
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), val_acc_list, label="val_accuracy")
plt.title("Accuracy")
plt.xlabel("# of epochs")
plt.ylabel("Accuracy")
plt.legend(loc="upper right")
plt.show()
torch.save(model, "40epochs.pt")
torch.save(model.state_dict(), "40epochs_state_dict.pt")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Step 1: Import data analysis modules
import numpy as np
import pandas as pd
import os
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
# Step 2 : Data import
# os.chdir(r'C:\Users\dhrimand\Desktop')
input_file = pd.read_csv("/kaggle/input/glass/glass.csv")
input_file.head(5)
input_file.dtypes
input_file.describe()
input_file["Type"].value_counts()
# Step 3: Clean up data
# Use the .isnull() method to locate missing data
missing_values = input_file.isnull()
missing_values.head(5)
# Step 4.1: Visualize the data
# Use seaborn to construct a heatmap to identify missing data
# data -> argument refers to the data used to create the heatmap
# yticklabels -> argument avoids plotting the column names
# cbar -> argument identifies if a colorbar is required or not
# cmap -> argument identifies the color of the heatmap
sns.heatmap(data=missing_values, yticklabels=False, cbar=False, cmap="viridis")
# Convert the target feature into a binary feature
input_file["label"] = input_file.Type.map({1: 0, 2: 0, 3: 0, 5: 1, 6: 1, 7: 1})
input_file.head(20)
# Step 5.1: Prepare input X parameters/features and output y
# split dataset in features and target variable
feature_cols = ["Na", "Mg", "Al", "Si", "K", "Ca", "Ba", "Fe"]
X = input_file[feature_cols] # Features
y = input_file.label # Target variable
# Import module to split dataset
from sklearn.model_selection import train_test_split
# Split data set into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=100
)
# import the class
from sklearn.linear_model import LogisticRegression
# instantiate the model (using the default parameters)
logreg = LogisticRegression()
# fit the model with data
logreg.fit(X_train, y_train)
#
y_pred = logreg.predict(X_test)
# import the metrics class
from sklearn import metrics
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix
# Visualizing Confusion Matrix using Heatmap
class_names = [0, 1] # name of classes
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
# create heatmap
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu", fmt="g")
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title("Confusion matrix", y=1.1)
plt.ylabel("Actual label")
plt.xlabel("Predicted label")
# Confusion Matrix Evaluation Metrics
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
print("Precision:", metrics.precision_score(y_test, y_pred))
print("Recall:", metrics.recall_score(y_test, y_pred))
# ROC Curve
y_pred_proba = logreg.predict_proba(X_test)[::, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
plt.legend(loc=4)
plt.show()
|
# **isnull() method returns a boolean Series that is True wherever we have a NULL record**
import pandas as pd
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a["ObservationDate"] = pd.to_datetime(a["ObservationDate"])
a["Last Update"] = pd.to_datetime(a["Last Update"])
a.head()
a.info()
mask = a["Province/State"].isnull()
a[mask]
# notnull() - the complementary method, which flags non-null records
mask2 = a["Province/State"].notnull()
a[mask2]
# **use between() method**
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
mask5 = a["Confirmed"].between(19, 20)
a[mask5] # it will only filter where mask is True
# **Finding Duplicate Values**
d = {
"Student": ["Aman", "Biswa", "Aman", "Disha", "Dhruvika", "Aman"],
"Marks": [23, 44, 33, 54, 78, 23],
"Age": [10, 19, 17, 18, 18, 18],
}
a = pd.DataFrame(d)
a
a["Student"].duplicated()
a["Age"].duplicated()
a
a["Age"].duplicated(keep="last")
# **Subset Argument**
a
m = a.duplicated(subset=["Student", "Marks", "Age"])
m
a[m]
a = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv")
a.head()
mask9 = a.duplicated(
subset=["ObservationDate", "Province/State", "Country/Region", "Last Update"],
keep=False,
)
a[mask9]
a.loc[45:55]
|
# # Celeb Faces Mediapipe Images
# Mediapipe face detection
import cv2
import os
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mediapipe as mp
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
paths0 = []
for dirname, _, filenames in os.walk(
"/kaggle/input/celeba-dataset/img_align_celeba/img_align_celeba"
):
for filename in filenames:
if filename[-4:] == ".jpg":
paths0 += [(os.path.join(dirname, filename))]
print(paths0[0:3])
paths = random.sample(paths0, 200)
paths2 = []
for i, path in enumerate(paths):
if i % 50 == 0:
print("i=", i)
file = path.split("/")[-1]
label = path.split("/")[-2]
image = cv2.imread(path)
image = cv2.resize(image, dsize=(400, 400))
with mp_pose.Pose(
static_image_mode=True,
model_complexity=2,
enable_segmentation=True,
min_detection_confidence=0.1,
) as pose:
try:
results = pose.process(cv2.flip(image, 1))
if results.pose_landmarks:
image_hight, image_width, _ = image.shape
annotated_image = cv2.flip(image.copy(), 1)
mp_drawing.draw_landmarks(
annotated_image,
results.pose_landmarks,
mp_pose.POSE_CONNECTIONS,
mp_drawing_styles.get_default_pose_landmarks_style(),
)
anno_img = cv2.flip(annotated_image, 1)
cv2.imwrite(file, anno_img)
paths2 += [file]
except:
continue
selected_num = random.sample(range(len(paths2)), 9)  # sample from the saved annotated images
fig, axes = plt.subplots(3, 3, figsize=(10, 10))
for i, ax in enumerate(axes.flat):
j = selected_num[i]
    img_path = paths2[j]
img = plt.imread(img_path)
ax.imshow(img)
ax.axis("off")
plt.tight_layout()
plt.show()
#!rm *
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn import metrics
data = pd.read_csv("/kaggle/input/rk-puram-ambient-air/rk.csv")
data["PM25"] = data["PM2.5"]
data = data.drop("PM2.5", axis=1)
data = data.replace(0, float("nan"))
data.describe()
data = data[16091:]
data = data.drop("Toluene", axis=1)
data["datetime"] = pd.to_datetime(data["From Date"], format="%d/%m/%Y %H:%M")
data = data.drop(["From Date", "To Date", "VWS"], axis=1)
data = data.set_index("datetime")
data["Hour"] = data.index.hour
data["Year"] = data.index.year
data["Month"] = data.index.month
data["Weekday"] = data.index.weekday_name
data.isnull().sum()
# *Min-Max before imputations*
print("AT: ", min(data["AT"]), max(data["AT"]))
print("BP: ", min(data["BP"]), max(data["BP"]))
print("RH: ", min(data["RH"]), max(data["RH"]))
print("SR: ", min(data["SR"]), max(data["SR"]))
print("WD: ", min(data["WD"]), max(data["WD"]))
print("WS: ", min(data["WS"]), max(data["WS"]))
print("CO: ", min(data["CO"]), max(data["CO"]))
print("NH3: ", min(data["NH3"]), max(data["NH3"]))
print("NO: ", min(data["NO"]), max(data["NO"]))
print("NO2: ", min(data["NO2"]), max(data["NO2"]))
print("NOx: ", min(data["NOx"]), max(data["NOx"]))
print("Ozone: ", min(data["Ozone"]), max(data["Ozone"]))
print("SO2: ", min(data["SO2"]), max(data["SO2"]))
print("PM2.5: ", min(data["PM25"]), max(data["PM25"]))
print("PM10: ", min(data["PM10"]), max(data["PM10"]))
# *Find the longest subsequence of NaN values in each column*
data_Ozone = pd.DataFrame(data["Ozone"])
a = data_Ozone.Ozone.values # Extract out relevant column from dataframe as array
m = np.concatenate(([True], ~np.isnan(a), [True])) # Mask
ss = np.flatnonzero(m[1:] != m[:-1]).reshape(-1, 2) # Start-stop limits
start, stop = ss[(ss[:, 1] - ss[:, 0]).argmax()] # Get max interval, interval limits
print("start: %d, stop: %d" % (start, stop))
print(data_Ozone[start : stop - 1].shape)
print(data_Ozone[start : stop - 1].isnull().sum())
# At most 81 consecutive samples are missing in any column (presumably why a rolling window of 83 is used for BP and SO2 below)
data["AT"] = data.AT.interpolate(method="linear", limit_area="inside")
data["BP"] = data.BP.fillna(
data.BP.rolling(
83,
min_periods=1,
).median()
)
data["RH"] = data.RH.interpolate(method="linear", limit_area="inside")
data["SR"] = data.SR.interpolate(method="linear", limit_area="inside")
data["WD"] = data.WD.interpolate(method="linear")
data["WS"] = data.WS.interpolate(method="linear", limit_area="inside")
data["CO"] = data.CO.interpolate(method="linear")
data["NH3"] = data.NH3.interpolate(method="linear")
data["NO"] = data.NO.interpolate(method="linear", limit_area="inside")
data["NO2"] = data.NO2.interpolate(method="linear", limit_area="inside")
data["NOx"] = data.NOx.interpolate(method="linear", limit_area="inside")
data["Ozone"] = data.Ozone.interpolate(method="linear", limit_area="inside")
data["PM10"] = data.PM10.interpolate(method="linear")
data["PM25"] = data.PM25.interpolate(method="linear", limit_area="inside")
data["SO2"] = data.SO2.fillna(
data.SO2.rolling(
83,
min_periods=1,
).median()
)
# *Min-Max After Imputation*
print("AT: ", min(data["AT"]), max(data["AT"]))
print("BP: ", min(data["BP"]), max(data["BP"]))
print("RH: ", min(data["RH"]), max(data["RH"]))
print("SR: ", min(data["SR"]), max(data["SR"])) #
print("WD: ", min(data["WD"]), max(data["WD"]))
print("WS: ", min(data["WS"]), max(data["WS"])) #
print("CO: ", min(data["CO"]), max(data["CO"]))
print("NH3: ", min(data["NH3"]), max(data["NH3"]))
print("NO: ", min(data["NO"]), max(data["NO"])) #
print("NO2: ", min(data["NO2"]), max(data["NO2"])) #
print("NOx: ", min(data["NOx"]), max(data["NOx"])) #
print("Ozone: ", min(data["Ozone"]), max(data["Ozone"])) #
print("SO2: ", min(data["SO2"]), max(data["SO2"]))
print("PM2.5: ", min(data["PM25"]), max(data["PM25"])) #
print("PM10: ", min(data["PM10"]), max(data["PM10"])) #
# AT: 1.65 45.95
# BP: 717.83 1098.6
# RH: 5.9 90.07
# SR: 0.25 439.62
# WD: 11.0 315.0
# WS: 0.05 37.53
# CO: 0.01 10.0
# NH3: 0.1 98.4
# NO: 0.1 499.9
# NO2: 0.1 468.23
# NOx: 0.05 499.6
# Ozone: 0.2 198.9
# SO2: 0.05 153.17
# PM2.5: 0.5 954.0
# PM10: 2.5 998.0
data.isnull().sum() # No. of missing values after imputing
# *Converting to CSV file*
data.to_csv("rk_imputed.csv", index=True)
# data['RH'] = data.RH.interpolate(method='akima', limit_area='inside')
data["AT"]
|
# # DS4G: Environmental Insights Explorer
# ### Exploring alternatives for emissions factor calculations
# 
# # CONTEXT
# Current emissions factors methodologies are based on **time-consuming** data collection and may include errors derived from a lack of access to granular datasets, inability to refresh data on a frequent basis, overly general modeling assumptions, and inaccurate reporting of emissions sources like fuel consumption.
# # PROJECT OVERVIEW
# The Environmental Insights Explorer team at Google is keen to gather insights on ways to improve calculations of global emissions factors for sub-national regions. The ultimate goal of this challenge is to test if **calculations of emissions factors using remote sensing techniques** are possible and on par with calculations of emissions factors from current methodologies.
# # PROBLEM STATEMENT
# Current [emissions factors methodologies](https://www.epa.gov/air-emissions-factors-and-quantification/basic-information-air-emissions-factors-and-quantification#About%20Emissions%20Factors) are based on time-consuming data collection and may include errors derived from a lack of access to granular datasets, inability to refresh data on a frequent basis, overly general modeling assumptions, and inaccurate reporting of emissions sources like fuel consumption. This begs the question: What if there was a different way to calculate or measure emissions factors?
# An initial list of datasets covering the geographic boundary of Puerto Rico is provided to serve as the foundation for this analysis. As an island, there are fewer confounding factors from nearby areas. Puerto Rico also offers a unique fuel mix and a distinctive energy system layout that should make it easier to isolate pollution attributable to power generation in the remote sensing data.
# **Documentation**
# - Is the code documented in a way that is easily reproducible (i.e. thorough comments, organized notebook, clear scripts/code)?
# - Does the notebook narrative clearly state all assumptions that are factored into the value and the potential impact of fluctuations? (i.e. Which plants / fuel types did the author use for their analysis, and why?)
# - Does the notebook contain data visualizations (e.g. time graphs, etc.) that help convey the author’s findings and/or recommendations?
# - Did the author upload and properly cite all files for any supporting datasets that were used for their analysis?
# **Recommendation**
# - Did the author write a compelling and coherent narrative explaining their rationale for the scalability and accuracy of their model and recommendation?
# - Does the recommendation include an explanation of what data/assumptions could be substituted to produce the value for another geospatial area or location?
# - Is there documentation about the pros and cons of the model, and the geographic nuances that may have impacted the emissions factor?
# - Does the explanation convey why/how this model improves current emissions factors calculations?
# - Does the recommendation indicate other datasets/factors/assumptions (and why) that could be useful to include to make future emissions factor calculation methodologies more robust?
# **Accuracy**
# - Does the model produce a value for an annual average historical grid-level electricity emissions factor (based on a rolling 12 months of data from July 2018 - July 2019) for the sub-national region?
# - Bonus points for smaller time slices of the average historical emissions factors, such as one per month for the 12-month period.
# - Bonus points for participants that develop a methodology to calculate a marginal emissions factor for the sub-national region using the provided datasets.
# # The general equation for emissions estimation is:
# ### E = A x EF x (1-ER/100)
# where:
# E = emissions; A = activity rate; EF = emission factor; and ER = overall emission reduction efficiency (%)
# therefore
# ### EF = E / [A x (1-ER/100)]
# - To simplify things a bit, I'll reduce that equation to: EF = E / A
# - Simplified Emissions Factor = Emissions / Activity Rate
# **Which again can be simplified to the following:
# Simplified Emissions Factor = (Measure of NO2 emissions) / (Quantity of electricity generated)
# We can find a "measure of NO2 emissions" from the Sentinel-5P dataset. Likewise, we can find a measure of the quantity of electricity generated from the power plant dataset. To demonstrate how to get started with the data, we will load and preview those two data sources and then we will calculate a simplified emissions factor for a single power plant on the island of Vieques in Puerto Rico.**
# # About the data
# [Global Power Plant database ](https://developers.google.com/earth-engine/datasets/catalog/WRI_GPPD_power_plants) by WRI
# > Description
# The Global Power Plant Database is a comprehensive, open source database of power plants around the world. It centralizes power plant data to make it easier to navigate, compare and draw insights for one’s own analysis. The database covers approximately 30,000 power plants from 164 countries and includes thermal plants (e.g. coal, gas, oil, nuclear, biomass, waste, geothermal) and renewables (e.g. hydro, wind, solar). Each power plant is geolocated and entries contain information on plant capacity, generation, ownership, and fuel type. It will be continuously updated as data becomes available.
# [Sentinel 5P OFFL NO2](https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S5P_OFFL_L3_NO2) by [EU/ESA/Copernicus](https://sentinel.esa.int/web/sentinel/user-guides/sentinel-5p-tropomi/document-library)
# > Sentinel-5 Precursor
# Sentinel-5 Precursor is a satellite launched on 13 October 2017 by the European Space Agency to monitor air pollution. The onboard sensor is frequently referred to as Tropomi (TROPOspheric Monitoring Instrument). The OFFL/NO2 is a dataset that provides offline high-resolution imagery of **NO2 concentration**.
# [Global Forecast System 384-Hour Predicted Atmosphere Data](https://developers.google.com/earth-engine/datasets/catalog/NOAA_GFS0P25) by NOAA/NCEP/EMC
# > The Global Forecast System (GFS) is a weather forecast model produced by the National Centers for Environmental Prediction (NCEP). The GFS dataset consists of selected model outputs (described below) as gridded forecast variables. The 384-hour forecasts, with 3-hour forecast interval, are made at 6-hour temporal resolution (i.e. updated four times daily). Use the 'creation_time' and 'forecast_time' properties to select data of interest.
# [Global Land Data Assimilation System](https://developers.google.com/earth-engine/datasets/catalog/NASA_GLDAS_V021_NOAH_G025_T3H) by NASA
# > Global Land Data Assimilation System (GLDAS) ingests satellite- and ground-based observational data products. Using advanced land surface modeling and data assimilation techniques, it generates optimal fields of land surface states and fluxes.
# Participants may also consider using other public datasets related to trade commodities for fuel types, total fuel consumed, and/or data from the [US Energy Information Agency (EIA)](https://www.eia.gov/state/data.php?sid=RQ#CarbonDioxideEmissions).
# # How green energy will change our future
# How will green energy change our future? What will our future look like with green energy? The growth of green energy goes hand in hand with change: our future will not only include green energy, it will also be shaped by it.
#
# Suppress warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
from IPython.display import HTML
HTML(
'<iframe width="800" height="520" src="https://www.youtube.com/embed/cEjT2_NCeFc" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
)
# ### Imports
# We are using a typical data science stack: ``numpy``, ``pandas``, ``sklearn``, ``matplotlib``.
import matplotlib.pyplot as plt
import rasterio as rio
import folium
import os
import numpy as np
import pandas as pd
# ## Some Functions
def plot_points_on_map(
dataframe,
begin_index,
end_index,
latitude_column,
latitude_value,
longitude_column,
longitude_value,
zoom,
):
df = dataframe[begin_index:end_index]
location = [latitude_value, longitude_value]
plot = folium.Map(location=location, zoom_start=zoom)
for i in range(0, len(df)):
popup = folium.Popup(str(df.primary_fuel[i : i + 1]))
folium.Marker(
[df[latitude_column].iloc[i], df[longitude_column].iloc[i]], popup=popup
).add_to(plot)
return plot
def overlay_image_on_puerto_rico(file_name, band_layer):
band = rio.open(file_name).read(band_layer)
m = folium.Map([lat, lon], zoom_start=8)
folium.raster_layers.ImageOverlay(
image=band,
bounds=[
[
18.6,
-67.3,
],
[17.9, -65.2],
],
colormap=lambda x: (1, 0, 0, x),
).add_to(m)
return m
def plot_scaled(file_name):
vmin, vmax = np.nanpercentile(file_name, (5, 95)) # 5-95% stretch
img_plt = plt.imshow(file_name, cmap="gray", vmin=vmin, vmax=vmax)
plt.show()
def split_column_into_new_columns(
dataframe, column_to_split, new_column_one, begin_column_one, end_column_one
):
for i in range(0, len(dataframe)):
dataframe.loc[i, new_column_one] = dataframe.loc[i, column_to_split][
begin_column_one:end_column_one
]
return dataframe
# # Explore the power plant data
power_plants = pd.read_csv(
"/kaggle/input/ds4g-environmental-insights-explorer/eie_data/gppd/gppd_120_pr.csv"
)
power_plants = split_column_into_new_columns(power_plants, ".geo", "latitude", 50, 66)
power_plants = split_column_into_new_columns(power_plants, ".geo", "longitude", 31, 48)
lat = 18.200178
lon = -66.664513
plot_points_on_map(power_plants, 0, 425, "latitude", lat, "longitude", lon, 9)
# # Preview the NO2 emissions data
# Explore the NO2 emissions data
image = "/kaggle/input/ds4g-environmental-insights-explorer/eie_data/s5p_no2/s5p_no2_20180708T172237_20180714T190743.tif"
overlay_image_on_puerto_rico(image, band_layer=7)
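# A minimal sketch of turning the raster into a single "measure of NO2 emissions": read one band
# with rasterio and take its mean. Using band 7 (as in the overlay above) and treating the
# band-wide mean as the emissions measure are simplifying assumptions for illustration only.
no2_band = rio.open(image).read(7)
mean_no2 = float(np.nanmean(no2_band))
print(f"Mean value of band 7 over the raster: {mean_no2:.6f}")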
# # Explore the weather data
image = "/kaggle/input/ds4g-environmental-insights-explorer/eie_data/gldas/gldas_20180702_1500.tif"
overlay_image_on_puerto_rico(image, band_layer=5)
## Work in progress
|
# ## Importing libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
df = pd.read_csv("Mall_Customers.csv")
df
# ## Explore
df.describe()
df.info()
mask = df["Spending Score (1-100)"] > 50
df_score = df[mask]
df_score
df_score.describe()
# ## Histogram
plt.figure(figsize=(15, 6))
n = 0
for x in ["Age", "Annual Income (k$)", "Spending Score (1-100)"]:
n += 1
plt.subplot(1, 3, n)
plt.subplots_adjust(hspace=0.5, wspace=0.5)
sns.histplot(df[x], bins=20)
plt.title("Distplot of {}".format(x))
plt.show()
# These features, although slightly skewed, tend to be normally distributed.
df_score["Age"].hist()
plt.xlabel("Age")
plt.ylabel("Count")
plt.title("Spending Score(51 ~ 100): Age Distribution")
# Our histogram tells us that many of the people with a spending score greater than 50 are younger.
# ## Count Plot Of Gender
plt.figure(figsize=(15, 5))
sns.countplot(y="Gender", data=df_score)
plt.title("Spending Score(51 ~ 100): Gender Distribution")
plt.show()
plt.figure(figsize=(15, 5))
sns.countplot(y="Gender", data=df)
plt.title("Gender Distribution")
plt.show()
# ## Plotting the Relation between Age, Annual Income and Spending Score
plt.figure(1, figsize=(15, 7))
n = 0
for x in ["Age", "Annual Income (k$)", "Spending Score (1-100)"]:
for y in ["Age", "Annual Income (k$)", "Spending Score (1-100)"]:
n += 1
plt.subplot(3, 3, n)
plt.subplots_adjust(hspace=0.5, wspace=0.5)
sns.regplot(x=x, y=y, data=df)
plt.ylabel(y.split()[0] + " " + y.split()[1] if len(y.split()) > 1 else y)
plt.show()
plt.figure(1, figsize=(15, 6))
for gender in ["Male", "Female"]:
plt.scatter(
x="Age",
y="Annual Income (k$)",
data=df[df["Gender"] == gender],
s=200,
alpha=0.5,
label=gender,
)
plt.xlabel("Age"), plt.ylabel("Annual Income (k$)")
plt.title("Age vs Annual Income w.r.t Gender")
plt.legend()
plt.show()
plt.figure(1, figsize=(15, 6))
for gender in ["Male", "Female"]:
plt.scatter(
x="Annual Income (k$)",
y="Spending Score (1-100)",
data=df[df["Gender"] == gender],
s=200,
alpha=0.5,
label=gender,
)
plt.xlabel("Annual Income (k$)"), plt.ylabel("Spending Score (1-100)")
plt.title("Annual Income vs Spending Score w.r.t Gender")
plt.legend()
plt.show()
# Distribution of values in Age, Annual Income and Spending Score according to Gender
plt.figure(1, figsize=(15, 7))
n = 0
for cols in ["Age", "Annual Income (k$)", "Spending Score (1-100)"]:
n += 1
plt.subplot(1, 3, n)
plt.subplots_adjust(hspace=0.5, wspace=0.5)
sns.violinplot(x=cols, y="Gender", data=df, palette="vlag")
sns.swarmplot(x=cols, y="Gender", data=df)
plt.ylabel("Gender" if n == 1 else "")
plt.title("Boxplots & Swarmplots" if n == 2 else "")
plt.show()
# ## Split
X = df.iloc[:, [3, 4]]
print(f"X Shape {X.shape}")
X.head()
# ## Clustering Using K-Means
# ### Iterate
# Use a for loop to build and train a K-Means model where n_clusters ranges from 2 to 12 (inclusive). Each time a model is trained, calculate the inertia and add it to the list inertia_errors, then calculate the silhouette score and add it to the list silhouette_scores.
# ## Segmentation using Annual Income and Spending Score
n_clusters = range(2, 13)
inertia_errors = []
silhouette_scores = []
# Add `for` loop to train model and calculate inertia, silhouette score.
for k in n_clusters:
model = KMeans(n_clusters=k, random_state=42)
# TRAIN MODEL
model.fit(X)
# CALCULATE INERTIA
inertia_errors.append(model.inertia_)
# CALCULATE SILHOUETTE SCORE
silhouette_scores.append(silhouette_score(X, model.labels_))
print("Inertia:", inertia_errors[:3])
print()
print("Silhouette Scores:", silhouette_scores[:3])
# ## Elbow Plot
# Create line plot of `inertia_errors` vs `n_clusters`
fig = px.line(
x=range(2, 13),
y=inertia_errors,
title="K-Means Model: Inertia vs Number of Clusters",
)
fig.update_layout(xaxis_title="Number of Clusters", yaxis_title="Inertia")
fig.show()
# Create a line plot of `silhouette_scores` vs `n_clusters`
fig = px.line(
x=n_clusters,
y=silhouette_scores,
title="K-Means Model: Silhouette Score vs Number of Clusters",
)
fig.update_layout(xaxis_title="Number of Clusters", yaxis_title="Silhouette Score")
fig.show()
# The best number of clusters is 5
final_model = KMeans(n_clusters=5, random_state=42)
final_model.fit(X)
labels = final_model.labels_
centroids = final_model.cluster_centers_
print(labels[:5])
# ## Communicate
# plot "Annual Income" vs "Spending Score" with final_model labels
sns.scatterplot(
x=df["Annual Income (k$)"],
y=df["Spending Score (1-100)"],
hue=labels,
palette="deep",
)
sns.scatterplot(x=centroids[:, 0], y=centroids[:, 1], color="gray", marker="*", s=500)
plt.xlabel("Annual Income (k$)")
plt.ylabel("Spending Score (1-100)")
plt.title("Annual Income vs. Spending Score")
# Create side-by-side bar chart of the mean feature values per cluster
cluster_means = X.groupby(final_model.labels_).mean()
fig = px.bar(
    cluster_means, barmode="group", title="Annual income and spending score by Cluster"
)
fig.update_layout(xaxis_title="Clusters", yaxis_title="Value")
fig.show()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train_df = pd.read_csv("/kaggle/input/spaceshiptitanic/train.csv")
test_df = pd.read_csv("/kaggle/input/spaceshiptitanic/test.csv")
# ### Splitting train_df into features (train) and target so the same preprocessing can be applied to both train and test
train, target = train_df.iloc[:, :-1], train_df.iloc[:, -1]
# # understanding the dataset
train.info()
# #### printing all unique values in the dataset
nan_cols = []
for i in train.columns:
if train[i].dtype == "O":
nan_cols.append(i)
print(f"unique values in {i} : {train[i].unique()}")
# ##### From the output we see that there are "nan" string values, and we will replace them with np.nan
for i in nan_cols:
train[i].replace({"nan": np.nan}, inplace=True)
for i in train.columns:
if train[i].dtype == "O":
nan_cols.append(i)
print(f"unique values in {i} : {train[i].unique()}")
# ### inferences
# There are missing values in various columns; we will handle them.
# PassengerId is just a sequence of numbers, so we will drop it.
# Let's check every column to better understand it.
# We will divide the columns into numerical and categorical and fix the data types.
# For each column we will get its values and put them in a chart, so I will write a function to do so.
# # Handle Missing Values
# #### We will create a dataframe of null-value counts and their percentages
null_columns = pd.DataFrame(
train[train.columns[train.isnull().any()]].isnull().sum() * 100 / train.shape[0],
columns=["Percentage of NaN values"],
)
null_columns["Total NaN Values"] = (
train[train.columns[train.isnull().any()]].isnull().sum()
)
null_columns
# #### visualize null values
null_cols = null_columns.index
# Visual representation of columns with missing values
import missingno as mno
mno.matrix(train[null_cols], figsize=(20, 6))
plt.show()
# ### inferences
# The number of missing values is small.
# There is no strong correlation between missing values across columns.
# We can either drop them or fill them.
# Here I will fill them.
# There are 4 main ways to fill a missing value:
#
# mean
# median
# most frequent
# constant value
#
# To choose one of them, we should look at the distribution and box plot, because the mean is affected by outliers
# (a small sketch of the four strategies is shown right after this list).
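# A minimal, self-contained sketch of the four imputation strategies using sklearn's SimpleImputer
# on a tiny hypothetical column; the real data below is imputed with the most_frequent strategy.
from sklearn.impute import SimpleImputer
toy_col = np.array([[1.0], [2.0], [2.0], [np.nan], [10.0]])
for strategy in ["mean", "median", "most_frequent", "constant"]:
    imp = SimpleImputer(strategy=strategy, fill_value=0)  # fill_value is only used by "constant"
    print(strategy, imp.fit_transform(toy_col).ravel())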
for i, col in enumerate(null_cols):
plt.figure(figsize=(16, 8))
if train[col].dtype == "O":
plt.subplot(1, 1, 1)
sns.countplot(train[col], color="blue")
else:
plt.subplot(1, 2, 1)
sns.distplot(train[col], color="blue")
plt.subplot(1, 2, 2)
sns.boxplot(train[col])
plt.show()
# ### inferences
# As we can see from the figures, Cabin and Name are almost entirely unique values, so they add little usable signal for the target column and I will drop them.
# From the distributions above there are a lot of outliers and most distributions are right-skewed, so we will choose the most_frequent strategy to fill the missing values.
#
# droping the name and cabin coloumn from both datasets
train.drop(["Name", "Cabin", "PassengerId"], axis=1, inplace=True)
test_df.drop(["Name", "Cabin", "PassengerId"], axis=1, inplace=True)
# using SimpleImputer with most_frequent strategy
from sklearn.impute import SimpleImputer
most_freq_imputer = SimpleImputer(
strategy="most_frequent",
)
train_imputed = most_freq_imputer.fit_transform(train)
test_imputed = most_freq_imputer.transform(test_df)  # reuse the most-frequent values learned from the training set
train_im = pd.DataFrame(train_imputed, columns=train.columns)
test_im = pd.DataFrame(test_imputed, columns=test_df.columns)
train_im.info()
cols = ["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
for i in cols:
train_im[i] = train_im[i].astype(np.float64)
train_im.info()
# #### as we can see we handled all the null values
# # Data Preprocessing
# #### Let's divide the columns into numerical and categorical and analyze each category
numerical_columns = [i for i in train_im.columns if train_im[i].dtype == "float64"]
categorical_columns = [i for i in train_im.columns if train_im[i].dtype == "O"]
print("numerical_columns : ", numerical_columns)
print("categorical_columns : ", categorical_columns)
# #### Plotting the counts of categorical variables
plt.figure(figsize=(12, 16))
for i, col in enumerate(categorical_columns):
plt.subplot(3, 2, i + 1)
sns.countplot(data=train_im, x=col)
plt.title(f"count values for {col}")
plt.tight_layout()
plt.show()
# #### Let's take a look at the numerical features and plot their distributions
for i, col in enumerate(numerical_columns):
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1)
sns.distplot(train_im[col], color="green")
plt.title(f"distribution of {col}")
plt.subplot(1, 2, 2)
sns.boxplot(train_im[col])
plt.title(f"box plot of {col}")
plt.show()
# ### Inferences
# There are a lot of outliers.
# We will try to handle them using IQR-based filtering.
# # Outliers detection
def find_outliers(col):
q1 = train_im[col].quantile(0.25)
q3 = train_im[col].quantile(0.75)
iqr = q3 - q1
upper_limit = q3 + 1.5 * iqr
lower_limit = q1 - 1.5 * iqr
train_im[train_im[col] > upper_limit]
train_im[train_im[col] < lower_limit]
new_df = train_im[train_im[col] < upper_limit]
plt.figure(figsize=(16, 8))
plt.subplot(2, 2, 1)
sns.distplot(train_im[col])
plt.subplot(2, 2, 2)
sns.boxplot(train_im[col])
plt.subplot(2, 2, 3)
sns.distplot(new_df[col])
plt.subplot(2, 2, 4)
sns.boxplot(new_df[col])
return plt.show()
for col in numerical_columns:
find_outliers(col)
# ### Inferences
# There is not much change in the distributions,
# so there is no need to apply the filtering to the actual train data.
# # Data Scaling & Encoding
# Data encoding transforms categorical features into numerical features so we can feed them to a machine learning algorithm.
# Data scaling puts the features on the same scale; there are several techniques to do so (see the tiny illustration below):
#
# StandardScaler => transforms the data using its mean and standard deviation: subtract the mean and divide by the standard deviation
# MinMaxScaler => subtracts the minimum and divides by the difference between the maximum and minimum of the feature
#
#
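# A tiny hedged illustration of the two scalers on a made-up 1-D array (the values are illustrative only):
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler

toy = np.array([[1.0], [2.0], [3.0], [10.0]])
print(StandardScaler().fit_transform(toy).ravel())  # (x - mean) / std
print(MinMaxScaler().fit_transform(toy).ravel())  # (x - min) / (max - min)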
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
transformer_list = [
("encoder", OneHotEncoder(dtype="int", drop="first"), categorical_columns),
("scale", StandardScaler(), numerical_columns),
]
column_transformer = ColumnTransformer(transformer_list)
train_transformed_raw = column_transformer.fit_transform(train_im)
test_transforme_raw = column_transformer.fit_transform(test_im)
scale_encoded_train = pd.DataFrame(
train_transformed_raw, columns=column_transformer.get_feature_names_out()
)
scale_encoded_test = pd.DataFrame(
test_transforme_raw, columns=column_transformer.get_feature_names_out()
)
print("scale_encoded_train shape : ", scale_encoded_train.shape)
print("scale_encoded_test shape : ", scale_encoded_test.shape)
# encode the label
lbl_col = LabelEncoder().fit_transform(target)
target_encoded = pd.Series(lbl_col, name="Transported")
target_encoded.head()
# # Feature Importance
# I will plot the correlation between the features and the target to select the best features and reduce the dimensionality of the data.
#
plt.figure(figsize=(13, 13))
corr_train = scale_encoded_train.copy()
corr_train["Transported"] = lbl_col
sns.heatmap(corr_train.corr(), annot=True, fmt=".2f")
plt.xticks(size=14)
plt.yticks(size=14)
plt.show()
# ### Inferences
# From the correlation matrix we can see that there is one column that does not affect our target at all, so I will drop it.
#
scale_encoded_train.drop(["encoder__Destination_PSO J318.5-22"], axis=1, inplace=True)
scale_encoded_test.drop(["encoder__Destination_PSO J318.5-22"], axis=1, inplace=True)
# # Model Selection
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
scale_encoded_train, target_encoded, random_state=0, test_size=0.2
)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
classifiers = [
LogisticRegression(),
KNeighborsClassifier(),
SVC(kernel="linear", C=10),
SVC(kernel="poly", C=10),
SVC(kernel="rbf", C=10),
DecisionTreeClassifier(),
RandomForestClassifier(),
ExtraTreesClassifier(),
GradientBoostingClassifier(),
]
from time import time
for model in classifiers:
start = time()
model.fit(x_train, y_train)
train_time = time() - start
start = time()
y_predict_train = model.predict(x_train)
y_predict_test = model.predict(x_test)
prediction_time = time() - start
print(model)
print(f"train time {train_time}")
print(f"prediction time {train_time}")
print(f"model train accuracy {accuracy_score(y_train, y_predict_train)}")
print(f"model test accuracy {accuracy_score(y_test, y_predict_test)}")
print(classification_report(y_test, y_predict_test))
# ### Inferences
# From the previous classifiers we found that the Gradient Boosting Classifier is the best one, so let's find its best parameters.
# # Model Tuning
# ### Gradient Boosting Classifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import make_scorer
scoring = {
"accuracy": make_scorer(accuracy_score),
"precision": make_scorer(precision_score),
"recall": make_scorer(recall_score),
}
param_grid = {
"loss": ["deviance", "exponential"],
"learning_rate": [0.001, 0.01, 0.05],
"criterion": ["friedman_mse", "mse"],
"max_depth": [3, 5, 7],
"max_features": ["auto", "sqrt", "log2"],
}
gb = GradientBoostingClassifier()
grid_search = GridSearchCV(gb, param_grid, scoring=scoring, refit="accuracy", cv=2)
grid_search.fit(x_train, y_train)
print("Best parameters: ", grid_search.best_params_)
print("Best score: ", grid_search.best_score_)
gdc = GradientBoostingClassifier(
criterion="friedman_mse",
learning_rate=0.05,
loss="deviance",
max_depth=7,
max_features="log2",
)
gdc.fit(x_train, y_train)
y_predict_train = gdc.predict(x_train)
y_predict_test = gdc.predict(x_test)
train_accuracy = accuracy_score(y_train, y_predict_train)
test_accuracy = accuracy_score(y_test, y_predict_test)
print("train accuracy : ", train_accuracy)
print("test accuracy : ", test_accuracy)
test = pd.read_csv("/kaggle/input/spaceshiptitanic/test.csv")
submission = pd.DataFrame()
submission["PassengerId"] = test["PassengerId"]
submission["Transported"] = gdc.predict(scale_encoded_test)
submission["Transported"].replace({0: "False", 1: "True"}, inplace=True)
submission.to_csv("out.csv", index=False)
|
import glob
import cv2
import itertools
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.layers import *
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import imread
from matplotlib.pyplot import imshow
from tensorflow.keras.preprocessing import image
from keras.applications import DenseNet201
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
NUM_CLASSES = 2
IMG_SIZE = 224
import os
import gc
images = []
labels = []
for sub_dir in os.listdir("/kaggle/input/wildfiredataset/WildFireNet"):
image_list = os.listdir(
os.path.join("/kaggle/input/wildfiredataset/WildFireNet", sub_dir)
) # list of all image names in the directory
image_list = list(map(lambda x: os.path.join(sub_dir, x), image_list))
images.extend(image_list)
labels.extend([sub_dir] * len(image_list))
df = pd.DataFrame({"Images": images, "Labels": labels})
df = df.sample(frac=1).reset_index(drop=True) # To shuffle the data
df_train = df.head(6000)
df_test = df.tail(1500)
train_data = df_train.groupby("Labels").count()
train_data
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
horizontal_flip=True,
vertical_flip=True,
shear_range=0.2,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
rotation_range=50,
validation_split=0.2,
fill_mode="nearest",
)
train_generator = train_datagen.flow_from_dataframe(
dataframe=df_train,
directory="/kaggle/input/wildfiredataset/WildFireNet",
x_col="Images",
y_col="Labels",
subset="training",
batch_size=16,
seed=42,
shuffle=True,
class_mode="categorical",
target_size=(IMG_SIZE, IMG_SIZE),
)
valid_generator = train_datagen.flow_from_dataframe(
dataframe=df_train,
directory="/kaggle/input/wildfiredataset/WildFireNet",
x_col="Images",
y_col="Labels",
subset="validation",
batch_size=16,
seed=42,
shuffle=True,
class_mode="categorical",
target_size=(IMG_SIZE, IMG_SIZE),
)
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(
class_weight="balanced",
classes=np.unique(train_generator.classes),
y=train_generator.classes,
)
train_class_weights = dict(enumerate(class_weights))
train_class_weights
def ECA(x):
k_size = 3
squeeze = tf.reduce_mean(x, (2, 3), keepdims=False)
squeeze = tf.expand_dims(squeeze, axis=1)
attn = layers.Conv1D(
filters=1,
kernel_size=k_size,
padding="same",
kernel_initializer="random_normal",
use_bias=False,
)(squeeze)
attn = tf.expand_dims(tf.transpose(attn, [0, 2, 1]), 3)
attn = tf.math.sigmoid(attn)
    scale = x * attn
    return scale
def build_model(num_classes):
inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
x = inputs
model = DenseNet201(include_top=False, input_tensor=x, weights="imagenet")
for layer in model.layers:
layer.trainable = False
# Rebuild top
x = layers.BatchNormalization()(model.output)
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
x1 = layers.Resizing(IMG_SIZE, IMG_SIZE)(inputs)
x2 = layers.Rescaling(1.0 / 255)(x1)
x3 = layers.Conv2D(
filters=32,
kernel_size=(3, 3),
activation="relu",
padding="same",
input_shape=(IMG_SIZE, IMG_SIZE, 3),
)(x2)
x4 = ECA(x3)
x5 = layers.BatchNormalization(axis=-1)(x4)
x6 = layers.MaxPool2D(pool_size=(2, 2))(x5)
x7 = layers.Conv2D(
filters=64, kernel_size=(3, 3), activation="relu", padding="same"
)(x6)
x8 = ECA(x7)
x9 = layers.BatchNormalization(axis=-1)(x8)
x10 = layers.MaxPool2D(pool_size=(2, 2))(x9)
x11 = layers.Conv2D(
filters=128, kernel_size=(3, 3), activation="relu", padding="same"
)(x10)
x12 = ECA(x11)
x13 = layers.BatchNormalization(axis=-1)(x12)
x14 = layers.MaxPool2D(pool_size=(2, 2))(x13)
x15 = layers.Conv2D(
filters=256, kernel_size=(3, 3), activation="relu", padding="same"
)(x14)
x16 = ECA(x15)
x17 = layers.BatchNormalization(axis=-1)(x16)
x18 = layers.MaxPool2D(pool_size=(2, 2))(x17)
y1 = layers.Flatten()(x18)
concat = Concatenate()([y1, x])
dense1 = layers.Dense(512, activation="relu")(concat)
dropout1 = Dropout(0.4)(dense1)
dense2 = layers.Dense(256, activation="relu")(dropout1)
dropout2 = Dropout(0.4)(dense2)
dense3 = layers.Dense(128, activation="relu")(dropout2)
dropout3 = Dropout(0.4)(dense3)
outputs = layers.Dense(NUM_CLASSES, activation="softmax", name="pred")(dropout3)
# Compile
    model = tf.keras.Model(inputs, outputs, name="DenseNet201_ECA")
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5)
    model.compile(
        # "accuracy" (lowercase) resolves to categorical accuracy for this loss;
        # the capitalized "Accuracy" metric would compare raw probabilities element-wise
        optimizer=optimizer,
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
return model
model = build_model(num_classes=NUM_CLASSES)
model.summary()
import matplotlib.pyplot as plt
def plot_hist(hist):
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.plot(hist.history["Accuracy"])
plt.plot(hist.history["val_Accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="lower right")
plt.subplot(1, 2, 2)
plt.plot(hist.history["loss"])
plt.plot(hist.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper right")
plt.show()
callback = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=7)
epochs = 30
hist = model.fit_generator(
generator=train_generator,
validation_data=valid_generator,
epochs=epochs,
class_weight=train_class_weights,
callbacks=[callback],
verbose=1,
)
plot_hist(hist) # took 2h:41m to complete
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.impute import SimpleImputer
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# KNN Test
def train_test_KNN(X_train, y_train, X_test, y_test):
best_neighbors = 0
best_score = 0
current_train_score = 0
for neighbors in range(1, 100):
knn = KNeighborsClassifier(n_neighbors=neighbors)
knn.fit(X_train, y_train)
prediction_train = knn.score(X_train, y_train)
prediction_test = knn.score(X_test, y_test)
if prediction_test > best_score:
best_score = prediction_test
best_neighbors = neighbors
current_train_score = prediction_train
print(
"KNN Score:\t Train: {:.4f} \t Test: {:.4f} \t neighbors: {}".format(
current_train_score, best_score, best_neighbors
)
)
knn = KNeighborsClassifier(n_neighbors=best_neighbors)
knn.fit(X_train, y_train)
return knn
# Logistic Regression test
def train_test_logreg(X_train, y_train, X_test, y_test):
best_score = 0
current_prediction_train = 0
best_c = 0
for c in np.arange(0.1, 10, 0.1):
logreg = LogisticRegression(C=c, solver="newton-cg").fit(X_train, y_train)
prediction_train = logreg.score(X_train, y_train)
prediction_test = logreg.score(X_test, y_test)
if prediction_test > best_score:
best_score = prediction_test
best_c = c
current_prediction_train = prediction_train
print(
"LogReg Score:\t Train: {:.4f} \t Test: {:.4f} \t C: {}".format(
current_prediction_train, best_score, best_c
)
)
    # re-train on the best C found (the loop leaves logreg fitted with the last C tried)
    logreg = LogisticRegression(C=best_c, solver="newton-cg").fit(X_train, y_train)
    return logreg
# Random Forest test
def train_test_random_forest(X_train, y_train, X_test, y_test):
best_test_score = 0
current_train_score = 0
best_estimator = 0
best_depth = 0
for estimatorCount in range(1, 10):
for depth in range(1, 10):
forest = RandomForestClassifier(
n_estimators=estimatorCount, max_depth=depth, random_state=0
)
forest.fit(X_train, y_train)
prediction_train = forest.score(X_train, y_train)
prediction_test = forest.score(X_test, y_test)
if prediction_test > best_test_score:
best_test_score = prediction_test
current_train_score = prediction_train
best_estimator = estimatorCount
best_depth = depth
print(
"Forest Score:\t Train: {:.4f} \t Test: {:.4f} \t ({} estimators \t {} depth)".format(
current_train_score, best_test_score, best_estimator, best_depth
)
)
# Change rf parameters to match the best found, and re-train.
forest = RandomForestClassifier(
n_estimators=best_estimator, max_depth=best_depth, random_state=0
)
forest.fit(X_train, y_train)
return forest
def train_test_gradient_boost(X_train, y_train, X_test, y_test):
best_score = 0
current_training_score = 0
best_depth = 0
best_learning_rate = 0
for depth in range(1, 30):
print("Depth: \t {}".format(depth))
for rate in np.arange(0.01, 1, 0.1):
gb = GradientBoostingClassifier(
random_state=0, max_depth=depth, learning_rate=rate
)
gb.fit(X_train, y_train)
            prediction_train = gb.score(X_train, y_train)
            prediction_test = gb.score(X_test, y_test)
if prediction_test > best_score:
best_score = prediction_test
current_training_score = prediction_train
best_depth = depth
best_learning_rate = rate
print(
"Grad. Boost \t Train: {:.4f} \t Test: {:.4f} \t Depth: {} \t Rate: {}".format(
current_training_score, best_score, best_depth, best_learning_rate
)
)
return gb
# Gaussian Naive Bayes
def train_test_gnb(X_train, y_train, X_test, y_test):
gnb = GaussianNB()
gnb.fit(X_train, y_train)
prediction_train = gnb.score(X_train, y_train)
prediction_test = gnb.score(X_test, y_test)
print(
"GaussianNB: \t Train: {:.4f} \t Test: {:.4f}".format(
prediction_train, prediction_test
)
)
return gnb
# Decision Tree
def train_test_tree(X_train, y_train, X_test, y_test):
best_score = 0
current_training_score = 0
best_depth = 0
for depth in range(1, 100):
tree = DecisionTreeClassifier(random_state=0, max_depth=depth)
tree.fit(X_train, y_train)
prediction_train = tree.score(X_train, y_train)
prediction_test = tree.score(X_test, y_test)
if prediction_test > best_score:
best_score = prediction_test
best_depth = depth
current_training_score = prediction_train
print(
"Tree: \t\t Train: {:.4f} \t Test: {:.4f} \t Depth: {}".format(
current_training_score, best_score, best_depth
)
)
tree = DecisionTreeClassifier(random_state=0, max_depth=best_depth)
tree.fit(X_train, y_train)
# print("Feature Names: \t{}".format(feature_names))
# print("Feature Importances: \t{}".format(tree.feature_importances_))
return tree
def output_predictions(model, data, passengerIds):
predictions = model.predict(data)
output = pd.DataFrame({"PassengerId": passengerIds, "Survived": predictions})
output.to_csv("submission.csv", index=False)
print("Predictions Saved.")
# Dataset Cleaning Function
def clean(raw_train_dataset, raw_competition_dataset):
# Drop columns which are not to be included in model.
processed_train_data = raw_train_dataset.drop(
["Name", "PassengerId", "Ticket", "Embarked"], axis=1
)
processed_competition_data = raw_competition_dataset.drop(
["Name", "PassengerId", "Ticket", "Embarked"], axis=1
)
# Instead of dropping "Sex", convert to int.
processed_train_data.Sex = processed_train_data.Sex.eq("male").mul(1)
processed_competition_data.Sex = processed_competition_data.Sex.eq("male").mul(1)
if "Survived" in processed_train_data.columns:
processed_train_data = processed_train_data.drop("Survived", axis=1)
# Add new column - Boolean - Has_Cabin_Number
processed_train_data["Has_Cabin_Number"] = ~processed_train_data["Cabin"].isnull()
processed_competition_data["Has_Cabin_Number"] = ~processed_competition_data[
"Cabin"
].isnull()
# Drop Cabin
processed_train_data = processed_train_data.drop(["Cabin"], axis=1)
processed_competition_data = processed_competition_data.drop(["Cabin"], axis=1)
# Handle Missing Values
si = SimpleImputer()
imputed_train_data = pd.DataFrame(si.fit_transform(processed_train_data))
imputed_competition_data = pd.DataFrame(si.transform(processed_competition_data))
# replace columns
imputed_train_data.columns = processed_train_data.columns
imputed_competition_data.columns = processed_competition_data.columns
processed_train_data.head()
return imputed_train_data, imputed_competition_data
data = pd.read_csv("/kaggle/input/titanic/train.csv")
data.head()
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# import data
train_dataset = pd.read_csv("/kaggle/input/titanic/train.csv")
raw_competition_dataset = pd.read_csv("/kaggle/input/titanic/test.csv")
# get labels and drop from train dataset
labels = train_dataset["Survived"]
train_dataset = train_dataset.drop("Survived", axis=1)
# drop fields which are not beneficial
train_dataset = train_dataset.drop(["Name", "Ticket", "PassengerId"], axis=1)
competition_dataset = raw_competition_dataset.drop(
["Name", "Ticket", "PassengerId"], axis=1
)
# split into train and validation sets
X_train, X_valid, y_train, y_valid = train_test_split(
train_dataset, labels, random_state=0
)
# Define pre-processing steps.
# get numerical column labels
numerical_cols = train_dataset.select_dtypes(include=["int"]).columns
# Numerical - 'PassengerId', 'Pclass', 'SibSp', 'Parch'
# get categorical column labels
categorical_cols = train_dataset.select_dtypes(include=["object"]).columns
# Categorical - 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'
# numerical values pre-processing steps - impute
numerical_transformer = SimpleImputer()
# categorical values pre-processing steps - impute then One Hot Encode.
# to do - potentially drop high cardinality categorical variables.
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("OneHotEncoder", OneHotEncoder(handle_unknown="ignore")),
]
)
# Bundle pre-processing steps into a column transformer
preprocessor = ColumnTransformer(
transformers=[
("numerical", numerical_transformer, numerical_cols),
("categorical", categorical_transformer, categorical_cols),
]
)
# Define model
model = RandomForestClassifier(random_state=0)
clf = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
clf.fit(X_train, y_train)
predictions = clf.predict(X_valid)
accuracy = accuracy_score(y_valid, predictions)
accuracy
# generate predictions on competiton set
results = clf.predict(competition_dataset)
# output predictions in correct format to csv.
output = pd.DataFrame(
{"PassengerId": raw_competition_dataset.PassengerId, " Survived": results}
)
output.to_csv("submission.csv", index=False)
# Import, clean, and get labels from training dataset.
raw_train_dataset = pd.read_csv("/kaggle/input/titanic/train.csv")
labels = raw_train_dataset["Survived"]
raw_competition_dataset = pd.read_csv("/kaggle/input/titanic/test.csv")
clean_train_dataset, clean_competition_dataset = clean(
raw_train_dataset, raw_competition_dataset
)
feature_names = clean_train_dataset.columns.values
# For internal testing, split the train dataset into train & test.
X_train, X_test, y_train, y_test = train_test_split(
clean_train_dataset, labels, random_state=0, train_size=0.8
)
knn = train_test_KNN(X_train, y_train, X_test, y_test)
logreg = train_test_logreg(X_train, y_train, X_test, y_test)
rf = train_test_random_forest(X_train, y_train, X_test, y_test)
gnb = train_test_gnb(X_train, y_train, X_test, y_test)
tree = train_test_tree(X_train, y_train, X_test, y_test)
# gb = train_test_gradient_boost(X_train, y_train, X_test, y_test)
output_predictions(rf, clean_competition_dataset, raw_competition_dataset.PassengerId)
clean_train_dataset.head()
|
# ### Business Problem
# #### Make 10 movie recommendations for the user whose ID is given, using item-based and user-based recommender methods.
# ### Dataset Story
# #### The dataset is provided by MovieLens, a movie recommendation service. It contains the movies together with the rating scores given to them: 20,000,263 ratings across 27,278 movies. The dataset was created on October 17, 2016 and covers 138,493 users and data between January 09, 1995 and March 31, 2015. Users were selected at random, and every selected user is known to have rated at least 20 movies.
import pandas as pd
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 500)
pd.set_option("display.float_format", lambda x: "%.5f" % x)
pd.set_option("display.expand_frame_repr", False)
# #### Preparing the Data
movie_df = pd.read_csv("/kaggle/input/movielens-20m-dataset/movie.csv")
rating_df = pd.read_csv("/kaggle/input/movielens-20m-dataset/rating.csv")
movie_df.head()
rating_df.head()
df = pd.merge(rating_df, movie_df, on="movieId", how="left")
df.head()
# Keep the titles of rarely rated movies (1000 ratings or fewer) in a list and remove them from the dataset
comment_counts = pd.DataFrame(df["title"].value_counts())
rare_movies = comment_counts[comment_counts["title"] <= 1000].index
common_movies = df[~df["title"].isin(rare_movies)]
common_movies.head()
# Create a pivot table with userIds in the index, movie titles in the columns and ratings as the values
user_movie_df = common_movies.pivot_table(
index=["userId"], columns=["title"], values="rating"
)
user_movie_df.head()
# #### Wrapping the Steps in a Function
def prep_user_movie_df():
import pandas as pd
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 500)
pd.set_option("display.float_format", lambda x: "%.5f" % x)
movie_df = pd.read_csv("movie.csv")
rating_df = pd.read_csv("rating.csv")
df = pd.merge(rating_df, movie_df, on="movieId", how="left")
comment_counts = pd.DataFrame(df["title"].value_counts())
rare_movies = comment_counts[comment_counts["title"] <= 1000].index
common_movies = df[~df["title"].isin(rare_movies)]
user_movie_df = common_movies.pivot_table(
index=["userId"], columns=["title"], values="rating"
)
return user_movie_df
# #### Determining the Movies Watched by the User to Recommend For
random_user = int(pd.Series(user_movie_df.index).sample(1, random_state=45).values)
random_user
# Create a new dataframe named random_user_df consisting of the observations belonging to the selected user
random_user_df = user_movie_df[user_movie_df.index == random_user]
# Assign the movies that the selected user has rated to a list named movies_watched
movies_watched = random_user_df.columns[random_user_df.notna().any()].tolist()
# #### Accessing the Data and Ids of Other Users Who Watched the Same Movies
movies_watched_df = user_movie_df[movies_watched]
user_movie_count = movies_watched_df.T.notnull().sum()
user_movie_count = user_movie_count.reset_index()
user_movie_count.columns = ["userid", "movie_count"]
user_movie_count.head()
perc = len(movies_watched) * 60 / 100
user_same_movies = user_movie_count[user_movie_count["movie_count"] > perc]["userid"]
user_same_movies.shape
# #### Determining the Users Most Similar to the User to Recommend For
final_df = movies_watched_df[movies_watched_df.index.isin(user_same_movies)]
final_df.head()
corr_df = final_df.T.corr().unstack().sort_values().drop_duplicates()
corr_df = pd.DataFrame(corr_df, columns=["corr"])
corr_df.index.names = ["userid_1", "userid_2"]
corr_df = corr_df.reset_index()
corr_df.head()
top_users = corr_df[(corr_df["userid_1"] == random_user) & (corr_df["corr"] > 0.65)][
["userid_2", "corr"]
]
top_users.head()
top_users.columns = ["userId", "corr"]
top_users_score = top_users.merge(
rating_df[["userId", "movieId", "rating"]], how="inner"
)
top_users_score.shape
# #### Calculating the Weighted Average Recommendation Score and Keeping the Top 5 Movies
top_users_score["weighted_reting"] = top_users_score["corr"] * top_users_score["rating"]
top_users_score.head()
recommendation_df = top_users_score.groupby("movieId").agg({"weighted_reting": "mean"})
recommendation_df = recommendation_df.reset_index()
recommendation_df.head()
movies_to_be_recommended = recommendation_df[
recommendation_df["weighted_reting"] > 3.5
].sort_values("weighted_reting", ascending=False)
movies_to_be_recommended.head()
movies_to_be_recommended.merge(movie_df[["movieId", "title"]])["title"]
# #### Item Based Recommendation
# Make an item-based recommendation based on the most recently watched movie to which the user gave the highest rating.
import pandas as pd
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 500)
pd.set_option("display.float_format", lambda x: "%.5f" % x)
movie_df = pd.read_csv("/kaggle/input/movielens-20m-dataset/movie.csv")
rating_df = pd.read_csv("/kaggle/input/movielens-20m-dataset/rating.csv")
movie_id = (
rating_df[(rating_df["userId"] == random_user) & (rating_df["rating"] == 5.0)]
.sort_values("timestamp", ascending=False)["movieId"][0:1]
.values[0]
)
movie_title = movie_df[movie_df["movieId"] == movie_id]["title"].values[0]
movie_ratings = user_movie_df[movie_title]
user_movie_df.corrwith(movie_ratings).sort_values(ascending=False).head(20)
item_based_recommendations = (
    user_movie_df.corrwith(movie_ratings).sort_values(ascending=False).index[1:6]
)
item_based_recommendations
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# import data
cross = pd.read_csv("/kaggle/input/mri-and-alzheimers/oasis_cross-sectional.csv")
long = pd.read_csv("/kaggle/input/mri-and-alzheimers/oasis_longitudinal.csv")
# print cross_sectional data and longitudinal data informations
print(cross.info())
print(long.info())
print(
"Highest age is:",
np.max([np.max(cross.Age), np.max(long.Age)]),
"and Lowest age is:",
np.min([np.min(cross.Age), np.min(long.Age)]),
)
|
# # Logistic Regression for Binary Classification
# Welcome to the wonderful world of logistic regression!
# Logistic regression is a basic machine learning algorithm mostly used for classification tasks. It's a very simple tool, great for interpretability, fast to compute, and sometimes capable of outperforming much more advanced and computationally costly algorithms.
# While being an amazing classifier by itself, logistic regression is also at the core of many neural network architectures, and thus a good understanding of this basic algorithm is very useful for any data scientist.
# This kernel gives a light introduction to the world of logistic regression with one-feature models for the famous Titanic survival classification problem.
# - - -
# ## One-Dimensional Logistic Regression
# ### Background
# For a binary classification task, the target variable is $y = 0$ or $y = 1$.
# One possible way to predict the probability that an object belongs to class 1 is to fit $y(x)$ with a sigmoid, which in a one-dimensional space takes the form
# $p(x) = sigmoid(wx + w_0) = \frac{1}{1 + exp(-(wx + w_0))}$,
# where
# - $x$ is a value of a known variable (our single feature),
# - $p$ is the probability that an object with the given feature value $x$ belongs to class $1$, and
# - $w$, $w_0$ are fit parameters
# __Let us plot a one-dimensional sigmoid__
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x, w, w0):
den = 1 + np.exp(-(x * w + w0))
return 1.0 / den
def plot_sigmoid(ax, w, w0):
x = [0.1 * i for i in range(-75, 76)]
y = [sigmoid(x_i, w=w, w0=w0) for x_i in x]
out = ax.scatter(x=x, y=y)
out = ax.axhline(y=0.5, color="black", linestyle="--")
out = ax.axhline(y=0, color="black", linestyle="--")
out = ax.axhline(y=1, color="black", linestyle="--")
out = ax.axvline(x=0, color="black", linestyle="--")
out = ax.set_title(
"One-dimensional sigmoid \n p = sigmoid(wx + w0), w = "
+ str(w)
+ ", w0 = "
+ str(w0),
fontsize=16,
)
out = ax.set_xlabel("x", fontsize=14)
out = ax.set_ylabel("p(x)", fontsize=14)
out = ax.grid()
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, nrows=1, figsize=(18, 6))
plot_sigmoid(ax1, w=1, w0=0)
plot_sigmoid(ax2, w=10, w0=40)
plot_sigmoid(ax3, w=0.001, w0=0)
# As one can see in the plots above, the sigmoid maps any real number into the range from 0 to 1. In the left plot the weights are $w = 1$ and $w_0 = 0$. $w$ determines the slope of the function, and may vary the sigmoid from a step function (center plot) to an almost constant function within a certain range (right plot). The $w_0/w$ ratio is responsible for the shift of the sigmoid, as one can notice by looking carefully at the left and center plots.
# #### Cost Function
# To find the values of $w$ and $w_0$ that best describe the observed data, the machine learning algorithm minimizes
# log loss $= - \sum_{i = 1}^{N} \left(y_i \ln(p_i) + (1 - y_i) \ln(1 - p_i)\right)$, where
# $N$ is a number of objects in the train data,
# $y_i$ are real labels of a target variable (0 or 1),
# and $p_i = sigmoid(x_i, w, w_0)$ for given $x_i$
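# A quick numeric check of the cost function above (a minimal sketch; the labels and probabilities are made up for illustration). Note that the formula above is a sum, while sklearn's log_loss reports the average, i.e. the sum divided by N.
import numpy as np
from sklearn.metrics import log_loss

y_true = np.array([0, 1, 1, 0])
p_pred = np.array([0.1, 0.8, 0.6, 0.3])
manual_sum = -np.sum(y_true * np.log(p_pred) + (1 - y_true) * np.log(1 - p_pred))
print(manual_sum / len(y_true), log_loss(y_true, p_pred))  # the two values agree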
# - - -
# One of the most popular implementations of logistic regression in python is [LogisticClassifier by sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html), and we are going to use it to predict who of Titanic passengers survived the tragedy, and who didn't.
# - - -
# ### Predict Titanic Survival
# Titanic survival prediction is [a getting started competition on kaggle](https://www.kaggle.com/c/titanic) very much loved by beginners as well as by more experienced data scientists willing to test new tools and approaches on this well known dataset.
# The objective of the competition is to predict whether a given passenger in the test dataset survived or didn't survive (1 or 0) with accuracy as a performance metric. We will
# - start with a brief EDA,
# - train several one-feature models, and
# - identify possible steps for improvement and further learning.
import pandas as pd
train_df = pd.read_csv("../input/titanic/train.csv")
# What features are available in the dataframe?
train_df.head()
# Let us look at numerical features
train_df.describe()
# __'PassengerId'__ is irrelevant
# __'Survived'__ is our target
# __'Age'__ has missing values, let us drop it for now, so that we don't worry about the imputation
# In addition, let us construct a binary feature from a categorical feature __'Sex'__:
train_df["is_female"] = train_df["Sex"].apply(lambda x: 1 if x == "female" else 0)
train_df.drop(columns=["PassengerId", "Survived", "Age"]).describe()
# We are left with __5 numerical features__, and our plan is to build a one-dimensional logistic regression based on each of them individually.
# Let us start by looking at the feature distributions
def plot_one_col(df0, df1, col, ax, bins):
ax.hist(df0[col], label="didn't survive", density=True, bins=bins)
ax.hist(df1[col], label="survived", density=True, bins=bins, alpha=0.5)
ax.set_title(col, fontsize=16)
ax.set_xlabel(col + " value", fontsize=14)
ax.set_ylabel("N entries per bin", fontsize=14)
ax.legend(fontsize=14)
df0 = train_df.query("Survived == 0")
df1 = train_df.query("Survived == 1")
fig, ax = plt.subplots(ncols=3, nrows=2, figsize=(18, 12))
plot_one_col(df0, df1, col="Pclass", ax=ax[0, 0], bins=[0.5, 1.5, 2.5, 3.5])
plot_one_col(
df0,
df1,
col="SibSp",
ax=ax[0, 1],
bins=[-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5],
)
plot_one_col(
df0, df1, col="Parch", ax=ax[0, 2], bins=[-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5]
)
plot_one_col(
df0, df1, col="Fare", ax=ax[1, 0], bins=[0, 15, 50, 75, 100, 150, 200, 300, 500]
)
plot_one_col(df0, df1, col="is_female", ax=ax[1, 1], bins=[-0.5, 0.5, 1.5])
# ### Setting up the Cross Validation
import numpy as np
from sklearn.model_selection import KFold
N_folds = 5
kf = KFold(n_splits=N_folds, random_state=13, shuffle=True)
indexes = []
for train_index, valid_index in kf.split(train_df):
print("TRAIN:", train_index[0:5], "VALID:", valid_index[0:5])
indexes.append({"train": train_index, "valid": valid_index})
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
clf = LogisticRegression(random_state=13, solver="lbfgs")
threshold = 0.5
def fit_on_feature_set(features):
valid_acc = [0] * N_folds
train_acc = [0] * N_folds
acc = [0] * N_folds
for fold in range(N_folds):
inds_t = indexes[fold]["train"]
fold_train_df = train_df.loc[inds_t]
inds_v = indexes[fold]["valid"]
fold_valid_df = train_df.loc[inds_v]
clf.fit(fold_train_df[features], fold_train_df["Survived"])
predictions_train = clf.predict_proba(fold_train_df[features])[:, 1] > threshold
fold_train_df["predictions"] = predictions_train
train_acc[fold] = accuracy_score(
fold_train_df["Survived"], fold_train_df["predictions"]
)
clf.predict_proba(fold_valid_df[features])
predictions = clf.predict_proba(fold_valid_df[features])[:, 1] > threshold
fold_valid_df["predictions"] = predictions
valid_acc[fold] = accuracy_score(
fold_valid_df["Survived"], fold_valid_df["predictions"]
)
acc[fold] = min(valid_acc[fold], train_acc[fold])
return acc
num_features = ["Pclass", "SibSp", "Parch", "Fare", "is_female"]
d_acc = {}
for feat in num_features:
d_acc[feat] = fit_on_feature_set([feat])
df_acc = pd.DataFrame(d_acc)
df_acc["fold"] = list(x for x in range(N_folds))
df_acc
colors = ["blue", "orange", "green", "red", "purple", "black"]
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 6))
for i in range(len(num_features)):
col = num_features[i]
ax.scatter(y=df_acc["fold"], x=df_acc[col], label=col, s=180, color=colors[i])
m = df_acc[col].mean()
s = df_acc[col].std() / (N_folds**0.5)
ax.axvline(x=m, color=colors[i], linestyle="--", alpha=0.5)
ax.axvline(x=m + s, color=colors[i], alpha=0.5)
ax.axvline(x=m - s, color=colors[i], alpha=0.5)
ax.axvspan(m - s, m + s, facecolor=colors[i], alpha=0.1)
ax.set_xlim(0.5, 1.0)
ax.set_ylabel("fold", fontsize=20)
ax.set_xlabel("accuracy", fontsize=20)
t1 = "Compare log-reg models on one feature"
t2 = "Accuracy score vs fold"
ax.set_title(t1 + "\n" + t2, fontsize=20)
ax.grid()
ax.legend(fontsize=16)
print("Mean accuracy score for one-feature based models: \n")
for col in num_features:
print(
col,
round(df_acc[col].mean(), 3),
"+-",
round(df_acc[col].std() / (N_folds**0.5), 3),
)
# ---
# ## Prepare Submission File
# In this kernel we have considered 5 one-feature models. The best score was achieved when using the engineered binary feature 'is_female'.
# Let us train the model on the whole train data, make predictions on the kaggle test dataset, and submit the result.
clf.fit(train_df[["is_female"]], train_df["Survived"])
test_df = pd.read_csv("../input/titanic/test.csv")
test_df["is_female"] = test_df["Sex"].apply(lambda x: 1 if x == "female" else 0)
predictions = clf.predict_proba(test_df[["is_female"]])[:, 1] > threshold
test_df["Survived"] = predictions
test_df["Survived"] = test_df["Survived"].astype(int)
test_df[["PassengerId", "Survived"]].to_csv("submission.csv", index=False)
|
# survival - Survival (0 = No; 1 = Yes)
# class - Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
# name - Name
# sex - Sex
# age - Age
# sibsp - Number of Siblings/Spouses Aboard
# parch - Number of Parents/Children Aboard
# ticket - Ticket Number
# fare - Passenger Fare
# cabin - Cabin
# embarked - Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
#
import pandas as pd
df = pd.read_csv("../input/titanic/train.csv")
df.head()
df.columns
df_new = df.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis="columns")
df_new.head()
for col in df_new.columns:
print(col, df_new[col].isnull().values.any())
# *Embarked and Age have missing values*
age_median = df["Age"].median()
embarked_mode = df["Embarked"].mode()
print(embarked_mode)
df_new["Age"].fillna(age_median, inplace=True)
df_new["Embarked"].fillna("S", inplace=True)
df_new["Age"].isnull().values.any()
df_new["Embarked"].isnull().values.any()
from sklearn.preprocessing import LabelEncoder
le_sex = LabelEncoder()
df_new["sex_en"] = le_sex.fit_transform(df_new["Sex"])
df_new.head()
X = df_new.drop("Sex", axis="columns")
X.head()
embarked_dummies = pd.get_dummies(X["Embarked"])
X = pd.concat([X, embarked_dummies], axis="columns")
X = X.drop("Embarked", axis="columns")
X.head()
features = ["Pclass", "Age", "SibSp", "Parch", "Fare", "sex_en", "C", "Q", "S"]
y = X["Survived"]
X = X[features]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
# Using logistic regression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(random_state=1)
lr.fit(X_train, y_train)
pred_lr = lr.predict(X_test)
lr.score(X_test, y_test)
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
cm_lr = confusion_matrix(y_test, pred_lr)
sns.heatmap(cm_lr, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Truth")
print(cm_lr)
# Using KNN
from sklearn.neighbors import KNeighborsClassifier
for n in range(4, 20):
    knn = KNeighborsClassifier(n_neighbors=n)  # use the loop variable as the number of neighbors
knn.fit(X_train, y_train)
# pred_knn = knn.predict(X_test)
score = knn.score(X_test, y_test)
print("n =", n, ", score =", score)
from sklearn.tree import DecisionTreeClassifier
dct = DecisionTreeClassifier(random_state=1)
dct.fit(X_train, y_train)
dct.score(X_test, y_test)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=1)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
from sklearn.svm import SVC
sv = SVC(gamma="auto", random_state=1)
sv.fit(X_train, y_train)
sv.score(X_test, y_test)
# From these scores we can see that Logistic Regression is the best model
# Making Logistic Regression Model on the full data
lr_full_data = LogisticRegression(random_state=1)
lr_full_data.fit(X, y)
path = "../input/titanic/test.csv"
test_data = pd.read_csv(path)
test = test_data.copy()
test_data.head()
test_data = test_data.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis="columns")
test_data.head(3)
test_data["sex_en"] = le_sex.fit_transform(test_data["Sex"])
test_data.head()
test_data = test_data.drop("Sex", axis="columns")
embarked_dummies = pd.get_dummies(test_data["Embarked"])
test_data = pd.concat([test_data, embarked_dummies], axis="columns")
test_data = test_data.drop("Embarked", axis="columns")
test_data.head(3)
for col in test_data.columns:
print(col, test_data[col].isnull().values.any())
test_data["Age"].fillna(age_median, inplace=True)
fare_median = df_new["Fare"].median()
test_data["Fare"].fillna(fare_median, inplace=True)
predictions = lr_full_data.predict(test_data)
print(predictions)
output = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": predictions})
output.to_csv("submission.csv", index=False)
|
import numpy as np
import random
import pandas as pd
import heapq
# random.seed(5)
random.seed(0)
from collections import Counter
# Make the PhD class
def generate_phd_class(size):
PhDs = pd.DataFrame(columns=["Field", "O_Value"], index=range(size))
for i in range(size):
# 25% are Anabra
if i < p * size:
PhDs.loc[i] = pd.Series(
{"Field": "Anabra", "O_Value": random.uniform(0, 1)}
)
else:
# 75% are Algasis
PhDs.loc[i] = pd.Series(
{"Field": "Algasis", "O_Value": random.uniform(0, 1)}
)
return PhDs
def subjective_value(student, faculty):
l = 0
# for each faculty member
for i in range(len(faculty)):
# if faculty member and student are of the same field, add value
if faculty.loc[i]["Field"] == student["Field"]:
l += student["O_Value"] + t
else:
# otherwise subtract
l += student["O_Value"] - t
# return mean of overall impression
return l / len(faculty)
def subjective_value_each_student(PhD_Class, faculty):
l = []
for i in PhD_Class.index:
l.append(subjective_value(PhD_Class.loc[i], faculty))
return l
def find_best_two(subjective_values):
indexes = heapq.nlargest(
2, range(len(subjective_values)), key=subjective_values.__getitem__
)
return indexes
# p is the proportion that study Anabra, 1-p Algasis
p = 0.25
# n is the number of faculty members, 50 in the blog post, but 52 for divisibility reasons
n = 52
# k is the number of departments
k = 10
# t is the value factor
t = 0.01
# size of PhD class
size = 40
random.seed(0)
departments = dict()
for i in range(k):
df = pd.DataFrame(columns=["Field", "Age"], index=range(n))
for j in range(n):
# 25% are Anabra
if j < p * n:
df.loc[j] = pd.Series({"Field": "Anabra", "Age": random.uniform(0, 1)})
else:
# 75% are Algasis
df.loc[j] = pd.Series({"Field": "Algasis", "Age": random.uniform(0, 1)})
df["Age"] = df["Age"].astype("float64")
departments[i] = df
proportion = pd.DataFrame(columns=range(k))
proportion.loc[0] = [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75]
for y in range(800):
PhDs = generate_phd_class(size)
for i in range(k):
d = departments[i]
l = subjective_value_each_student(PhDs, d)
indexes = find_best_two(l)
# Add best two to department
d.loc[n + 1] = pd.Series({"Field": PhDs.loc[indexes[0]]["Field"], "Age": 0})
d.loc[n + 2] = pd.Series({"Field": PhDs.loc[indexes[1]]["Field"], "Age": 0})
# drop those Phd Students
PhDs.drop(indexes, inplace=True)
# relabel the indices
PhDs.index = range(len(PhDs))
# drop oldest two
d.drop(d["Age"].idxmax(), inplace=True)
d.drop(d["Age"].idxmax(), inplace=True)
# relabel the indices
d.index = range(len(d))
# add year to everyone's age
d["Age"] = d["Age"] + 1
l = []
for j in range(k):
l.append(departments[j]["Field"].value_counts()["Algasis"] / 52)
proportion.loc[y + 1] = l
for i in range(10):
print(Counter(departments[i]["Field"]))
proportion.head()
import matplotlib.pyplot as plt
lines = proportion.plot.line(
figsize=(12, 6),
title="Algasis Proportion over Time, t=0.01, Horizontal Line at 0.5",
)
lines.set_xlabel("Years")
lines.set_ylabel("Algasis Proportion")
plt.axhline(y=0.5, color="black", linestyle="--")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
world_pop = pd.read_csv("../input/country-wise-population-data/world_pop.csv")
stores = pd.read_csv("/kaggle/input/store-locations/directory.csv")
cities15000 = pd.read_csv(
"../input/cities-of-the-world/cities15000.csv", encoding="latin-1"
)
cities15000 = cities15000.rename(columns={"name": "City"})
cities15000
stores
cities15000 = cities15000.fillna(0)
data_1 = stores.groupby("City").count().reset_index()
data_2 = cities15000[["population", "City"]]
data = data_1.merge(data_2, on="City")
data
data = data[["City", "population", "Brand"]]
data = data.rename(columns={"Brand": "Count"})
data["population"] = pd.to_numeric(data["population"])
data["Count"] = pd.to_numeric(data["Count"])
data = data.sort_values("population")
corr_list = []
for i in range(100):
corr_list.append(
data.iloc[int(i * (data.shape[0] / 100)) : int((i + 1) * (data.shape[0] / 100))]
.corr()
.iloc[1][0]
)
import matplotlib.pyplot as plt
plt.plot(corr_list)
plt.xlabel("100 samples and their correlation rate")
plt.ylabel("Correlation ratio of population and starbucks store number")
plt.show()
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
for dirname, _, filenames in os.walk("/kaggle/input/glass/glass.csv"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
os.path.isfile("/kaggle/input/glass/glass.csv")
Input = pd.read_csv("/kaggle/input/glass/glass.csv")
Input.head(5)
Input.dtypes
Input.describe()
missing_values = Input.isnull()
missing_values.head(5)
sns.heatmap(data=missing_values, yticklabels=False, cbar=False, cmap="viridis")
Input.Type.value_counts().sort_index()
Input.shape
import seaborn as sns
sns.pairplot(Input)
# Pearson correlation heatmap of the features
mask = np.zeros_like(Input.corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(16, 12))
plt.title("Pearson Correlation Matrix", fontsize=25)
sns.heatmap(
Input.corr(),
linewidths=0.25,
vmax=0.7,
square=True,
cmap="BuGn",
# "BuGn_r" to reverse
linecolor="w",
annot=True,
annot_kws={"size": 8},
mask=mask,
cbar_kws={"shrink": 0.9},
)
Input["Output"] = Input.Type.map({1: 0, 2: 0, 3: 0, 5: 1, 6: 1, 7: 1})
# Output is for good and bad classification
Input.head()
plt.scatter(Input.Al, Input.Output)
plt.xlabel("Al")
plt.ylabel("Output")
sns.regplot(x="Al", y="Output", data=Input, logistic=True, color="b")
X = Input[["Al"]]
# Dependent variable (note: the multi-class 'Type' column is used here, not the binary 'Output' created above)
Y = Input["Type"]
# Split training and testing data
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.3, random_state=200
)
print(X.shape)
# print(X_test.head())
# print(Y_train.head())
print(Y.shape)
# Run the model
# Import model for fitting
from sklearn.linear_model import LogisticRegression
# Create instance (i.e. object) of LogisticRegression
# model = LogisticRegression()
# You can try the following variations on the above model; the one above is just the default (a few illustrative examples follow below)
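# A few hedged examples of such variations (parameter values are illustrative, not tuned for this dataset):
# model = LogisticRegression(C=0.5, solver="liblinear")  # stronger regularization
# model = LogisticRegression(C=10.0, max_iter=1000)  # weaker regularization, more iterations
# model = LogisticRegression(class_weight="balanced")  # reweight classes if they are imbalanced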
model = LogisticRegression()
# Fit the model using the training data
# X_train -> parameter supplies the data features
# Y_train -> parameter supplies the target labels
output_model = model.fit(X, Y)
output_model
from sklearn import linear_model
import pickle
model = LogisticRegression()
output_model = model.fit(X_train, Y_train)
output_model
pkl_filename = "pickle_model.pkl"
with open(pkl_filename, "wb") as file:
pickle.dump(model, file)
# Load from file
with open(pkl_filename, "rb") as file:
pickle_model = pickle.load(file)
# Calculate the accuracy score and predict target values
score = pickle_model.score(X_test, Y_test)
print("Test score: {0:.2f} %".format(100 * score))
Ypredict = pickle_model.predict(X_test)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
Y_pred = model.predict(X_test)
# Confusion matrix
results = confusion_matrix(Y_test, Y_pred)
print(results)
# Accuracy score
accuracy = accuracy_score(Y_test, Y_pred)
print("Accuracy rate : {0:.2f} %".format(100 * accuracy))
# Classification report
report = classification_report(Y_test, Y_pred)
print(report)
df = pd.DataFrame({"Actual": Y_test, "Predicted": Ypredict.flatten()})
df
from sklearn import metrics
print("Mean Absolute Error:", metrics.mean_absolute_error(Y_test, Ypredict))
print("Mean Squared Error:", metrics.mean_squared_error(Y_test, Ypredict))
print("Root Mean Squared Error:", np.sqrt(metrics.mean_squared_error(Y_test, Ypredict)))
import matplotlib.pyplot as plt
ax = plt.axes()
ax.scatter(X, Y)
plt.title("Input Data and regression line ")
ax.plot(X_test, Ypredict, color="Red")
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.axis("tight")
plt.show()
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
predictions = model.predict(X_test)
plt.style.use("fivethirtyeight")
## plotting residual errors in training data
plt.scatter(
model.predict(X_train),
model.predict(X_train) - Y_train,
color="green",
s=1,
label="Train data",
linewidth=5,
)
## plotting residual errors in test data
plt.scatter(
model.predict(X_test),
model.predict(X_test) - Y_test,
color="blue",
s=1,
label="Test data",
linewidth=4,
)
## plotting line for zero residual error
plt.hlines(y=0, xmin=0, xmax=4, linewidth=2)
## plotting legend
plt.legend(loc="upper right")
## plot title
plt.title("Residual errors")
## function to show plot
plt.show()
|
# # Electronic Structure using Qiskit Nature
# Link : https://qiskit.org/documentation/nature/tutorials/01_electronic_structure.html
# 
# 
# ## Hydrogen Atom
# $$H_{el} = -\sum_{i} \frac{\nabla^2_{r_i}}{2 m_e} - \sum_I\sum_i \frac{Z_I e^2}{|R_I - r_i|}$$
# $$h_{pq} = \int \phi^*_p(r) \left(-\frac{1}{2} \nabla^2 - \sum_I \frac{Z_I e^2}{|R_I - r|}\right) \phi_q(r)\,dr$$
# ### STO-3G
# $$\phi_p(r) = C_1 (2\alpha_1)^{3/4} e^{-\alpha_1 r^2} + C_2 (2\alpha_2)^{3/4} e^{-\alpha_2 r^2} + C_3 (2\alpha_3)^{3/4} e^{-\alpha_3 r^2}$$
# $$\phi_q(r) = D_1 (2\alpha_1)^{3/4} e^{-\alpha_1 r^2} + D_2 (2\alpha_2)^{3/4} e^{-\alpha_2 r^2} + D_3 (2\alpha_3)^{3/4} e^{-\alpha_3 r^2}$$
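# A small numerical sketch of the contracted Gaussian above, using the commonly tabulated STO-3G exponents and contraction coefficients for the hydrogen 1s orbital (treat the exact numbers as an assumption; the conventional primitive normalization is $(2\alpha/\pi)^{3/4}$):
import numpy as np

alphas = np.array([3.42525091, 0.62391373, 0.16885540])  # STO-3G exponents for H 1s
coeffs = np.array([0.15432897, 0.53532814, 0.44463454])  # contraction coefficients

def sto3g_1s(r):
    # contracted Gaussian basis function evaluated at radius r (atomic units)
    prims = (2.0 * alphas / np.pi) ** 0.75 * np.exp(-alphas * r**2)
    return float(np.sum(coeffs * prims))

print(sto3g_1s(0.0), sto3g_1s(1.0))  # value at the nucleus and at r = 1 Bohr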
# # Import Package
import numpy as np
import matplotlib.pyplot as plt
from qiskit_nature.units import DistanceUnit
from qiskit_nature.second_q.drivers import PySCFDriver
from qiskit_nature.second_q.algorithms import (
GroundStateEigensolver,
NumPyMinimumEigensolverFactory,
)
from qiskit_nature.second_q.mappers import JordanWignerMapper, QubitConverter
# # Building the atom/molecule
driver = PySCFDriver(
atom="H 0 0 0 ",
basis="sto3g",
charge=0,
spin=1,
unit=DistanceUnit.ANGSTROM,
)
# PySCFDriver --> A Second-Quantization driver for Qiskit Nature using the PySCF library.
driver.__dict__
# # Running the driver
# - PySCFDriver.run( ) --> return ElectronicStructureProblem( )
# 
# - Attribute: Hamiltonian, nuclear repulsion_energy, num_alpha, etc.
problem = driver.run()
problem.__dict__
problem.basis.__dict__
problem.properties.__dict__
# ElectronicStructureProblem( ).hamiltonian --> return ElectronicEnergy( )
# ElectronicEnergy( ) --> return Electronic_Integrals( )
hamiltonian = problem.hamiltonian
hamiltonian.__dict__
#
# - Electronic_Integrals stores 3 PolynomialTensor
#
hamiltonian.electronic_integrals.__dict__
coefficients = hamiltonian.electronic_integrals
nr_energy = hamiltonian.constants
print(coefficients.alpha)
print(nr_energy)
# 
# "+-" = ET + EV \
# "++--" = EJ = -EK
# EJ = ionic energy
hamiltonian.register_length
hamiltonian.nuclear_repulsion_energy # NOT included in the second_q_op above
# # Transformation to Fermionic Operators
second_q_op = hamiltonian.second_q_op()
print(second_q_op)
# 
# 
# 
# # Qubit Converter JW
# 
mapper = JordanWignerMapper()
converter = QubitConverter(JordanWignerMapper())
qubit_jw_op = mapper.map(second_q_op)
print(qubit_jw_op)
qubit_op = converter.convert(second_q_op)
qubit_op
converter.__dict__
from qiskit_nature.operators.second_quantization import FermionicOp
def label_to_qubit(label, converter):
qubit_converter = QubitConverter(converter)
f_op = FermionicOp(label)
qubit_op = qubit_converter.convert(f_op)
return qubit_op
for k in ("+_0", "+_1", "+_2", "+_3", "+_4"):
qubit_op = label_to_qubit(k, JordanWignerMapper())
print("{}:\n {}\n".format(k, qubit_op))
print(second_q_op)
print()
print(qubit_jw_op)
print()
for k in second_q_op:
qubit_op = label_to_qubit(k, JordanWignerMapper())
print("{}:\n {}\n".format(k, qubit_op))
# 
# # Ground State Energy Calculation with the JW Transformation
converter = QubitConverter(JordanWignerMapper())
solver = GroundStateEigensolver(
converter,
NumPyMinimumEigensolverFactory(),
)
# # Solver results
result = solver.solve(problem)
print(result)
dict_res = result.__dict__
no = 0
for key, value in dict_res.items():
no += 1
print(no, key, " : ", value, "\n")
result.eigenstates
NR = result._nuclear_repulsion_energy
GE = result._computed_energies.item()
Total = GE + NR
print(NR, GE, Total)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from stl import mesh #
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Summary:
# In this notebook we will do the following:
# 1. Explore the data from **ANSUR-II Anthropometric Data**, a public dataset drawn from the [2012 U.S. Army Anthropometric Survey](http://mreed.umtri.umich.edu/mreed/downloads.html), for some interesting observations.
# 2. Focus on the **wrist circumference measurements** from the ANSUR-II data, and see how they compare with **data used for sizing Apple Watch bands**.
# 3. **Import and visualize 3-D full body models (in the form of STL files)** generated based on similar anthropometric data, with three different Body Mass Index values. These files were generated from the University of Michigan's [humanshape.org](http://humanshape.org/) website. They were also downsampled to reduce their size using [Autodesk meshmixer](http://www.meshmixer.com/).
#
# 4. **Extract wrist circumference from the imported STL files**, by first extracting the points around the wrist and computing an approximate circumference from them, and see how it compares with the distributions we looked at in the previous steps.
# Let us get started...
# #### Import ANSUR-II data, fix some column name issues and combine male and female data
df_ansur2_female = pd.read_csv(
"../input/ansur-ii/ANSUR II FEMALE Public.csv", encoding="latin-1"
)
df_ansur2_male = pd.read_csv(
"../input/ansur-ii/ANSUR II MALE Public.csv", encoding="latin-1"
)
df_ansur2_female = df_ansur2_female.rename(
columns={"SubjectId": "subjectid"}
) # Fixing a column name issue
df_ansur2_all = pd.concat([df_ansur2_female, df_ansur2_male])
print(
"Shapes of the dataframes (Female,Male,All): "
+ str((df_ansur2_female.shape, df_ansur2_male.shape, df_ansur2_all.shape))
)
df_ansur2_all.head()
def stl2mesh3d(stl_mesh):
    # stl_mesh is read by numpy-stl from an STL file; it is an array of faces/triangles (i.e. three 3d points)
# this function extracts the unique vertices and the lists I, J, K to define a Plotly mesh3d
p, q, r = stl_mesh.vectors.shape # (p, 3, 3)
# the array stl_mesh.vectors.reshape(p*q, r) can contain multiple copies of the same vertex;
# extract unique vertices from all mesh triangles
vertices, ixr = np.unique(
stl_mesh.vectors.reshape(p * q, r), return_inverse=True, axis=0
)
I = np.take(ixr, [3 * k for k in range(p)])
J = np.take(ixr, [3 * k + 1 for k in range(p)])
K = np.take(ixr, [3 * k + 2 for k in range(p)])
return vertices, I, J, K
mymesh = [
mesh.Mesh.from_file(
"/kaggle/input/humanshapestlfiles/stature_1773_shs_0p52_age_38_bmi_16.stl"
),
mesh.Mesh.from_file(
"/kaggle/input/humanshapestlfiles/stature_1773_shs_0p52_age_38_bmi_37.stl"
),
mesh.Mesh.from_file(
"/kaggle/input/humanshapestlfiles/stature_1773_shs_0p52_age_38_bmi_55.stl"
),
] # https://github.com/empet/Datasets/blob/master/Rose_Petal.stl
fig = make_subplots(
rows=1,
cols=3,
specs=[[{"is_3d": True}, {"is_3d": True}, {"is_3d": True}]],
subplot_titles=("BMI: 16", "BMI: 37", "BMI: 55"),
print_grid=False,
)
for i in [1, 2, 3]:
vertices, I, J, K = stl2mesh3d(mymesh[i - 1])
triangles = np.stack((I, J, K)).T
x, y, z = vertices.T
fig.append_trace(
go.Mesh3d(
x=x,
y=y,
z=z,
i=I,
j=J,
k=K,
showscale=False,
flatshading=False,
lighting=dict(
ambient=0.5,
diffuse=1,
fresnel=4,
specular=0.5,
roughness=0.05,
facenormalsepsilon=0,
),
),
row=1,
col=i,
)
fig.update_layout(
width=1200,
height=700,
template="plotly_dark",
)
# set a shared camera view and a manual aspect ratio for the first scene so the tall, thin body model displays well
camera = dict(eye=dict(x=-1.25, y=-0.25, z=-0.25))
fig.update_layout(
scene_aspectmode="manual",
scene_aspectratio=dict(x=0.2, y=0.6, z=1),
scene_camera=camera,
)
# aspect ratio for the second scene (BMI 37 model)
fig.update_layout(
scene2_aspectmode="manual",
scene2_aspectratio=dict(x=0.25, y=0.6, z=1),
scene2_camera=camera,
)
# aspect ratio for the third scene (BMI 55 model)
fig.update_layout(
scene3_aspectmode="manual",
scene3_aspectratio=dict(x=0.3, y=0.6, z=1),
scene3_camera=camera,
)
for i in fig["layout"]["annotations"]:
i["font"] = dict(size=25, color="#ffffff")
fig.show()
points = [[], [], []]
for i in range(3):
points_ = []
for triangle in list(mymesh[i].vectors):
for point in triangle:
points_.append(point)
points[i] = np.array(points_)
def nearest_neighbour_sort(df):
    # order the wrist boundary points into a closed loop by repeatedly hopping to
    # the nearest remaining point, starting from (and returning to) point 0
df["Id"] = list(range(df.shape[0]))
ids = df.Id.values[1:]
xy = np.array([df.X.values, df.Y.values]).T[1:]
path = [
0,
]
while len(ids) > 0:
last_x, last_y = df.X[path[-1]], df.Y[path[-1]]
dist = ((xy - np.array([last_x, last_y])) ** 2).sum(-1)
nearest_index = dist.argmin()
path.append(ids[nearest_index])
ids = np.delete(ids, nearest_index, axis=0)
xy = np.delete(xy, nearest_index, axis=0)
path.append(0)
return path
wrist_points = []
for wrist in points:
wrist_points_ = pd.DataFrame(
np.array(
list(
set(
[
tuple(item)
for item in wrist
if ((np.abs(item[2] - 920) < 5) and item[1] > 300)
]
)
)
),
columns=["X", "Y", "Z"],
)
wrist_points_ = wrist_points_.loc[
nearest_neighbour_sort(wrist_points_),
].reset_index(drop=True)
wrist_points_["distance"] = np.concatenate(
(
[0.0],
np.cumsum(
np.sqrt(
(wrist_points_.X[1:].values - wrist_points_.X[:-1].values) ** 2
+ (wrist_points_.Y[1:].values - wrist_points_.Y[:-1].values) ** 2
)
),
)
)
wrist_points.append(wrist_points_)
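# Illustrative addition (assumes the STL coordinates are in millimetres, as the
# humanshape.org exports appear to be): because nearest_neighbour_sort closes the
# loop back to its starting point, the last cumulative distance is an approximate
# wrist circumference for each body model.
for label, wp in zip(["BMI 16", "BMI 37", "BMI 55"], wrist_points):
    circumference_mm = wp["distance"].iloc[-1]
    print(f"{label}: approximate wrist circumference ~ {circumference_mm:.1f} mm")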
# Create three subplots and unpack the output array immediately
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex="all", sharey="all")
ax1.plot(wrist_points[0]["X"], wrist_points[0]["Y"])
ax1.set_title("BMI: 16 Wrist Size")
ax2.plot(wrist_points[1]["X"], wrist_points[1]["Y"])
ax2.set_title("BMI: 37 Wrist Size")
ax3.plot(wrist_points[2]["X"], wrist_points[2]["Y"])
ax3.set_title("BMI: 55 Wrist Size")
wrist_points[2]
|
import os
import pickle
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import torch
import torch.utils.data as data_utils
from torch.utils.data import Dataset, DataLoader
from torch import nn
from torchsummary import summary
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
for dirname, _, filenames in os.walk("/kaggle/working"):
for filename in filenames:
print(os.path.join(dirname, filename))
DATA_DIR = "/kaggle/working/cifar-10-batches-py"
# ## Let's explore the data format
# The complete details of format of the data is given [here](https://www.cs.toronto.edu/~kriz/cifar.html).
def unpickle(file):
with open(file, "rb") as fo:
dict = pickle.load(fo, encoding="bytes")
return dict
metadata = unpickle(os.path.join(DATA_DIR, "batches.meta"))[b"label_names"]
metadata = [m.decode("utf-8") for m in metadata]
metadata
data_batch_1 = unpickle(os.path.join(DATA_DIR, "data_batch_1"))
list(data_batch_1.keys())
print(f"Batch label: {data_batch_1[b'batch_label']}")
print(f"Shape of Labels: {len(data_batch_1[b'labels'])}")
print(f"Actual image data shape: {data_batch_1[b'data'].shape}")
print(f"Filenames: {len(data_batch_1[b'filenames'])}")
# The returned dict contains the following keys:
# * **batch_label:** The label of the batch
# * **labels:** Labels of the images in the `data` key, used for training
# * **data:** Flattened colour images for training
# * **filenames:** Names of the files the images were read from (not needed in our case)
def load_data(data_type="TRAIN"):
X, Y = [], []
if data_type == "TRAIN":
for i in range(5):
batch = unpickle(os.path.join(DATA_DIR, f"data_batch_{i+1}"))
X.append(batch[b"data"])
Y.append(batch[b"labels"])
else:
test_batch = unpickle(os.path.join(DATA_DIR, f"test_batch"))
X.append(test_batch[b"data"])
Y.append(test_batch[b"labels"])
return torch.from_numpy(np.concatenate(np.array(X), axis=0)), torch.from_numpy(
np.concatenate(np.array(Y), axis=0)
)
X_train, Y_train = load_data()
X_test, Y_test = load_data("TEST")
# ## Exploratory Data Analysis
print(f"Shape of X_train: {X_train.shape}")
print(f"Shape of Y_train: {Y_train.shape}")
print(f"Shape of X_test: {X_test.shape}")
print(f"Shape of Y_test: {Y_test.shape}")
# From the above results, we have 50k training images and 10k testing images. The training images will be further split into training and validation sets.
X_train, X_val, Y_train, Y_val = train_test_split(
X_train.cpu().detach().numpy(),
Y_train.cpu().detach().numpy(),
test_size=0.1,
random_state=666,
)
# Convert to PyTorch tensor
X_train = torch.from_numpy(X_train)
X_val = torch.from_numpy(X_val)
Y_train = torch.from_numpy(Y_train)
Y_val = torch.from_numpy(Y_val)
print(f"Shape of X_train: {X_train.shape}")
print(f"Shape of Y_train: {Y_train.shape}")
print(f"Shape of X_test: {X_val.shape}")
print(f"Shape of Y_test: {Y_val.shape}")
IMG_SIZE = 32
CHANNELS = 3
# Visualize few samples of training dataset
fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(16, 8))
count = 0
for row in ax:
for col in row:
col.imshow(
torch.stack(
[
X_train[count, :][:1024].reshape(IMG_SIZE, IMG_SIZE),
X_train[count, :][1024:2048].reshape(IMG_SIZE, IMG_SIZE),
X_train[count, :][2048:].reshape(IMG_SIZE, IMG_SIZE),
],
axis=2,
)
)
col.set_title(metadata[Y_train[count]])
count += 1
plt.show()
# ### Distribution of class
sns.set(rc={"figure.figsize": (13, 8)})
ax = sns.distplot(Y_train, kde=False)
ax.set(xlabel="Labels", ylabel="# of records", title="Distribution of targets")
plt.show()
# The classes are balanced: roughly 4,500 training records remain for each of the 10 classes after the validation split.
# ## Creating custom PyTorch data generator and Data Loader
# References:
# * https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
# * https://github.com/utkuozbulak/pytorch-custom-dataset-examples
# * https://stackoverflow.com/questions/41924453/pytorch-how-to-use-dataloaders-for-custom-datasets
#
class CFAR10Dataset(Dataset):
"""
Custom CIFAR-10 dataset
"""
def __init__(self, X, Y):
self.X = X
self.Y = Y
def __len__(self):
return len(self.X)
def __getitem__(self, index):
return (
torch.stack(
[
self.X[index, :][:1024].reshape(IMG_SIZE, IMG_SIZE),
self.X[index, :][1024:2048].reshape(IMG_SIZE, IMG_SIZE),
self.X[index, :][2048:].reshape(IMG_SIZE, IMG_SIZE),
],
axis=2,
)
.permute(2, 1, 0)
.float()
/ 255,
self.Y[index],
)
batch_size = 4096
train_dataset = CFAR10Dataset(X_train, Y_train)
train_loader = data_utils.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
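# A minimal sketch (not used further in this notebook): validation and test
# loaders built with the same CFAR10Dataset wrapper, so accuracy could be
# evaluated batch-wise instead of pushing all images through the model at once.
val_loader = data_utils.DataLoader(
    CFAR10Dataset(X_val, Y_val), batch_size=batch_size, shuffle=False
)
test_loader = data_utils.DataLoader(
    CFAR10Dataset(X_test, Y_test), batch_size=batch_size, shuffle=False
)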
# ## Basic CNN Model using PyTorch
# Let's create a CNN model. Architecture reference: https://www.kaggle.com/kaushal2896/bengali-graphemes-starter-eda-multi-output-cnn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.BatchNorm2d(num_features=32),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1),
nn.BatchNorm2d(num_features=64),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.layer3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.BatchNorm2d(num_features=128),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.layer4 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), padding=1),
nn.ReLU(True),
nn.BatchNorm2d(num_features=256),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(5, 5), padding=2),
nn.ReLU(True),
nn.Dropout2d(p=0.3),
)
self.flatten = nn.Flatten()
self.fc1 = nn.Linear(in_features=1024, out_features=1024)
self.fc1_dropout = nn.Dropout2d(p=0.3)
self.fc2 = nn.Linear(in_features=1024, out_features=512)
self.fc3 = nn.Linear(in_features=512, out_features=10)
def forward(self, X):
output = self.layer1(X)
output = self.layer2(output)
output = self.layer3(output)
output = self.layer4(output)
output = self.flatten(output)
output = self.fc1(output)
output = self.fc1_dropout(output)
output = self.fc2(output)
output = self.fc3(output)
return output
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Model().to(device)
# Print summary of our model
summary(model, input_size=(CHANNELS, IMG_SIZE, IMG_SIZE))
LEARNING_RATE = 0.1
EPOCHS = 100
CLASSES = 10
model = Model().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
criterion = nn.CrossEntropyLoss()
# https://discuss.pytorch.org/t/multi-class-classifier/29901/2
total_steps = len(train_loader)
loss_list, acc_list = [], []
for epoch in range(EPOCHS):
for i, (x_train, y_train) in enumerate(train_loader):
x_train = x_train.to(device)
y_train = y_train.to(device)
# Forward pass
y_preds = model(x_train)
# Calculate loss
loss = criterion(y_preds, y_train)
if i + 1 == total_steps:
            loss_list.append(loss.item())  # store a plain float, not a tensor, so it can be plotted later
# Backpropagate
optimizer.zero_grad() # Reason: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch
loss.backward()
optimizer.step()
# Calculate the accuracy
total = len(y_preds)
_, predicted = torch.max(y_preds.data, axis=1)
correct = (predicted == y_train).sum().item()
if i + 1 == total_steps:
acc_list.append(correct / total)
# # Calculate validation accuracy
# val_preds = model(torch.stack([X_val[:1024].reshape(IMG_SIZE, IMG_SIZE), X_val[1024: 2048].reshape(IMG_SIZE, IMG_SIZE), X_val[2048:].reshape(IMG_SIZE, IMG_SIZE)], axis=2).permute(2, 1, 0).float()/255)
# total = len(val_preds)
# _, predicted = torch.max(val_preds.data, axis=1)
# correct = (predicted == Y_val).sum().item()
# if i+1 == total_steps:
# acc_list.append(correct / total)
if (i + 1) % 10 == 0:
print(
"Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%, ".format(
epoch + 1,
EPOCHS,
i + 1,
total_steps,
loss.item(),
(correct / total) * 100,
)
)
print()
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), loss_list, label="train_loss")
plt.title("Loss")
plt.xlabel("# of epochs")
plt.ylabel("Loss")
plt.legend(loc="upper right")
plt.show()
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), acc_list, label="train_accuracy")
plt.title("Accuracy")
plt.xlabel("# of epochs")
plt.ylabel("Accuracy")
plt.legend(loc="upper right")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
from scipy import stats
import statsmodels.api as sm
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
data = pd.read_csv("../input/autonobot-sp20-challenge-2/train_Level1Data.csv", sep=",")
data.head(20)
data.columns
plt.figure(figsize=(20, 20), facecolor="white")
plt.scatter(data["X"], data["Y"])
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Scatter Plot for X & Y")
plt.tight_layout()
plt.grid(color="black", linestyle="dotted")
plt.show()
trainx = data.columns[0]
trainy = data.columns[1]
X = data[trainx]
Y = data[trainy]
X2 = sm.add_constant(X)
model = sm.OLS(Y, X2)
model_ = model.fit()
print(model_.summary())
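# Illustrative addition: overlay the fitted OLS line on the raw scatter to make
# the quality of the linear fit visible.
fitted = np.asarray(model_.predict(X2))
order = np.argsort(X.values)
plt.figure(figsize=(10, 6))
plt.scatter(X, Y, alpha=0.5, label="data")
plt.plot(X.values[order], fitted[order], color="red", linewidth=2, label="OLS fit")
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.show()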
|
# # WildCats Classification using a fine-tuned ResNet101 and fully connected layers
# ## Overview
# This dataset consists of 10 unique classes of wild cats. Because the number of classes is so low, we can use a relatively old architecture to keep the parameter count low and the code simple. ResNet is already trained on enough images to contain convolutional features that can make out what these animals look like. All we have to do is add a fully connected head to predict the classes properly. We will build this model with PyTorch.
# > Make sure to run this on a GPU accelerator to speed up computation.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Let's read out the csv in pandas to see all the available data
PATH = "/kaggle/input/cats-in-the-wild-image-classification"
wildcats_data = pd.read_csv(f"{PATH}/WILDCATS.CSV")
wildcats_data.head()
import torch
torch.manual_seed(101)
# ## DataSets and DataLoaders
# The main filepaths are listed in the pandas dataframe. We can use each path to load the images into memory. We don't want to load all the images into memory at once. Instead we want to use a DataLoader to generate these images on the fly without worrying about killing our RAM. Each iteration of the dataloader will lazy load all this data. We create a custom **WildCatDataset** to hold the information we will need for training and evaluation. When we iterate through the dataset, we want to get back a dictionary with the labels, images and names.
# Let's use a custom dataset to structure the data correctly for later training
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from PIL import Image
class WildCatDataset(Dataset):
def __init__(self, data, transform=None):
self.data = data.reset_index(drop=True)
self.transform = transform
self.initial_transform = transforms.Compose(
[
transforms.PILToTensor(),
]
)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
class_id = self.data.loc[idx, "class id"]
file_path = self.data.loc[idx, "filepaths"]
name = self.data.loc[idx, "labels"]
# Open image and use intial transform to turn to a tensor
with Image.open(f"{PATH}/{file_path}") as img:
image = transforms.functional.rotate(self.initial_transform(img), 90)
if self.transform:
image = self.transform(image)
# Create a dictionary to hold the data
sample = {"label": class_id, "image": image, "name": name}
return sample
# As stated earlier, we have 10 data classes. The training split is rather large, taking up almost 95 percent of the data. Usually we want roughly an 80 percent split for training and 10 percent each for test and validation. I decided to go with a roughly *90/5/5 train/test/valid* split by concatenating additional wildcats data to test and valid.
# > If you do not shuffle (sample) your dataframe before concatenating rows to test and valid, it will cause problems when you are training! This is
# because you may not have enough examples of a particular wild cat in your training set. Be careful when you are reshaping your data!
# Let's separate each data set
data = wildcats_data[wildcats_data["data set"] == "train"].sample(frac=1)
train_data = data[:2200]
test_data = pd.concat(
[wildcats_data[wildcats_data["data set"] == "test"], data[2200:2300]]
)
valid_data = pd.concat(
    [wildcats_data[wildcats_data["data set"] == "valid"], data[2300:]]
)  # start at 2300 so the validation rows do not overlap with the rows added to test
len(train_data), len(test_data), len(valid_data)
scientific_names = wildcats_data.groupby("labels").size().to_dict().items()
plt.bar(
[name for name, _ in scientific_names], [count for _, count in scientific_names]
)
plt.tick_params(axis="x", which="major", pad=15, labelrotation=90)
plt.title("Number of Classifications")
plt.show()
labels = ["train", "test", "valid"]
plt.title("Data Splits")
plt.pie([train_data.shape[0], test_data.shape[0], valid_data.shape[0]], labels=labels)
plt.show()
# ## Batch Size
# I decided to go with a batch size of 64 after extensive testing. 32 seemed to work decently, but because of the small dataset it made the loss jump around. Typically, the smaller the batch size, the more variable your loss will be. 64 worked exceptionally well for this specific classification task.
from torch.utils.data import DataLoader
batch_size = 64
train_dataloader = DataLoader(
WildCatDataset(train_data), batch_size=batch_size, shuffle=True
)
test_dataloader = DataLoader(
WildCatDataset(test_data), batch_size=batch_size, shuffle=False
)
valid_dataloader = DataLoader(
WildCatDataset(valid_data), batch_size=batch_size, shuffle=False
)
single_batch = next(iter(train_dataloader))
fig, axes = plt.subplots(batch_size // 8, 8, figsize=(10, 10))
axes = axes.flatten()
for i, ax in enumerate(axes):
# Switch channel with height index to show up in matplot imshow
ax.imshow(torch.transpose(single_batch["image"][i], 0, 2))
ax.set_axis_off()
# # Model Design
# I went with a ResNet101 because of its simplicity in design and its effectiveness in small classification tasks like this one. **ResNet101** has 101 convolutional layers, yet has fewer parameters than some older architectures thanks to residual connections and some clever design. This means faster training than some of the model's contemporaries. The only thing we need to do is remove the last layer of ResNet. Why? Because the model is pretrained with a final prediction layer built for a completely unrelated classification task. In order to train our model on wild cats, we must replace this layer with our own fully connected head. You can do this with almost any pretrained model, but you need to know where the layers are in the model to do it properly. I ended up using two 2048-unit fully connected layers after some testing, as that seemed to work best.
# > Play around with the number of layers, and sizes to find what works for you!
from torchvision import models
# Let's now setup the model
# We will use the pretrained ResNet101
class Model(torch.nn.Module):
def __init__(self, num_classes=10):
super(Model, self).__init__()
# Load the ResNet model
resnet = models.resnet101(weights=models.ResNet101_Weights.DEFAULT)
# I removed this because I get better results fine tuning the model
# for param in resnet.parameters():
# param.requires_grad = False
# Reconstruct the model without the last layer
self.net = torch.nn.Sequential(*list(resnet.children())[:-1])
# Build fully connected layers to handle predictions
self.fcs = torch.nn.Sequential(
torch.nn.Linear(2048, 2048),
torch.nn.BatchNorm1d(2048),
torch.nn.ReLU(),
torch.nn.Linear(2048, num_classes),
)
def forward(self, x):
        # Pass the input through the ResNet backbone
x = self.net(x)
# Flatten the output of ResNet
x = x.view(x.shape[0], -1)
# Send to fully connected layer
return self.fcs(x)
# ## Inside ResNet Before Fine Tuning
# What's going on inside the ResNet? Based on the images, the activations look almost like noise as the image goes deeper into the network. What is actually happening is that the network is learning specific features about the image. The initial convolutional layer is doing some type of edge detection; this is a filter the network decided to learn based on its training. The more the image is filtered, the more abstract the features become.
# Let's dig into the convolution networks and see what images look like
def view_resnet_outputs(model, img):
img_input = img.view(1, 3, 224, 224).float()
outputs = []
# loop through each sequential layer and save the outputs
for layer in model.net.children():
img_input = layer(img_input)
if isinstance(layer, torch.nn.Conv2d) or isinstance(layer, torch.nn.Sequential):
outputs.append(img_input)
# View outputs
fig, axes = plt.subplots(2, 3, figsize=(10, 6))
axes = axes.flatten()
axes[0].imshow(img.transpose(2, 0))
for i, output in enumerate(outputs, 1):
axes[i].imshow(output[0].transpose(2, 0).cpu().detach().numpy()[:, :, :3])
plt.show()
model = Model()
img = single_batch["image"][0]
view_resnet_outputs(model, img)
# ## Training
# We create a training function to take care of the iteration process. We take in a dataloader and simply iterate through each batch. Notice the *.float()* calls: because the tensors wrapping the images and labels initially have dtype uint8, we must convert them to float tensors. Then we go through the normal PyTorch training process. Once we calculate the loss and predict a class, we compute the stats for later analysis.
# Setup training
def train(model, criterion, optimizer, dataloader, device="cpu", num_epochs=1):
loss_history = []
accuracy_history = []
for epoch in range(num_epochs):
running_loss = 0.0
total = 0.0
correct = 0.0
for i, data in enumerate(dataloader):
# Get the inputs and move them to the device
inputs, labels = data["image"].float(), data["label"].long()
inputs, labels = inputs.to(device), labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Get prediction for accuracy
_, predicted = torch.max(outputs, axis=1)
# Calculate stats
running_loss += loss.item()
total += labels.size(0)
correct += (predicted == labels).sum().item()
# Print statistics
            if i % 10 == 0 and i > 0:  # print training stats every 10 mini-batches
                print(
                    "[epoch: %d, batch: %5d] loss: %.3f"
                    % (epoch + 1, i + 1, running_loss / 10)
                )
print(
"Accuracy of the model on the %d images: %.2f %%"
% (total, 100 * correct / total)
)
                loss_history.append(running_loss / 10)
accuracy_history.append(100 * correct / total)
running_loss = 0.0
total = 0.0
correct = 0.0
return {"loss_history": loss_history, "accuracy_history": accuracy_history}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Setup training variables
learning_rate = 0.1
weight_decay = 0.001
num_classes = len(wildcats_data["class id"].unique())
num_epochs = 6
model = Model(num_classes).to(device)
# Define the loss function and the optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
history = train(
model, criterion, optimizer, train_dataloader, device=device, num_epochs=num_epochs
)
# ## Testing
# It doesn't take long to converge. After roughly 4 epochs the loss bottoms out; training any longer may start overfitting to the training data.
ax = plt.subplot(1, 2, 1)
ax.plot(
np.linspace(0, num_epochs, len(history["loss_history"])), history["loss_history"]
)
ax.set_title("Training Loss")
ax.set_ylabel("loss")
ax = plt.subplot(1, 2, 2)
ax.plot(
np.linspace(0, num_epochs, len(history["accuracy_history"])),
history["accuracy_history"],
)
ax.set_title("Accuracy")
plt.show()
def evaluate(model, dataloader):
with torch.inference_mode():
total, correct = 0, 0
for data in dataloader:
# Get the inputs and move them to the device
inputs, labels = data["image"].float(), data["label"].long()
inputs, labels = inputs.to(device), labels.to(device)
# Forward pass
outputs = model(inputs)
_, predicted = torch.max(outputs, axis=1)
# Record the accuracy
total += labels.size(0)
correct += (predicted == labels).sum().item()
# Print the accuracy
print(
"Accuracy of the model on the %d images: %.2f %%"
% (total, 100 * correct / total)
)
# Evaluate test data
print("Test Case")
evaluate(model, test_dataloader)
print("Valid Case")
# Evaluate valid data
evaluate(model, valid_dataloader)
# ## Inside ResNet After Fine Tuning
img = single_batch["image"][2]
# move the fine-tuned model back to the CPU so the CPU image tensors used here
# (and for the confusion matrix below) do not hit a device mismatch
model = model.cpu()
with torch.inference_mode():
    view_resnet_outputs(model, img)
# ## Conclusion
# After testing many iterations of the design, it seems that using two simple fully connected layers of the same size gave the best results without overfitting. We can achieve a pretty high accuracy using an old architecture like ResNet101. Based on other notebooks, it seems InceptionV3 would be the best model to build from, since you don't need to fine-tune it. When I ran my model without fine-tuning, I could only reach roughly 94 to 96%.
# Play around with the settings and see if you can achieve an even higher accuracy!
from torchmetrics import ConfusionMatrix
confmat = ConfusionMatrix(task="multiclass", num_classes=10)
with torch.inference_mode():
images = single_batch["image"].float()
labels = single_batch["label"].long()
outputs = model(images)
_, pred = torch.max(outputs, axis=1)
confusion_matrix = confmat(pred, labels)
plt.imshow(confusion_matrix)
plt.show()
print(confusion_matrix)
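# A small illustrative follow-up (assumes class ids 0-9 map to the label names
# in WILDCATS.CSV): per-class accuracy for this single training batch, read off
# the confusion matrix above.
class_names = (
    wildcats_data[["class id", "labels"]]
    .drop_duplicates()
    .sort_values("class id")["labels"]
    .tolist()
)
per_class_acc = confusion_matrix.diag() / confusion_matrix.sum(dim=1).clamp(min=1)
for name, acc in zip(class_names, per_class_acc):
    print(f"{name}: {acc.item():.2%}")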
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Use seaborn style defaults and set the default figure size
sns.set(rc={"figure.figsize": (11, 4)})
from windrose import WindroseAxes
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Installing windrose to have the wind direction overview
#!pip install windrose
# Importing the data needed for the analysis into panda dataframe
df = pd.read_csv("../input/wind-turbine-scada-dataset/T1.csv")
# checking the first 5 set of data in the dataframe
df.head()
# checking if the dataframe contains null
df.isna().sum()
# Convert Date/Time to the index and drop the Date/Time column
df.index = df["Date/Time"]
df.drop(["Date/Time"], axis=1, inplace=True)
# New DataFrame after dropping column Date/Time
df.head()
# plotting each data
cols_plot = [
"LV ActivePower (kW)",
"Wind Speed (m/s)",
"Theoretical_Power_Curve (KWh)",
"Wind Direction (°)",
]
axes = df[cols_plot].plot(
marker=".", alpha=0.5, linestyle="None", figsize=(11, 9), subplots=True
)
# Plot the data distributions
plt.figure(figsize=(10, 8))
for i in range(4):
plt.subplot(2, 2, i + 1)
sns.kdeplot(df.iloc[:, i], shade=True)
plt.title(df.columns[i])
plt.tight_layout()
plt.show()
# Create wind speed and direction variables
ax = WindroseAxes.from_ax()
ax.bar(
df["Wind Direction (°)"],
df["Wind Speed (m/s)"],
normed=True,
opening=0.8,
edgecolor="white",
)
ax.set_legend()
# The wind rose plot above shows that the wind comes mostly from the north-east, while some significant wind also comes from the south-west.
# Checking for maximum and minimum value of the wind direction to help in choosing the right binning value
print(df["Wind Direction (°)"].max())
print(df["Wind Direction (°)"].min())
# Binning the data by wind direction
bins_range = np.arange(0, 375, 45)
print(bins_range)
# Write a short code to map the bins data
def binning(x, bins):
kwargs = {}
if x == max(bins):
kwargs["right"] = True
bin = bins[np.digitize([x], bins, **kwargs)[0]]
bin_lower = bins[np.digitize([x], bins, **kwargs)[0] - 1]
return "[{0}-{1}]".format(bin_lower, bin)
df["Bin"] = df["Wind Direction (°)"].apply(binning, bins=bins_range)
# group the binned data by mean and std
grouped = df.groupby("Bin")
grouped_std = grouped.std()
grouped_mean = grouped.mean()
grouped_mean.head()
# The analysis above shows that the highest average wind speed was recorded around 180°-225°.
# Contrary to the initial impression from the wind rose plot, the south to south-west sector looks like a good site for a wind turbine because it has the highest average wind speed. That sector also has the highest theoretical power and LV active power.
# Checking for maximum and minimum value of the windspeed to help in choosing the right binning value
print(df["Wind Speed (m/s)"].max())
print(df["Wind Speed (m/s)"].min())
# Binning the data by wind speed
bins_range_ws = np.arange(0, 26, 0.5)
df["Bin"] = df["Wind Speed (m/s)"].apply(binning, bins=bins_range_ws)
# Group by windspeed bin
grouped = df.groupby("Bin")
grouped_std = grouped.std()
grouped_mean = grouped.mean()
grouped_mean
# lets rearrange the index for proper visualisation
step = bins_range_ws[1] - bins_range_ws[0]
new_index = ["[{0}-{1}]".format(x, x + step) for x in bins_range_ws]
new_index.pop(-1)  # drop the last label, [25.5-26.0], since no data falls in that bin
grouped_mean = grouped_mean.reindex(new_index)
# Rearranged and visulaizing the mean of each windspeed bin
grouped_mean
# Looking at the table above, it can be assumed that the cut-in wind speed is 3.0-3.5 m/s, the rated wind speed is 12.5-13.0 m/s and the cut-out wind speed is around 25 m/s. This will help us determine better filter conditions in the power curve analysis.
# Power Curve Analysis
# Theoretical power curve
plt.scatter(df["Wind Speed (m/s)"], df["Theoretical_Power_Curve (KWh)"])
plt.ylabel("Theoretical_Power (KWh)")
plt.xlabel("Wind speed (m/s)")
plt.grid(True)
plt.legend([" Theoretical_Power_Curve"], loc="upper left")
plt.show()
# LV ActivePower (kW) CP_CURVE
plt.scatter(df["Wind Speed (m/s)"], df["LV ActivePower (kW)"])
plt.ylabel("LV ActivePower (kW)")
plt.xlabel("Wind speed (m/s)")
plt.grid(True)
plt.legend([" LV ActivePower (kW) CP_CURVE"], loc="upper left")
plt.show()
# Using the information gathered above, we can now set a filter condition for our LV ActivePower (kW) power curve
# Condition 1
# The first step is the removal of downtime events, which can be identified as near-zero power at high wind speeds.
new = df[(df["Wind Speed (m/s)"] < 4.5) | (df["LV ActivePower (kW)"] > 100.0)]
# Condition 2
new_1 = new[(new["Wind Speed (m/s)"] < 12.5) | (new["LV ActivePower (kW)"] >= 3000)]
# Condition 3
new_2 = new_1[
(new_1["Wind Speed (m/s)"] < 9.5) | (new_1["LV ActivePower (kW)"] >= 1500)
]
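# A short illustrative check (not part of the original filtering logic): how many
# records each downtime/underperformance filter condition removed.
print("Raw records:           ", len(df))
print("After condition 1:     ", len(new))
print("After condition 2:     ", len(new_1))
print("After condition 3:     ", len(new_2))
print("Total records filtered:", len(df) - len(new_2))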
# Theoretical_Power_Curve and Filtered LV ActivePower (kW) CP_CURVE Visualisation
plt.scatter(new_2["Wind Speed (m/s)"], new_2["LV ActivePower (kW)"])
plt.scatter(
df["Wind Speed (m/s)"],
df["Theoretical_Power_Curve (KWh)"],
label="Theoretical_Power_Curve (KWh)",
)
plt.ylabel("Power (kW)")
plt.xlabel("Wind speed (m/s)")
plt.grid(True)
plt.legend(
["Theoretical_Power_Curve and Filtered LV ActivePower (kW) CP_CURVE"],
loc="upper left",
)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import preprocessing
from sklearn import utils
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
# # 1. Data Load
#
df = pd.read_csv("../input/heart-disease-uci/heart.csv", sep=",")
print("Row: ", df.shape[0])
print("Column: ", df.shape[1])
df.head(10)
df.describe()
# # 2. Data Cleaning
df = df.dropna()
correlations = df.corr()
correlations
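# A small visual aid (illustrative addition; assumes matplotlib and seaborn are
# available in the environment): heatmap of the correlation matrix above.
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(12, 10))
sns.heatmap(correlations, cmap="coolwarm", center=0, annot=False)
plt.title("Feature correlation matrix")
plt.show()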
# # 3. Features Selection
y = df.iloc[:, -1]
df = df[
[
"age",
"sex",
"cp",
"trestbps",
"chol",
"fbs",
"restecg",
"thalach",
"exang",
"oldpeak",
"slope",
"ca",
"thal",
]
]
df.head(2)
# # 4. Train and Test Dataset
xtrain, xtest, ytrain, ytest = train_test_split(df, y, test_size=0.20, random_state=0)
scaler = StandardScaler()
xtrain = scaler.fit_transform(xtrain)
xtest = scaler.transform(xtest)  # reuse the scaler fitted on the training set; do not refit on the test data
# # 5. Neural Network Model
from sklearn.neural_network import MLPClassifier
# 2 hidden layers, Neurons=2/3*input layer+output layer=10, learning rate=0.0001, activation function= Relu, Solver= Stochastic gradient descent
# model
mlp = MLPClassifier(
activation="relu",
solver="sgd",
alpha=1e-5,
learning_rate_init=0.0001,
hidden_layer_sizes=(10, 10),
max_iter=10000,
random_state=200,
)
mlp.fit(xtrain, ytrain)
# # 6. Prediction
# prediction
predictions = mlp.predict(xtest)
Model_accuracy = (mlp.score(xtest, ytest)) * 100
Model_accuracy
# # 7. Confusion Matrix
print(confusion_matrix(ytest, predictions))
print(classification_report(ytest, predictions))
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import keras as K
import tensorflow as tf
import pandas as pd
import math
import pandas as pd
gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test = pd.read_csv("../input/titanic/test.csv")
X = pd.read_csv("../input/titanic/train.csv")
# print(X)
X.head(3)
y = X["Survived"]
y.head(3)
def clean_data(data):
data["Fare"] = data["Fare"].fillna(data["Fare"].dropna().median())
data["Age"] = data["Age"].fillna(data["Age"].dropna().median())
data.loc[data["Sex"] == "male", "Sex"] = 0
data.loc[data["Sex"] == "female", "Sex"] = 1
data["Embarked"] = data["Embarked"].fillna("S")
data.loc[data["Embarked"] == "S", "Embarked"] = 0
data.loc[data["Embarked"] == "C", "Embarked"] = 1
data.loc[data["Embarked"] == "Q", "Embarked"] = 2
clean_data(X)
clean_data(test)
X.head()
print(X.isnull().sum())
del X["Cabin"]
print(X.isnull().sum())
print("check the nan value in test data")
# cabin has many null so remove
del test["Cabin"]
print(test.isnull().sum())
print(test.isnull().sum())
## Combine train and test into a single list so the same feature-engineering steps can be applied to both
all_data = [X, test]
# Create new feature FamilySize as a combination of SibSp and Parch
for dataset in all_data:
dataset["FamilySize"] = dataset["SibSp"] + dataset["Parch"] + 1
# Define function to extract titles from passenger names
import re
def get_title(name):
    title_search = re.search(r" ([A-Za-z]+)\.", name)
# If the title exists, extract and return it.
if title_search:
return title_search.group(1)
return ""
# Create a new feature Title, containing the titles of passenger names
for dataset in all_data:
dataset["Title"] = dataset["Name"].apply(get_title)
# Group all non-common titles into one single grouping "Rare"
for dataset in all_data:
dataset["Title"] = dataset["Title"].replace(
[
"Lady",
"Countess",
"Capt",
"Col",
"Don",
"Dr",
"Major",
"Rev",
"Sir",
"Jonkheer",
"Dona",
],
"Rare",
)
dataset["Title"] = dataset["Title"].replace("Mlle", "Miss")
dataset["Title"] = dataset["Title"].replace("Ms", "Miss")
dataset["Title"] = dataset["Title"].replace("Mme", "Mrs")
for dataset in all_data:
dataset["Age_Range"] = pd.cut(
dataset["Age"],
bins=[0, 12, 20, 40, 120],
labels=["Children", "Teenage", "Adult", "Elder"],
)
## Create a range (bins) for the Fare feature
for dataset in all_data:
dataset["Fare_Range"] = pd.cut(
dataset["Fare"],
bins=[0, 7.91, 14.45, 31, 120],
labels=["Low_fare", "median_fare", "Average_fare", "high_fare"],
)
traindf = X
testdf = test
all_dat = [traindf, testdf]
for dataset in all_dat:
drop_column = ["Age", "Fare", "Name", "Ticket"]
dataset.drop(drop_column, axis=1, inplace=True)
drop_column = ["PassengerId"]
traindf.drop(drop_column, axis=1, inplace=True)
# print(testdf)
testdf.drop(drop_column, axis=1, inplace=True)
print(testdf)
all_dat
testdf.head(5)
traindf = pd.get_dummies(
traindf,
columns=["Pclass", "Sex", "Title", "Age_Range", "Embarked", "Fare_Range"],
prefix=["Pclass", "Sexy", "Title", "Age_type", "Em_type", "Fare_type"],
)
testdf = pd.get_dummies(
testdf,
columns=["Pclass", "Sex", "Title", "Age_Range", "Embarked", "Fare_Range"],
prefix=["Pclass", "Sexy", "Title", "Age_type", "Em_type", "Fare_type"],
)
del traindf["Survived"]
print(traindf)
print(testdf)
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras import optimizers
# import keras.utils.np_utils.to_categorical
X = X.to_numpy()
y = y.to_numpy()
y = [y]
print(y)
print(np.shape(y))
model = Sequential()
model.add(Dense(50, input_shape=(24,)))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dropout(0.3))
model.add(Dense(250))
model.add(Activation("relu"))
model.add(Dropout(0.1))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(20))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation("tanh"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
# train.plot(kind = 'scatter', x='Age', y = 'Fare', alpha = 0.5, color = 'red')
# plot.show()
model.fit(traindf, y, epochs=70, batch_size=16)
# predicting the results
Y_pred = model.predict(testdf)
Y_pred.dtype
Y_pred = Y_pred.round()
test = pd.read_csv("../input/titanic/test.csv")
predictions = model.predict(testdf)
predictions = pd.DataFrame(predictions.round().astype(int), columns=["Survived"])  # round to 0/1 for the submission format
test = pd.read_csv(os.path.join("../input/titanic/", "test.csv"))
predictions = pd.concat((test.iloc[:, 0], predictions), axis=1)
predictions.to_csv("my_output.csv", sep=",", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# import library
import pandas as pd
import seaborn as sns
import numpy as np
import pandas_profiling
pd.set_option(
"display.float_format", lambda x: "{:.4f}".format(x)
) # Limiting 4 decimal places
print("Pandas version: -", pd.__version__)
print("Numpy version: -", np.__version__)
print("Seaborn version: -", sns.__version__)
# read CSV file
data_input = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
data_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
# First five rows of training set
data_input.head()
# training dataset description
data_input.describe()
# first five rows of test data
data_test.head()
print("Train Data Size: {}".format(data_input.shape))
print("Test Data Size: {}".format(data_test.shape))
report = pandas_profiling.ProfileReport(data_input)
report
data_input.columns
# Calculating missing data in feature columns
data_mis = (data_input.isnull().sum() / len(data_input)) * 100
data_mis = data_mis.drop(data_mis[data_mis == 0].index).sort_values(ascending=False)
data_mis = pd.DataFrame({"Percentage": data_mis})
data_mis["Id"] = data_mis.index
data_mis.reset_index(drop=True, level=0, inplace=True)
data_mis.head()
# Function for Calculating missing data in feature columns
def missing_ratio(data):
    # compute the percentage of missing values per column of the dataframe passed in
    data_mis = (data.isnull().sum() / len(data)) * 100
data_mis = data_mis.drop(data_mis[data_mis == 0].index).sort_values(ascending=False)
data_mis = pd.DataFrame({"Percentage": data_mis})
data_mis["Id"] = data_mis.index
data_mis.reset_index(drop=True, level=0, inplace=True)
return data_mis # .head()
# Plot the missing feature columns by ratio
with sns.axes_style("whitegrid"):
g = sns.catplot(
x="Id", y="Percentage", data=data_mis, aspect=1.5, height=8, kind="bar"
)
g.set_xlabels("Features")
g.fig.suptitle("Percentage of Missing Data in Feature Columns ")
g.set_xticklabels(rotation=45, horizontalalignment="right")
col_num = data_input.select_dtypes([object]).columns  # object (string-typed) columns
col_num
col_num1 = data_input.select_dtypes([np.int64, np.float64]).columns
col_num1
# **Some numerical columns (LotFrontage, GarageYrBlt, MasVnrArea) have missing values. We need to change them to an average value; a minimal sketch of this follows below.**
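# A minimal sketch of the imputation suggested above (assumption: the column mean
# is an acceptable fill value). Done on a copy so the rest of the notebook behaves
# exactly as before.
num_fill_cols = ["LotFrontage", "GarageYrBlt", "MasVnrArea"]
imputed = data_input.copy()
imputed[num_fill_cols] = imputed[num_fill_cols].fillna(imputed[num_fill_cols].mean())
print(imputed[num_fill_cols].isnull().sum())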
# check unique values of all feature columns
for i in data_mis["Id"]:
print(i + " values: ", data_input[i].unique())
# Histogram
import matplotlib.pyplot as plt
data_input.hist(bins=50, figsize=(22, 20))
plt.show()
# **The histograms show the numerical attributes of the training dataset.**
new_data = pd.get_dummies(data_input)
new_test = pd.get_dummies(data_test)
print(new_data.columns)
print(new_test.columns)
repor = pandas_profiling.ProfileReport(new_data)
repor
x = missing_ratio(new_test)
x
# Plot the missing feature columns by ratio
with sns.axes_style("whitegrid"):
g = sns.catplot(x="Id", y="Percentage", data=d, aspect=1.5, height=8, kind="bar")
g.set_xlabels("Features")
g.fig.suptitle("Percentage of Missing Data in Feature Columns ")
g.set_xticklabels(rotation=45, horizontalalignment="right")
pd.set_option("display.max_rows", None)
new_data.max() - new_data.min()
new_data.dtypes
new_data.dropna(axis=1, how="any", thresh=None, subset=None, inplace=True)
new_test.dropna(axis=1, how="any", thresh=None, subset=None, inplace=True)
missing_ratio(new_data)
from sklearn.linear_model import LinearRegression
data = new_data.copy()
target = data.pop("SalePrice")
lr = LinearRegression(fit_intercept=True)
lr.fit(data, target)
from sklearn.metrics import mean_squared_error
print(lr.score(data, target))
prediction = lr.predict(data)
mse = mean_squared_error(target, prediction)
rmse = np.sqrt(mse)
print(rmse)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import seaborn as ss
import matplotlib.pyplot as plt
column = [
"status",
"duration(months)",
"credit_history",
"purpose",
"credit_amount",
"saving_account",
"employment_since",
"installment_rate",
"status/sex",
"guarantors",
"residence_since",
"property",
"age",
"installment_plans",
"housing",
"credits",
"job",
"liable people",
"telephone",
"foreig_worker",
"label",
]
df = pd.read_csv("/kaggle/input/german.data", sep=r"\s+", names=column)
df.head()
job = df.job.loc[df.label == 1]
job1 = df.job.loc[df.label == 2]
plt.hist([job, job1], label=["good", "bad"])
plt.legend(loc="upper right")
y_pos = np.arange(5)
plt.xticks(
y_pos,
["unemployed", "unskilled ", "skilled employee", "management", "highly qualified"],
)
plt.title("Job")
# Job distribution among labels
status = df.status.loc[df.label == 1]
status1 = df.status.loc[df.label == 2]
plt.hist([status, status1], label=["good", "bad"])
plt.legend(loc="upper right")
plt.title("Status Graph")
plt.show()
# Status Distribution among labels
# There is a higher probability of a good credit label when the checking-account
# status is "no checking account" (A14)
from scipy.stats import wilcoxon
data1 = df.query('status == "A14"')
# data2 = df.label.loc[df.status == 'A14']
# stat, p = wilcoxon(data1.status, data1.label)
# print(stat,p)
data1.label.count()
df.query(' status == "A14"').count()
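# A small check of the claim above (illustrative addition): share of "good"
# credit labels (label == 1) for checking-account status A14 versus all others.
good_rate_a14 = (df.loc[df.status == "A14", "label"] == 1).mean()
good_rate_rest = (df.loc[df.status != "A14", "label"] == 1).mean()
print(f"Good-credit rate, status A14:     {good_rate_a14:.2%}")
print(f"Good-credit rate, other statuses: {good_rate_rest:.2%}")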
credit = df.credit_history.loc[df.label == 1]
credit1 = df.credit_history.loc[df.label == 2]
plt.hist([credit, credit1], label=["good", "bad"])
plt.legend(loc="upper right")
# y_pos = np.arange(5)
# plt.xticks(y_pos,['no/all credits','paid back dully ','no credits','delay in paying','critical account'])
plt.title("Credit History")
plt.show()
# Credit History among labels
# No data for class A33
foreign = df.foreig_worker.loc[df.label == 1]
foreign1 = df.foreig_worker.loc[df.label == 2]
plt.hist([foreign, foreign1], 2, label=["good", "bad"])
plt.legend(loc="upper right")
y_pos = np.arange(2)
plt.xticks(y_pos, ["Yes", "No"])
plt.title("Credit History")
plt.show()
# If you are a non-foreign worker, there is a higher chance of being granted a loan
import pandas_profiling as pf
# pf.ProfileReport(df)
|
# # Overview
# #### This kernel is based on [2020 Starter Kernel Women](https://www.kaggle.com/hiromoon166/2020-women-s-starter-kernel)
# #### I added my GBDT pipelines to see the feature importance from LGB and CatBoost.
# Also see my starter for men's games: https://www.kaggle.com/code1110/ncaam20-eda-and-lgb-catb-starter
# # Import Library & Load Data
# Libraries
import numpy as np
import pandas as pd
pd.set_option("max_columns", None)
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("fivethirtyeight")
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
import copy
import datetime
from sklearn.utils import shuffle
from scipy import stats
from sklearn.model_selection import (
    train_test_split,
    StratifiedKFold,
    KFold,
    GroupKFold,
    TimeSeriesSplit,
    cross_val_score,
    GridSearchCV,
    RepeatedStratifiedKFold,
)
from sklearn.preprocessing import StandardScaler, LabelEncoder
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import xgboost as xgb
import lightgbm as lgb
from catboost import CatBoostRegressor, CatBoostClassifier
import optuna
from optuna.visualization import plot_optimization_history
from sklearn import model_selection
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
accuracy_score,
roc_auc_score,
log_loss,
classification_report,
confusion_matrix,
)
import json
import ast
import time
from sklearn import linear_model
# keras
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, LambdaCallback
from keras.optimizers import Adam, SGD
from keras.models import Model
from keras.layers import (
Input,
Layer,
Dense,
Concatenate,
Reshape,
Dropout,
merge,
Add,
BatchNormalization,
GaussianNoise,
)
from keras.layers.embeddings import Embedding
from keras import backend as K
from keras.layers import Layer
from keras.callbacks import *
import tensorflow as tf
import math
import warnings
warnings.filterwarnings("ignore")
import os
import glob
import gc
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
print("Libraries imported!")
# # Model Class
class BaseModel(object):
"""
Base Model Class
"""
def __init__(
self,
train_df,
test_df,
target,
features,
categoricals=[],
n_splits=3,
cv_method="KFold",
group=None,
task="regression",
parameter_tuning=False,
scaler=None,
verbose=True,
):
self.train_df = train_df
self.test_df = test_df
self.target = target
self.features = features
self.n_splits = n_splits
self.categoricals = categoricals
self.cv_method = cv_method
self.group = group
self.task = task
self.parameter_tuning = parameter_tuning
self.scaler = scaler
self.cv = self.get_cv()
self.verbose = verbose
self.params = self.get_params()
(
self.y_pred,
self.score,
self.model,
self.oof,
self.y_val,
self.fi_df,
) = self.fit()
def train_model(self, train_set, val_set):
raise NotImplementedError
def get_params(self):
raise NotImplementedError
def convert_dataset(self, x_train, y_train, x_val, y_val):
raise NotImplementedError
def convert_x(self, x):
return x
def calc_metric(
self, y_true, y_pred
): # this may need to be changed based on the metric of interest
if self.task == "classification":
return log_loss(y_true, y_pred)
elif self.task == "regression":
return np.sqrt(mean_squared_error(y_true, y_pred))
def get_cv(self):
if self.cv_method == "KFold":
cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df)
elif self.cv_method == "StratifiedKFold":
cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df, self.train_df[self.target])
elif self.cv_method == "TimeSeriesSplit":
cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits)
return cv.split(self.train_df)
elif self.cv_method == "GroupKFold":
            cv = GroupKFold(n_splits=self.n_splits)  # GroupKFold does not accept shuffle/random_state
return cv.split(self.train_df, self.train_df[self.target], self.group)
elif self.cv_method == "StratifiedGroupKFold":
cv = StratifiedGroupKFold(
n_splits=self.n_splits, shuffle=True, random_state=42
)
return cv.split(self.train_df, self.train_df[self.target], self.group)
def fit(self):
# initialize
oof_pred = np.zeros((self.train_df.shape[0],))
y_vals = np.zeros((self.train_df.shape[0],))
y_pred = np.zeros((self.test_df.shape[0],))
if self.group is not None:
if self.group in self.features:
self.features.remove(self.group)
if self.group in self.categoricals:
self.categoricals.remove(self.group)
fi = np.zeros((self.n_splits, len(self.features)))
# scaling, if necessary
if self.scaler is not None:
numerical_features = [
f for f in self.features if f not in self.categoricals
]
if self.scaler == "MinMax":
scaler = MinMaxScaler()
elif self.scaler == "Standard":
scaler = StandardScaler()
df = pd.concat(
[self.train_df[numerical_features], self.test_df[numerical_features]],
ignore_index=True,
)
scaler.fit(df[numerical_features])
x_test = self.test_df.copy()
x_test[numerical_features] = scaler.transform(x_test[numerical_features])
x_test = [np.absolute(x_test[i]) for i in self.categoricals] + [
x_test[numerical_features]
]
else:
x_test = self.test_df[self.features]
# fitting with out of fold
for fold, (train_idx, val_idx) in enumerate(self.cv):
# train test split
x_train, x_val = (
self.train_df.loc[train_idx, self.features],
self.train_df.loc[val_idx, self.features],
)
y_train, y_val = (
self.train_df.loc[train_idx, self.target],
self.train_df.loc[val_idx, self.target],
)
# fitting & get feature importance
if self.scaler is not None:
x_train[numerical_features] = scaler.transform(
x_train[numerical_features]
)
x_val[numerical_features] = scaler.transform(x_val[numerical_features])
x_train = [np.absolute(x_train[i]) for i in self.categoricals] + [
x_train[numerical_features]
]
x_val = [np.absolute(x_val[i]) for i in self.categoricals] + [
x_val[numerical_features]
]
train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)
model, importance = self.train_model(train_set, val_set)
fi[fold, :] = importance
conv_x_val = self.convert_x(x_val)
y_vals[val_idx] = y_val
oof_pred[val_idx] = model.predict(conv_x_val).reshape(
oof_pred[val_idx].shape
)
x_test = self.convert_x(x_test)
y_pred += model.predict(x_test).reshape(y_pred.shape) / self.n_splits
print(
"Partial score of fold {} is: {}".format(
fold, self.calc_metric(y_val, oof_pred[val_idx])
)
)
# feature importance data frame
fi_df = pd.DataFrame()
for n in np.arange(self.n_splits):
tmp = pd.DataFrame()
tmp["features"] = self.features
tmp["importance"] = fi[n, :]
tmp["fold"] = n
fi_df = pd.concat([fi_df, tmp], ignore_index=True)
gfi = (
fi_df[["features", "importance"]].groupby(["features"]).mean().reset_index()
)
fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=("", "_mean"))
# outputs
loss_score = self.calc_metric(self.train_df[self.target], oof_pred)
if self.verbose:
print("Our oof loss score is: ", loss_score)
return y_pred, loss_score, model, oof_pred, y_vals, fi_df
def plot_feature_importance(self, rank_range=[1, 50]):
# plot
fig, ax = plt.subplots(1, 1, figsize=(10, 20))
sorted_df = (
self.fi_df.sort_values(by="importance_mean", ascending=False)
.reset_index()
.iloc[self.n_splits * (rank_range[0] - 1) : self.n_splits * rank_range[1]]
)
sns.barplot(data=sorted_df, x="importance", y="features", orient="h")
ax.set_xlabel("feature importance")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
return sorted_df
class LgbModel(BaseModel):
"""
LGB wrapper
"""
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
model = lgb.train(
self.params,
train_set,
num_boost_round=5000,
valid_sets=[train_set, val_set],
verbose_eval=verbosity,
)
fi = model.feature_importance(importance_type="gain")
return model, fi
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)
val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)
return train_set, val_set
def get_params(self):
# params from https://www.kaggle.com/vbmokin/mm-2020-ncaam-simple-lightgbm-on-kfold-tuning
params = {
"num_leaves": 127,
"min_data_in_leaf": 50,
"max_depth": -1,
"learning_rate": 0.005,
"boosting_type": "gbdt",
"bagging_seed": 11,
"verbosity": -1,
"random_state": 42,
}
if self.task == "regression":
params["objective"] = "regression"
params["metric"] = "rmse"
elif self.task == "classification":
params["objective"] = "binary"
params["metric"] = "binary_logloss"
# Bayesian Optimization by Optuna
if self.parameter_tuning == True:
# define objective function
def objective(trial):
# train, test split
train_x, test_x, train_y, test_y = train_test_split(
self.train_df[self.features],
self.train_df[self.target],
test_size=0.3,
random_state=42,
)
dtrain = lgb.Dataset(
train_x, train_y, categorical_feature=self.categoricals
)
dtest = lgb.Dataset(
test_x, test_y, categorical_feature=self.categoricals
)
# parameters to be explored
hyperparams = {
"num_leaves": trial.suggest_int("num_leaves", 24, 1024),
"boosting_type": "gbdt",
"objective": params["objective"],
"metric": params["metric"],
"max_depth": trial.suggest_int("max_depth", 4, 16),
"min_child_weight": trial.suggest_int("min_child_weight", 1, 20),
"feature_fraction": trial.suggest_uniform(
"feature_fraction", 0.4, 1.0
),
"bagging_fraction": trial.suggest_uniform(
"bagging_fraction", 0.4, 1.0
),
"bagging_freq": trial.suggest_int("bagging_freq", 1, 7),
"min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
"lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 10.0),
"lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 10.0),
"early_stopping_rounds": 100,
}
# LGB
model = lgb.train(
hyperparams, dtrain, valid_sets=dtest, verbose_eval=500
)
pred = model.predict(test_x)
if self.task == "classification":
return log_loss(test_y, pred)
elif self.task == "regression":
return np.sqrt(mean_squared_error(test_y, pred))
# run optimization
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=50)
print("Number of finished trials: {}".format(len(study.trials)))
print("Best trial:")
trial = study.best_trial
print(" Value: {}".format(trial.value))
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
params = trial.params
# lower learning rate for better accuracy
params["learning_rate"] = 0.001
# plot history
plot_optimization_history(study)
return params
class CatbModel(BaseModel):
"""
CatBoost wrapper
"""
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
if self.task == "regression":
model = CatBoostRegressor(**self.params)
elif self.task == "classification":
model = CatBoostClassifier(**self.params)
model.fit(
train_set["X"],
train_set["y"],
eval_set=(val_set["X"], val_set["y"]),
verbose=verbosity,
cat_features=self.categoricals,
)
return model, model.get_feature_importance()
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = {"X": x_train, "y": y_train}
val_set = {"X": x_val, "y": y_val}
return train_set, val_set
def get_params(self):
params = {
"task_type": "CPU",
"learning_rate": 0.01,
"iterations": 1000,
"random_seed": 42,
"use_best_model": True,
}
if self.task == "regression":
params["loss_function"] = "RMSE"
elif self.task == "classification":
params["loss_function"] = "Logloss"
return params
# Mish activation
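# Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x))), a smooth, non-monotonic activation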
class Mish(Layer):
def __init__(self, **kwargs):
super(Mish, self).__init__(**kwargs)
def build(self, input_shape):
super(Mish, self).build(input_shape)
def call(self, x):
return x * K.tanh(K.softplus(x))
def compute_output_shape(self, input_shape):
return input_shape
from keras import backend as K
# LayerNormalization
class LayerNormalization(keras.layers.Layer):
def __init__(
self,
center=True,
scale=True,
epsilon=None,
gamma_initializer="ones",
beta_initializer="zeros",
gamma_regularizer=None,
beta_regularizer=None,
gamma_constraint=None,
beta_constraint=None,
**kwargs
):
"""Layer normalization layer
See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
:param center: Add an offset parameter if it is True.
:param scale: Add a scale parameter if it is True.
:param epsilon: Epsilon for calculating variance.
:param gamma_initializer: Initializer for the gamma weight.
:param beta_initializer: Initializer for the beta weight.
:param gamma_regularizer: Optional regularizer for the gamma weight.
:param beta_regularizer: Optional regularizer for the beta weight.
:param gamma_constraint: Optional constraint for the gamma weight.
:param beta_constraint: Optional constraint for the beta weight.
:param kwargs:
"""
super(LayerNormalization, self).__init__(**kwargs)
self.supports_masking = True
self.center = center
self.scale = scale
if epsilon is None:
epsilon = K.epsilon() * K.epsilon()
self.epsilon = epsilon
self.gamma_initializer = keras.initializers.get(gamma_initializer)
self.beta_initializer = keras.initializers.get(beta_initializer)
self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
self.beta_regularizer = keras.regularizers.get(beta_regularizer)
self.gamma_constraint = keras.constraints.get(gamma_constraint)
self.beta_constraint = keras.constraints.get(beta_constraint)
self.gamma, self.beta = None, None
def get_config(self):
config = {
"center": self.center,
"scale": self.scale,
"epsilon": self.epsilon,
"gamma_initializer": keras.initializers.serialize(self.gamma_initializer),
"beta_initializer": keras.initializers.serialize(self.beta_initializer),
"gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer),
"beta_regularizer": keras.regularizers.serialize(self.beta_regularizer),
"gamma_constraint": keras.constraints.serialize(self.gamma_constraint),
"beta_constraint": keras.constraints.serialize(self.beta_constraint),
}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
def compute_mask(self, inputs, input_mask=None):
return input_mask
def build(self, input_shape):
shape = input_shape[-1:]
if self.scale:
self.gamma = self.add_weight(
shape=shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
name="gamma",
)
if self.center:
self.beta = self.add_weight(
shape=shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
name="beta",
)
super(LayerNormalization, self).build(input_shape)
def call(self, inputs, training=None):
mean = K.mean(inputs, axis=-1, keepdims=True)
variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
std = K.sqrt(variance + self.epsilon)
outputs = (inputs - mean) / std
if self.scale:
outputs *= self.gamma
if self.center:
outputs += self.beta
return outputs
class NeuralNetworkModel(BaseModel):
"""
MLP wrapper: for now not so flexible
"""
def train_model(self, train_set, val_set):
# MLP model
inputs = []
embeddings = []
embedding_out_dim = self.params["embedding_out_dim"]
n_neuron = self.params["hidden_units"]
for i in self.categoricals:
input_ = Input(shape=(1,))
embedding = Embedding(
int(np.absolute(self.train_df[i]).max() + 1),
embedding_out_dim,
input_length=1,
)(input_)
embedding = Reshape(target_shape=(embedding_out_dim,))(embedding)
inputs.append(input_)
embeddings.append(embedding)
input_numeric = Input(shape=(len(self.features) - len(self.categoricals),))
embedding_numeric = Dense(n_neuron)(input_numeric)
embedding_numeric = Mish()(embedding_numeric)
inputs.append(input_numeric)
embeddings.append(embedding_numeric)
x = Concatenate()(embeddings)
for i in np.arange(self.params["hidden_layers"] - 1):
x = Dense(n_neuron // (2 * (i + 1)))(x)
x = Mish()(x)
x = Dropout(self.params["hidden_dropout"])(x)
x = LayerNormalization()(x)
if self.task == "regression":
out = Dense(1, activation="linear", name="out")(x)
loss = "mse"
elif self.task == "classification":
out = Dense(1, activation="sigmoid", name="out")(x)
loss = "binary_crossentropy"
model = Model(inputs=inputs, outputs=out)
# compile
model.compile(
loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04)
)
# callbacks
er = EarlyStopping(
patience=10, min_delta=1e-4, restore_best_weights=True, monitor="val_loss"
)
ReduceLR = ReduceLROnPlateau(
monitor="val_loss",
factor=0.1,
patience=7,
verbose=1,
epsilon=1e-4,
mode="min",
)
model.fit(
train_set["X"],
train_set["y"],
callbacks=[er, ReduceLR],
epochs=self.params["epochs"],
batch_size=self.params["batch_size"],
validation_data=[val_set["X"], val_set["y"]],
)
fi = np.zeros(len(self.features)) # no feature importance computed
return model, fi
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = {"X": x_train, "y": y_train}
val_set = {"X": x_val, "y": y_val}
return train_set, val_set
def get_params(self):
"""
for now stolen from https://github.com/ghmagazine/kagglebook/blob/master/ch06/ch06-03-hopt_nn.py
"""
params = {
"input_dropout": 0.0,
"hidden_layers": 3,
"hidden_units": 128,
"embedding_out_dim": 8,
"hidden_activation": "relu",
"hidden_dropout": 0.05,
"batch_norm": "before_act",
"optimizer": {"type": "adam", "lr": 0.001},
"batch_size": 128,
"epochs": 80,
}
return params
# # Load data
data_dict = {}
for i in glob.glob(
"/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*"
):
name = i.split("/")[-1].split(".")[0]
if name != "WTeamSpellings":
data_dict[name] = pd.read_csv(i)
else:
data_dict[name] = pd.read_csv(i, encoding="cp1252")
# # Data Overview
# NCAAW20 has less data than NCAAM20. It may be easier for us to start with NCAAW20.
data_dict.keys()
fname = "Cities"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WTeamSpellings"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WSeasons"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WTeams"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WNCAATourneyCompactResults"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WGameCities"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "Conferences"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WNCAATourneySeeds"
print(data_dict[fname].shape)
data_dict[fname].head()
# get int from seed
data_dict["WNCAATourneySeeds"]["Seed"] = data_dict["WNCAATourneySeeds"]["Seed"].apply(
lambda x: int(x[1:3])
)
data_dict[fname].head()
fname = "WNCAATourneySlots"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WTeamConferences"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WNCAATourneyDetailedResults"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WRegularSeasonDetailedResults"
print(data_dict[fname].shape)
data_dict[fname].head()
fname = "WRegularSeasonCompactResults"
print(data_dict[fname].shape)
data_dict[fname].head()
# let's also have a look at test
test = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv"
)
print(test.shape)
test.head()
# format ID
test = test.drop(["Pred"], axis=1)
test["Season"] = test["ID"].apply(lambda x: int(x.split("_")[0]))
test["WTeamID"] = test["ID"].apply(lambda x: int(x.split("_")[1]))
test["LTeamID"] = test["ID"].apply(lambda x: int(x.split("_")[2]))
test.head()
# # Data processing and feature engineering.
# The main idea is to extract features that could be useful for understanding how much better one team is than the other.
# merge tables ============
train = data_dict["WNCAATourneyCompactResults"] # use compact data only for now
# # compact <- detailed (Tourney files)
# train = pd.merge(data_dict['MNCAATourneyCompactResults'], data_dict['MNCAATourneyDetailedResults'], how='left',
# on=['Season', 'DayNum', 'WTeamID', 'WScore', 'LTeamID', 'LScore', 'WLoc', 'NumOT'])
print(train.shape)
train.head()
# Train =================================
# merge with Game Cities
gameCities = pd.merge(
data_dict["WGameCities"], data_dict["Cities"], how="left", on=["CityID"]
)
cols_to_use = gameCities.columns.difference(train.columns).tolist() + [
"Season",
"WTeamID",
"LTeamID",
]
train = train.merge(
gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"]
)
train.head()
# merge with WSeasons
cols_to_use = data_dict["WSeasons"].columns.difference(train.columns).tolist() + [
"Season"
]
train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"])
train.head()
# merge with WTeams
cols_to_use = data_dict["WTeams"].columns.difference(train.columns).tolist()
train = train.merge(
data_dict["WTeams"][cols_to_use],
how="left",
left_on=["WTeamID"],
right_on=["TeamID"],
)
train.drop(["TeamID"], axis=1, inplace=True)
train = train.merge(
data_dict["WTeams"][cols_to_use],
how="left",
left_on=["LTeamID"],
right_on=["TeamID"],
suffixes=("_W", "_L"),
)
train.drop(["TeamID"], axis=1, inplace=True)
print(train.shape)
train.head()
# merge with WNCAATourneySeeds
cols_to_use = data_dict["WNCAATourneySeeds"].columns.difference(
train.columns
).tolist() + ["Season"]
train = train.merge(
data_dict["WNCAATourneySeeds"][cols_to_use].drop_duplicates(
subset=["Season", "TeamID"]
),
how="left",
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
)
train.drop(["TeamID"], axis=1, inplace=True)
train = train.merge(
data_dict["WNCAATourneySeeds"][cols_to_use].drop_duplicates(
subset=["Season", "TeamID"]
),
how="left",
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
suffixes=("_W", "_L"),
)
train.drop(["TeamID"], axis=1, inplace=True)
print(train.shape)
train.head()
# test =================================
# merge with Game Cities
cols_to_use = gameCities.columns.difference(test.columns).tolist() + [
"Season",
"WTeamID",
"LTeamID",
]
test = test.merge(
gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]),
how="left",
on=["Season", "WTeamID", "LTeamID"],
)
del gameCities
gc.collect()
test.head()
# merge with WSeasons
cols_to_use = data_dict["WSeasons"].columns.difference(test.columns).tolist() + [
"Season"
]
test = test.merge(
data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]),
how="left",
on=["Season"],
)
test.head()
# merge with WTeams
cols_to_use = data_dict["WTeams"].columns.difference(test.columns).tolist()
test = test.merge(
data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
how="left",
left_on=["WTeamID"],
right_on=["TeamID"],
)
test.drop(["TeamID"], axis=1, inplace=True)
test = test.merge(
data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
how="left",
left_on=["LTeamID"],
right_on=["TeamID"],
suffixes=("_W", "_L"),
)
test.drop(["TeamID"], axis=1, inplace=True)
test.head()
# merge with WNCAATourneySeeds
cols_to_use = data_dict["WNCAATourneySeeds"].columns.difference(
test.columns
).tolist() + ["Season"]
test = test.merge(
data_dict["WNCAATourneySeeds"][cols_to_use].drop_duplicates(
subset=["Season", "TeamID"]
),
how="left",
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
)
test.drop(["TeamID"], axis=1, inplace=True)
test = test.merge(
data_dict["WNCAATourneySeeds"][cols_to_use].drop_duplicates(
subset=["Season", "TeamID"]
),
how="left",
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
suffixes=("_W", "_L"),
)
test.drop(["TeamID"], axis=1, inplace=True)
print(test.shape)
test.head()
not_exist_in_test = [
c for c in train.columns.values.tolist() if c not in test.columns.values.tolist()
]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()
# compact <- detailed (regular season files)
regularSeason = data_dict["WRegularSeasonCompactResults"]
# regularSeason = pd.merge(data_dict['WRegularSeasonCompactResults'], data_dict['WRegularSeasonDetailedResults'], how='left',
# on=['Season', 'DayNum', 'WTeamID', 'WScore', 'LTeamID', 'LScore', 'WLoc', 'NumOT'])
print(regularSeason.shape)
regularSeason.head()
# split winners and losers
team_win_score = (
regularSeason.groupby(["Season", "WTeamID"])
.agg({"WScore": ["sum", "count", "var"]})
.reset_index()
)
team_win_score.columns = [
" ".join(col).strip() for col in team_win_score.columns.values
]
team_loss_score = (
regularSeason.groupby(["Season", "LTeamID"])
.agg({"LScore": ["sum", "count", "var"]})
.reset_index()
)
team_loss_score.columns = [
" ".join(col).strip() for col in team_loss_score.columns.values
]
del regularSeason
gc.collect()
print(team_win_score.shape)
team_win_score.head()
print(team_loss_score.shape)
team_loss_score.head()
# merge with train
train = pd.merge(
train,
team_win_score,
how="left",
left_on=["Season", "WTeamID"],
right_on=["Season", "WTeamID"],
)
train = pd.merge(
train,
team_loss_score,
how="left",
left_on=["Season", "LTeamID"],
right_on=["Season", "LTeamID"],
)
train = pd.merge(
train,
team_loss_score,
how="left",
left_on=["Season", "WTeamID"],
right_on=["Season", "LTeamID"],
)
train = pd.merge(
train,
team_win_score,
how="left",
left_on=["Season", "LTeamID_x"],
right_on=["Season", "WTeamID"],
)
train.drop(["LTeamID_y", "WTeamID_y"], axis=1, inplace=True)
train.head()
# merge with test
test = pd.merge(
test,
team_win_score,
how="left",
left_on=["Season", "WTeamID"],
right_on=["Season", "WTeamID"],
)
test = pd.merge(
test,
team_loss_score,
how="left",
left_on=["Season", "LTeamID"],
right_on=["Season", "LTeamID"],
)
test = pd.merge(
test,
team_loss_score,
how="left",
left_on=["Season", "WTeamID"],
right_on=["Season", "LTeamID"],
)
test = pd.merge(
test,
team_win_score,
how="left",
left_on=["Season", "LTeamID_x"],
right_on=["Season", "WTeamID"],
)
test.drop(["LTeamID_y", "WTeamID_y"], axis=1, inplace=True)
test.head()
# preprocess
def preprocess(df):
df["x_score"] = df["WScore sum_x"] + df["LScore sum_y"]
df["y_score"] = df["WScore sum_y"] + df["LScore sum_x"]
df["x_count"] = df["WScore count_x"] + df["LScore count_y"]
    df["y_count"] = df["WScore count_y"] + df["LScore count_x"]
    df["x_var"] = df["WScore var_x"] + df["LScore var_y"]
    df["y_var"] = df["WScore var_y"] + df["LScore var_x"]
return df
train = preprocess(train)
test = preprocess(test)
# make winner and loser train
train_win = train.copy()
train_los = train.copy()
train_win = train_win[
[
"Seed_W",
"Seed_L",
"TeamName_W",
"TeamName_L",
"x_score",
"y_score",
"x_count",
"y_count",
"x_var",
"y_var",
]
]
train_los = train_los[
[
"Seed_L",
"Seed_W",
"TeamName_L",
"TeamName_W",
"y_score",
"x_score",
"x_count",
"y_count",
"x_var",
"y_var",
]
]
train_win.columns = [
"Seed_1",
"Seed_2",
"TeamName_1",
"TeamName_2",
"Score_1",
"Score_2",
"Count_1",
"Count_2",
"Var_1",
"Var_2",
]
train_los.columns = [
"Seed_1",
"Seed_2",
"TeamName_1",
"TeamName_2",
"Score_1",
"Score_2",
"Count_1",
"Count_2",
"Var_1",
"Var_2",
]
# same processing for test
test = test[
[
"ID",
"Seed_W",
"Seed_L",
"TeamName_W",
"TeamName_L",
"x_score",
"y_score",
"x_count",
"y_count",
"x_var",
"y_var",
]
]
test.columns = [
"ID",
"Seed_1",
"Seed_2",
"TeamName_1",
"TeamName_2",
"Score_1",
"Score_2",
"Count_1",
"Count_2",
"Var_1",
"Var_2",
]
# feature engineering
def feature_engineering(df):
df["Seed_diff"] = df["Seed_1"] - df["Seed_2"]
df["Score_diff"] = df["Score_1"] - df["Score_2"]
df["Count_diff"] = df["Count_1"] - df["Count_2"]
df["Var_diff"] = df["Var_1"] - df["Var_2"]
df["Mean_score1"] = df["Score_1"] / df["Count_1"]
df["Mean_score2"] = df["Score_2"] / df["Count_2"]
df["Mean_score_diff"] = df["Mean_score1"] - df["Mean_score2"]
df["FanoFactor_1"] = df["Var_1"] / df["Mean_score1"]
df["FanoFactor_2"] = df["Var_2"] / df["Mean_score2"]
return df
train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)
train_win["result"] = 1
print(train_win.shape)
train_win.head()
train_los["result"] = 0
print(train_los.shape)
train_los.head()
data = pd.concat((train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()
# label encoding
categoricals = ["TeamName_1", "TeamName_2"]
for c in categoricals:
le = LabelEncoder()
data[c] = le.fit_transform(data[c])
test[c] = le.transform(test[c])
data.head()
test.shape
# # Predict & Make Submission File
target = "result"
features = data.columns.values.tolist()
features.remove(target)
# ## Fit MLP
nn = NeuralNetworkModel(
data,
test,
target,
features,
categoricals=categoricals,
n_splits=10,
cv_method="StratifiedKFold",
group=None,
task="classification",
scaler="MinMax",
verbose=True,
)
# ## Fit LGB
lgbm = LgbModel(
data,
test,
target,
features,
categoricals=categoricals,
n_splits=10,
cv_method="StratifiedKFold",
group=None,
task="classification",
scaler=None,
verbose=True,
)
# feature importance
lgbm.plot_feature_importance()
# ## Fit CatBoost
catb = CatbModel(
data,
test,
target,
features,
categoricals=categoricals,
n_splits=10,
cv_method="StratifiedKFold",
group=None,
task="classification",
scaler=None,
verbose=True,
)
# feature importance
catb.plot_feature_importance()
# ## Submission
submission_df = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv"
)
submission_df["Pred"] = 0.5 * lgbm.y_pred + 0.2 * catb.y_pred + 0.3 * nn.y_pred
submission_df
submission_df["Pred"].hist()
submission_df.to_csv("submission.csv", index=False)
|
# # Analysis and Modelling of Heart Disease Dataset
# ## Introduction
#
# This kernel includes an analysis of the Heart Disease dataset and the modelling of a machine learning model.
# 
# Content:
# 1. [Analysis of Data](#1)
# * Variable Description
# 2. [Visualization](#2)
# 3. [Modelling](#3)
# 4. [Conclusion](#4)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
#
# # Analysis of Data
data = pd.read_csv("/kaggle/input/heart-disease-uci/heart.csv")
data.info()
# Variable Description:
# 1. age : Age of patient
# 1. sex : Gender of patient (1 = male; 0 = female)
# 1. cp : Chest pain type
# 1. trestbps : Resting blood pressure (in mm Hg on admission to the hospital)
# 1. chol : Serum cholestoral in mg/dl
# 1. fbs : Fasting blood sugar > 120 mg/dl (1 = true; 0 = false)
# 1. restecg : Resting electrocardiographic results
# 1. thalach : Maximum heart rate achieved
# 1. exang : Exercise induced angina (1 = yes; 0 = no)
# 1. oldpeak : ST depression induced by exercise relative to rest
# 1. slope : The slope of the peak exercise ST segment
# 1. ca : Number of major vessels (0-3) colored by flourosopy
# 1. thal : 3 = normal; 6 = fixed defect; 7 = reversable defect
# 1. target : Has disease (1 = yes; 0= no)
# float(1): oldpeak
# int(13):age, sex, cp, trestbps, chol, fbs, restecg, thalach, exang, slope, ca, thal and target
data.head()
data.tail()
data.shape
#
# # Visualization
import matplotlib.pyplot as plt
import plotly as py
import seaborn as sns
#
# # Modelling
# We have a simple binary classification problem here. I will use sklearn to develop a model.
y = data.target.values
x = data.drop(["target"], axis=1)
# * Normalization
x_normalized = (x - np.min(x)) / (np.max(x) - np.min(x))
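# Note on the line above: it rescales every column to the [0, 1] range using that column's own
# min and max. In a stricter workflow the min/max would be computed on the training split only
# and then applied to the test split, to avoid data leakage.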
# * Splitting the data into train and test subsets
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x_normalized, y, test_size=0.17, random_state=42
)
# * Training
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(x_train, y_train)
dt.score(x_test, y_test)
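# A quick follow-up sketch: besides the plain accuracy returned by .score(), a confusion matrix
# and 5-fold cross-validation give a fuller picture of the decision tree's performance.
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score

print(confusion_matrix(y_test, dt.predict(x_test)))
cv_scores = cross_val_score(DecisionTreeClassifier(random_state=42), x_normalized, y, cv=5)
print("5-fold CV accuracy: {:.3f} +/- {:.3f}".format(cv_scores.mean(), cv_scores.std()))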
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
print(os.listdir())
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
dataset = pd.read_csv("../input/heart.csv")
type(dataset)
dataset.shape
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import cv2
import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import tensorflow as tf
print(tf.__version__)
# tested on tensorflow v2.1.0
# Will not work on tensorflow v1
from tensorflow.keras import Model, Sequential
from tensorflow.keras import layers
from tensorflow.keras import optimizers
# from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
DATA_PATH = "/kaggle/input/mobile-gallery-image-classification-data/mobile_gallery_image_classification/mobile_gallery_image_classification"
print(os.listdir(DATA_PATH))
train_path = os.path.join(DATA_PATH, "train")
test_path = os.path.join(DATA_PATH, "test")
# print(os.listdir(train_path))
# print(os.listdir(test_path))
# # Analysis of the data
def show_samples_train(train_path, to_analyze):
for folder_name in os.listdir(train_path):
image_folder = os.path.join(train_path, folder_name)
count = 0
for image in os.listdir(image_folder):
image_path = os.path.join(image_folder, image)
if count < to_analyze:
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.title(folder_name)
plt.xticks([])
plt.yticks([])
plt.show()
count += 1
else:
break
def show_samples_test(test_path, to_analyze):
count = 0
for image in os.listdir(test_path):
image_path = os.path.join(test_path, image)
if count < to_analyze:
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
plt.show()
count += 1
else:
break
# Given the train folder, this finds the distribution of images across the classes in it
def get_distribution_train(train_path, display=False):
lengths = {}
for folder in os.listdir(train_path):
folder_path = os.path.join(train_path, folder)
length = len(os.listdir(folder_path))
lengths[folder] = length
if display is True:
names = list(lengths.keys())
values = list(lengths.values())
plt.bar(range(len(lengths)), values, tick_label=names)
plt.show()
return lengths
# to_analyze = 10
# Analyzing 10 images per folder
show_samples_train(train_path, to_analyze=10)
# There are only 7 images in the test folder; will add more
# to_analyze = 8
show_samples_test(test_path, to_analyze=8)
images_per_folder = get_distribution_train(train_path, display=True)
# # Building a CNN Classifier
def build_model(i_shape, o_shape):
    # Minimal CNN: two conv/pool blocks followed by a softmax classifier head
    model = Sequential([
        layers.Conv2D(32, 3, activation="relu", input_shape=i_shape),
        layers.MaxPooling2D(),
        layers.Conv2D(64, 3, activation="relu"),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(o_shape, activation="softmax"),
    ])
    opt = optimizers.Adam(learning_rate=1e-4)
    chkpt = ModelCheckpoint("best_model.h5", monitor="val_loss", save_best_only=True)  # filename is illustrative
    callbacks_l = [chkpt]
    model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])
    # Note: tf v2 deprecates model.fit_generator; call model.fit with a generator instead
    return model, callbacks_l
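# A minimal usage sketch for the model above (assumptions: 224x224 RGB inputs, the class count
# taken from the train folder, and a simple 80/20 validation split; nothing here is tuned).
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
train_gen = datagen.flow_from_directory(
    train_path, target_size=(224, 224), batch_size=32, subset="training"
)
val_gen = datagen.flow_from_directory(
    train_path, target_size=(224, 224), batch_size=32, subset="validation"
)
model, callbacks_l = build_model((224, 224, 3), train_gen.num_classes)
history = model.fit(
    train_gen, validation_data=val_gen, epochs=5, callbacks=callbacks_l
)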
# # Analyzing the model performance
# Plot some graphs
# Use tf_explain
|
# ## Introduction
# * Predict whether comments are positive or negative with the Logistic Regression algorithm
# * Calculate reliability -- Accuracy
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
import string
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
data = pd.read_csv(
"/kaggle/input/product-comments-dataset/data.csv", sep=",", encoding="utf-8"
)
data["Duygu"].value_counts()
# Take an equal number of samples from both classes for better results
data_n = data[data.Duygu == 0]
data_negative = data_n.iloc[:230]
data_p = data[data.Duygu == 1]
data_positive = data_p.iloc[:230]
# Concat datasets
dataset = pd.concat([data_positive, data_negative])
# split dataset
x = dataset["Yorum"].copy()
y = dataset["Duygu"].values.reshape(-1, 1)
WPT = nltk.WordPunctTokenizer()
stop_word_list = nltk.corpus.stopwords.words("turkish")
print(stop_word_list)
# function to remove stopwords and punctuation
def text_preprocess(text):
text = text.translate(str.maketrans("", "", string.punctuation))
text = [word for word in text.split() if word.lower() not in stop_word_list]
return " ".join(text)
x = x.apply(text_preprocess)
# train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
# frequency of words appearing in a document is converted to a matrix
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer(encoding="utf-8").fit(x_train) # fit and transform
x_train_vectorized = vect.transform(x_train)
# import LogisticRegression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
# fitting model (ravel() flattens the (n, 1) label array to avoid a shape-conversion warning)
lr.fit(x_train_vectorized, y_train.ravel())
# prediction
predictions = lr.predict(vect.transform(x_test))
# evaluation: ROC AUC, computed here on the hard 0/1 predictions
from sklearn.metrics import roc_auc_score
print("AUC: ", roc_auc_score(y_test, predictions))
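# Since the introduction mentions accuracy, here is the plain accuracy as well
# (a small sketch using the predictions computed above).
from sklearn.metrics import accuracy_score

print("Accuracy: ", accuracy_score(y_test, predictions))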
|
# # Introduction
# This is the second notebook in the sarcasm detection mentoring project series. While in the first one we dealt with data exploration and feature engineering, in this one we will train some models. We'll start by learning to properly split the data and then move on to training a basic model and understanding cross-validation.
# Series:
# 1. [Part 1](https://www.kaggle.com/yastapova/sarcasm-detection-2020-mentoring-proj-part-1): Exploring Data and Feature Engineering
# 2. Part 2: Splitting Data and Building a Basic Machine Learning Model
# 3. [Part 3](https://www.kaggle.com/yastapova/sarcasm-detection-2020-mentoring-proj-part-3): Building a Text-Based Machine Learning Model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split, cross_validate
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Step 4: Split the Data
# Before we start training our models, we must first split the data into a training set and a test set. We do this because we want to make sure that our test set is entirely separate from the model as it's being trained. If we allowed the model to see the test set while it was training, it would taint our results. It would be like peeking at the answer key while taking an exam.
# We must also be vigilant to prevent any kind of data leakage, which is when information from the test set finds its way into training even if you aren't using the test data itself. For example, if we were to [standardize](https://medium.com/@swethalakshmanan14/how-when-and-why-should-you-normalize-standardize-rescale-your-data-3f083def38ff) a column by subtracting its mean and dividing by standard deviation, we must make sure to calculate the mean and standard deviation **only from the training data**. Then, we can standardize the training data and the test data using that same mean and standard deviation. This is because we must not allow the test set to influence those values. As far as we are concerned, the test set does not exist until we have a finished model.
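# A minimal sketch of leakage-free standardization (toy arrays for illustration, not this
# dataset): the scaler learns its mean and standard deviation from the training split only
# and merely applies them to the test split.
from sklearn.preprocessing import StandardScaler

toy_train = np.array([[1.0], [2.0], [3.0], [4.0]])
toy_test = np.array([[2.5], [10.0]])
scaler = StandardScaler().fit(toy_train)  # statistics come from training data only
print(scaler.transform(toy_test))  # test data is transformed with those same statistics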
# Before we get started, let's take a look at the first few rows of the dataset, just to refresh our memories about what it looks like. I am loading the data from the output file of my [first notebook](https://www.kaggle.com/yastapova/sarcasm-detection-2020-mentoring-proj-parts-1-3), which contained all of the data exploration and feature engineering for this project.
data = pd.read_csv(
"/kaggle/input/sarcasm-detection-2020-mentoring-proj-part-1/sarcasm_prepped_data.csv"
)
data.head(10)
# One thing to consider when we split the data is the balance of class labels in each split set. As we can see from the ```value_counts()``` function below, the full dataset is about 50/50 sarcastic and non-sarcastic. We should aim to maintain a similar ratio in the train and test sets.
data["label"].value_counts()
# While you can certainly simply write a quick function to select random data points to create your training and test sets, I will be using the ```train_test_split()``` function from Scikit-Learn's ```model_selection``` module. It's nice to have professionally pre-made functions so that we don't have to implement our own.
# I set the test size to be 0.25, which means that the resulting test set will consist of 25% of the original data set, and the training set will be the remaining 75%.
train_data, test_data = train_test_split(data, test_size=0.25, random_state=42)
test_data.shape
# As we can see from the number of rows in the test set, it contains about 25% of the approximately one million original data points.
# Consider this: we know our class labels are balanced, but what if they weren't? Would there be any issues with splitting the data that we would have to guard against?
# Indeed, if we had a huge disparity in the classes, randomly sampling data points could leave us with a test set that is entirely made up of one class! This would definitely not be useful for testing. To ensure that doesn't happen, we can perform [stratified sampling](https://datascience.stackexchange.com/questions/16265/is-stratified-sampling-necessary-random-forest-python), which, luckily, is already a feature in the ```train_test_split()``` function!
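# A quick sketch of a stratified split (shown for illustration; the rest of this notebook keeps
# the plain random split used above): passing the label column to `stratify` preserves the
# class ratio in both splits.
strat_train, strat_test = train_test_split(
    data, test_size=0.25, random_state=42, stratify=data["label"]
)
print(strat_train["label"].value_counts(normalize=True))
print(strat_test["label"].value_counts(normalize=True))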
# # Step 5: Train a Simple Classifier
# Before we get into text representation and NLP techniques, we'll try making a classifier that doesn't need anything like that. Let's take some of the features we have and see if they can predict sarcasm. We'll train a Logistic Regression model, which is a good starting point for a basic classification model.
# (A nice brief description of Logistic Regression can be found in [this article](https://towardsdatascience.com/machine-learning-basics-part-1-a36d38c7916) and a more complex description in the [User Guide](https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression) of the Scikit-Learn library.)
# We'll be using the Scikit-Learn library to train all of the models in this project. It has an excellent selection of models, algorithms, and other helpful functions for any situation, as well as the most marvellous documentation ever written.
# Let's take a look at our data and determine which columns we want to use in our model.
train_data.head()
# As you may recall, during data exploration I decided that the *ups* and *downs* columns were unclear and unreliable. Therefore, I will be omitting them from the model. I will also throw out all of the non-numeric columns, since the model algorithms have no way to deal with them.
# As a result, our training data will consist of the following columns: *score, comment_length, date_year, date_month* and *comment_day*. Of course, we will also keep *label* as our target variable.
basic_train_y = train_data["label"]
basic_train_X = train_data[
["score", "comment_length", "date_year", "date_month", "comment_day"]
]
basic_test_y = test_data["label"]
basic_test_X = test_data[
["score", "comment_length", "date_year", "date_month", "comment_day"]
]
basic_train_X.head()
# Now, let's build a Logistic Regression model.
from sklearn.linear_model import LogisticRegression
log_reg_model = LogisticRegression(random_state=42)
log_reg_model = log_reg_model.fit(basic_train_X, basic_train_y)
log_reg_model
# And that's it, we have a trained model! Yes, it **is** that easy. It's easy to train the model, but the hard part is making sure the model actually does a good job. As you can see from the model information printed out above, a Logistic Regression classifier has a lot of parameters. Initially, we have just trained it with defaults for everything, but this may not result in the best classifier.
# In order to ensure we get the best possible classifier, we have to try combinations of values for these parameters and test how well the model performs with each combination. Do we test it on the test set? **No!** Remember that the test set is only for testing our final product. While we're still tinkering with parameters, we can't touch the test set yet. What we could do is split the data again into a validation set, which we won't use for training and will only use to test our in-between attempts at models.
# However, this would take away a large amount of data that we could have used for training. We want as much data as possible for training because that will allow our models to train more thoroughly. Instead of splitting a separate validation set, we can tune our parameters through **cross-validation**.
# ## Cross Validation
# The process of **K-fold Cross Validation** is discussed in detail in the article linked somewhere above called *Machine Learning - Fundamentals*. The gist is that we can take our training set and divide it into k equal parts called *folds*. We then pick the parameters we want to test, pick one fold to be the validation set (or the "hold-out" fold), and then train the model on the remaining folds. After training, we test it on the hold-out fold, record the score, and then do *all of that again* for the same parameter values, except now we pick a different fold to be the hold-out fold. Then we aggregate all the validation scores together to get a result for those parameter values we were testing. We do this all over again for a new set of parameters. After we test all of them, we can see which parameter combinations had the best scores and choose those for our final model.
# If you think this sounds time-consuming and tedious, you're right. But it's an essential step of the machine learning process. Luckily, Scikit-Learn has some functions that will make our life easier. The first of these is the ```cross_validate()``` function, which performs one full run of the k-fold cross validation algorithm for one set of parameters. You can see how it works below.
cross_validate(log_reg_model, basic_train_X, basic_train_y, cv=5, scoring="accuracy")
# In the results of the model, you can see 5 values in each array. These correspond to each of the 5 folds we specified when we ran ```cross_validate``` with ```cv=5```. At a glance, we can see that our model results in about 51% accuracy on average. This isn't very good, but maybe by varying some of the other parameters we can make it better.
# If we use this ```cross_validate()``` function, we'll have to redefine the model, change the parameters, and rerun the validation manually every time, unless we wrote a loop to do it. But why resort to loops when Scikit Learn has anticipated your needs yet again?
# Let's use the implementation of cross-validation provided by ```GridSearchCV```, which automates the process for us. All we need to do is provide the model, data, and values for parameters we want to vary. I will set ```penalty``` to be "elasticnet" and vary the "l1_ratio" parameter. This will allow us to try different types of regularization (which is also discussed in the *Machine Learning - Fundamentals* article).
# (This will take several minutes to run.)
from sklearn.model_selection import GridSearchCV
log_reg_model = LogisticRegression(
random_state=42, penalty="elasticnet", solver="saga", max_iter=2000, n_jobs=-1
)
param_grid = {"l1_ratio": [0.0, 0.25, 0.50, 0.75, 1.0]}
grid = GridSearchCV(log_reg_model, param_grid, scoring="accuracy", cv=3, n_jobs=-1)
grid.fit(basic_train_X, basic_train_y)
print(grid.best_score_)
print(grid.best_params_)
# Above, we can see the results of our cross-validation attempts. The best model achieved an accuracy of 51% using an l1_ratio of 0. Now let's test this model on the test set.
log_reg_model = LogisticRegression(
random_state=42,
penalty="elasticnet",
l1_ratio=0.0,
solver="saga",
max_iter=2000,
n_jobs=-1,
)
log_reg_model = log_reg_model.fit(basic_train_X, basic_train_y)
score = log_reg_model.score(basic_test_X, basic_test_y)
score
# ## Discussion
# After a grueling process of cross validation, we found our best parameters. We trained this best model on all our training data and finally got to test it on our test set. Our final accuracy is 51%. Now we arrive at the question: **is that good?**
# This is a good time to talk about baselines and how to determine what our model evaluation metrics mean. Usually sometime in the beginning of a machine learning project, before any models are selected or any training is done, we must decide how we will evaluate the model and what the baseline score is that we will compare all our models to.
# There are plenty of different metrics we can use to evaluate models, such as accuracy, recall, precision, F1 score, and many others. Not all of them are well suited to every problem. In this project, since our classes were balanced, we can get away with simply using accuracy.
# ### Baselines
# But what constitutes a "good" model? Is 51% accuracy high enough? In some difficult prediction cases, it might be. In our case, it is not. This is because it does not noticeably beat either of the two most simple baselines. The first baseline is random guessing: if we have to classify a data point, we just flip a coin and either pick 0 or 1. This gives us a 50% chance to be correct on average. The second baseline is even simpler: just guess that everything is sarcastic, classifying everything as 1. Since our data is about half and half, this would also give us about 50% accuracy.
# Both of those baselines don't sound like very smart or useful classifiers. And yet, our trained and tested model performed equally as accurately as they would. This means that our model is not very good. What kind of accuracy would it need in order to qualify as "good"? That depends on each individual case. For very difficult problems, it may be that any gain above the random baseline is noteworthy. For very easy problems, it's possible that anything below 90% is junk.
# When creating baselines, it may be useful to research whether other people have tackled this problem before and see what their baselines and results were. It may also be useful to create a human baseline, in which you give a small subset of the data to people and have them try to solve the task. For this problem, out of my own experience/intuition, I would say that anything below 70% is not worth it and anything above 85% is probably pretty good.
# And remember that these baselines should be set when you **begin** working on the problem, not once you already have results. Moving the finish line after you've already started the race is dishonest.
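# A quick sanity check of the two simple baselines described above, computed on the test labels:
# random guessing and the "everything is sarcastic" classifier both land near 50% accuracy on
# this balanced dataset.
rng = np.random.RandomState(42)
random_guess = rng.randint(0, 2, size=len(basic_test_y))
print("Random-guess accuracy:", (random_guess == basic_test_y).mean())
print("All-sarcastic accuracy:", (basic_test_y == 1).mean())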
# ### Improvements
# Now that we know what constitutes a good model, what improvements can we make in our model to get it there? Firstly, we can try using completely different algorithms, such as Random Forests or Support Vector Machines (a quick sketch follows below). It's possible that some algorithms simply don't work well for a certain problem. Second, we can try cross-validating more parameters or more values of parameters.
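# A quick sketch of the "try a different algorithm" suggestion on the same basic features
# (a small Random Forest; the hyperparameters are illustrative, not tuned).
from sklearn.ensemble import RandomForestClassifier

rf_model = RandomForestClassifier(
    n_estimators=100, max_depth=8, random_state=42, n_jobs=-1
)
rf_scores = cross_validate(rf_model, basic_train_X, basic_train_y, cv=3, scoring="accuracy")
print(rf_scores["test_score"].mean())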
# However, in the case of this initial non-text-based classifier, I think the problem runs deeper than that. I believe we would have to go back to the drawing board and engineer some better features to train on. If you recall, it didn't look like months or years were predictive of sarcasm at all. We would have to create some features that do have strong relationships with sarcasm. If we find that we have too many features, we can also try running feature selection algorithms to reduce the noise.
# What we can also do is use the actual text, instead of features generated from it like *comment_length*. This is much more likely to give us good results, and this is what we'll focus on in the next notebook.
# (I will also now output the training and test data frames, so that I can reuse the same split of the data in the next notebook.)
train_data.to_csv("sarcasm_train_split.csv", index=False)
test_data.to_csv("sarcasm_test_split.csv", index=False)
|
# ### Coronaviruses (CoV) are a large family of viruses that cause illness ranging from the common cold to more severe diseases such as Middle East Respiratory Syndrome (MERS-CoV) and Severe Acute Respiratory Syndrome (SARS-CoV). A novel coronavirus (nCoV) is a new strain that has not been previously identified in humans.
# #### Coronaviruses are zoonotic, meaning they are transmitted between animals and people. Detailed investigations found that SARS-CoV was transmitted from civet cats to humans and MERS-CoV from dromedary camels to humans. Several known coronaviruses are circulating in animals that have not yet infected humans.
# 
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import folium
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Import DataSet
import pandas as pd
NCOV_data = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
time_series_2019_ncov_confirmed = pd.read_csv(
"../input/novel-corona-virus-2019-dataset/time_series_2019_ncov_confirmed.csv"
)
time_series_2019_ncov_deaths = pd.read_csv(
"../input/novel-corona-virus-2019-dataset/time_series_2019_ncov_deaths.csv"
)
time_series_2019_ncov_recovered = pd.read_csv(
"../input/novel-corona-virus-2019-dataset/time_series_2019_ncov_recovered.csv"
)
NCOV_data.head()
NCOV_data.tail()
NCOV_data.info()
# Top countries affected in this scenario
# Countries affected
countries = NCOV_data["Country"].unique().tolist()
print(countries)
print("\nTotal countries affected by virus: ", len(countries))
# Combining China and Mainland China cases as they are provided separately
NCOV_data["Country"].replace({"Mainland China": "China"}, inplace=True)
countries = NCOV_data["Country"].unique().tolist()
print(countries)
print("\nTotal countries affected by virus: ", len(countries))
# ### Let's Fix the datetime
# Convert Last Update column to datetime64 format
from datetime import date
NCOV_data["Date"] = NCOV_data["Date"].apply(pd.to_datetime)
# NCOV_data.drop(['Sno'],axis=1,inplace=True)
# Set Date column as the index column.
# data.set_index('Last Update', inplace=True)
# NCOV_data.head()
d = NCOV_data["Date"][-1:].astype("str")
year = int(d.values[0].split("-")[0])
month = int(d.values[0].split("-")[1])
day = int(d.values[0].split("-")[2].split()[0])
data_latest = NCOV_data[NCOV_data["Date"] > pd.Timestamp(date(year, month, day))]
data_latest.head()
# Creating a dataframe with total no of confirmed cases for every country
Number_of_countries = len(data_latest["Country"].value_counts())
cases = pd.DataFrame(data_latest.groupby("Country")["Confirmed"].sum())
cases["Country"] = cases.index
cases.index = np.arange(1, Number_of_countries + 1)
global_cases = cases[["Country", "Confirmed"]]
global_cases
# Importing the world_coordinates dataset
# world_coordinates = pd.read_csv('../input/world-coordinates/world_coordinates.csv')
world_coordinates = pd.read_csv("../input/world-coordinates/world_coordinates.csv")
# Merging the coordinates dataframe with original dataframe
world_data = pd.merge(world_coordinates, global_cases, on="Country")
world_data.head()
# create map and display it
world_map = folium.Map(location=[10, -20], zoom_start=2.3, tiles="Stamen Toner")
for lat, lon, value, name in zip(
world_data["latitude"],
world_data["longitude"],
world_data["Confirmed"],
world_data["Country"],
):
folium.CircleMarker(
[lat, lon],
radius=10,
popup=(
"<strong>Country</strong>: " + str(name).capitalize() + "<br>"
"<strong>Confirmed Cases</strong>: " + str(value) + "<br>"
),
color="red",
fill_color="red",
fill_opacity=0.7,
).add_to(world_map)
world_map
time_series_2019_ncov_confirmed.head()
time_series_2019_ncov_deaths.head()
time_series_2019_ncov_recovered.head()
import pandas as pd
world_coordinates
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Objective
# This Data Science for Good competition intends to use **remote sensing** techniques to understand environmental emissions. Since the whole concept of satellite imagery can be a little overwhelming, this is just an introductory kernel, where I try to explain the various terms and datasets related to satellite imagery.
# # Problem Statement: Measuring **Emissions factors** from Satellite Data
# Air Quality Management is an important area and influences a lot of decisions taken by countries. But how does one ascertain the Air quality of a place? This is done by calculating the Emissions Factor of that area.
# What is the Emission factor?
# A lot of activities today result in the release of greenhouse gases (GHG) into the atmosphere, such as burning fuel, running vehicles, and operating power plants. Therefore, in order to estimate GHG emissions per unit of a given activity, we need to use a factor called the emission factor (EF).[source]
# For example: how many kgs of GHG are emitted by 1 kWh of natural gas?
# Thus, an emission factor is a coefficient that converts any activity's data into GHG emissions. [This factor attempts to relate the quantity of a pollutant released to the atmosphere with an activity associated with the release of that pollutant.](https://www.epa.gov/air-emissions-factors-and-quantification/basic-information-air-emissions-factors-and-quantification#About%20Emissions%20Factors)
# 
# 
# [Source](https://www.epa.gov/air-emissions-factors-and-quantification/basic-information-air-emissions-factors-and-quantification#About%20Emissions%20Factors)
# # Where does Satellite Data fit in?
# Today, a lot of activities related to the calculation of Emission factors entail a long and time-consuming process of Data Collection. Data Collection can in itself be erroneous and can introduce disparities. Here is an example of a typical emission factor :
# * pounds of NOx per million cubic feet of natural gas combusted (the million cubic feet of natural gas is the activity unit [A])[source]
# An emission factor is one of the most common inputs when calculating a piece of equipment's emissions, so what if there were a better way to derive it? This is what the competition is all about. We need to try and see [if it's possible to use remote sensing techniques to better model emissions factors](https://www.kaggle.com/c/ds4g-environmental-insights-explorer/overview/description)
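# A toy illustration of the emission-factor idea from the unit example above
# (the numbers below are made up for illustration, not taken from any dataset):
# emissions = activity data x emission factor.
natural_gas_combusted_mmcf = 120.0  # activity data [A]: million cubic feet of natural gas
nox_ef_lb_per_mmcf = 94.0  # hypothetical emission factor: lbs of NOx per MMcf combusted
nox_emissions_lb = natural_gas_combusted_mmcf * nox_ef_lb_per_mmcf
print(f"Estimated NOx emissions: {nox_emissions_lb:,.0f} lbs")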
# # Remote Sensing
# Remote sensing is the process of gathering information from an object or place without any actual contact with the object. In the case of Satellite, remote sensing means to use satellites to gather data.
# 
# *a NASA program comprising a series of satellite missions as of 2 February 2015*
# Satellite imagery consists of images of Earth (or other planets) collected by imaging satellites. Satellites have been collecting Earth observation data for decades. Governments or private firms may own these satellites. Some of the imagery that has been made available in the public domain includes:
# * [**Landsat**](https://en.wikipedia.org/wiki/Landsat_program): It is the oldest continuous Earth-observing satellite imaging program.The [Landsat 7](https://en.wikipedia.org/wiki/Landsat_7) and [Landsat 8](https://en.wikipedia.org/wiki/Landsat_8 "Landsat 8") satellites are currently in orbit. [Landsat 9](https://en.wikipedia.org/wiki/Landsat_9 "Landsat 9") is planned.
# 
# *LANDSAT*
# * **MODIS** : [MODIS](https://modis.gsfc.nasa.gov/about/) stands for the **Moderate Resolution Imaging Spectroradiometer** (**MODIS**). It is a key instrument aboard the [Terra](http://terra.nasa.gov/) and [Aqua](http://aqua.nasa.gov/) satellites, which view the entire Earth’s surface every 1 to 2 days.
# * [**Sentinel**](https://en.wikipedia.org/wiki/Copernicus_Programme#Sentinel_missions) : The Sentinel missions by European Space Agency([ESA](https://en.wikipedia.org/wiki/European_Space_Agency)) includes radar and super-spectral imaging for the land, ocean and atmospheric monitoring.
# 
# *Sentinel*
# # Analysing the different Datasets
# The following datasets have been provided as a starter kit to get started with the competition. Let’s understand them briefly:
# #### 1. [Global Power Plant Database](https://developers.google.com/earth-engine/datasets/catalog/WRI_GPPD_power_plants) by [World Resources Institute](http://datasets.wri.org/dataset/globalpowerplantdatabase)(WRI)
# The Global Power Plant Database is a fully open-source (licensed under [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/)) and comprehensive database that includes details of power plants around the world. It covers approximately 30,000 power plants from 164 countries and includes both thermal and renewable power plants.
# #### 2. [Sentinel 5P OFFL NO2](https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S5P_OFFL_L3_NO2)
# **Sentinel-5 Precursor** (**Sentinel-5P**) is an [Earth observation satellite](https://en.wikipedia.org/wiki/Earth_observation_satellite "Earth observation satellite") developed by [ESA](https://en.wikipedia.org/wiki/European_Space_Agency "European Space Agency") as part of the [Copernicus Programme](https://en.wikipedia.org/wiki/Copernicus_Programme "Copernicus Programme"). The [Copernicus Programme](https://en.wikipedia.org/wiki/Copernicus_Programme "Copernicus Programme") is dedicated to monitoring [air pollution](https://en.wikipedia.org/wiki/Air_pollution "Air pollution") and Sentinel 5P Precursor is its first mission. It consists of an instrument called [Tropomi](https://en.wikipedia.org/wiki/Sentinel-5_Precursor) (TROPOspheric Monitoring Instrument) which is a spectrometer to monitor [ozone](https://en.wikipedia.org/wiki/Ozone "Ozone"), [methane](https://en.wikipedia.org/wiki/Methane "Methane"), [formaldehyde](https://en.wikipedia.org/wiki/Formaldehyde "Formaldehyde"), [aerosol](https://en.wikipedia.org/wiki/Aerosol "Aerosol"), [carbon monoxide](https://en.wikipedia.org/wiki/Carbon_monoxide "Carbon monoxide"), [NO2](https://en.wikipedia.org/wiki/Nitrogen_dioxide "Nitrogen dioxide") and [SO2](https://en.wikipedia.org/wiki/Sulfur_dioxide "Sulfur dioxide") in the atmosphere.
# The **OFFL/NO2 is a dataset** that provides offline high-resolution imagery of NO2 concentrations
# #### 3. [Global Forecast System 384-Hour Predicted Atmosphere Data](https://developers.google.com/earth-engine/datasets/catalog/NOAA_GFS0P25)
# The Global Forecast System (GFS) is a model that forecasts weather. The GFS is a coupled model, composed of an atmosphere model, an ocean model, a land/soil model, and a sea ice model, which work together to provide an accurate picture of weather conditions.
# 
# [An animated image of GFS simulated total atmospheric ozone concentration](https://www.ncdc.noaa.gov/data-access/model-data/model-datasets/global-forcast-system-gfs)
# #### 4. [GLDAS-2.1: Global Land Data Assimilation System](https://developers.google.com/earth-engine/datasets/catalog/NASA_GLDAS_V021_NOAH_G025_T3H)
# This dataset, provided by NASA, ingests satellite- and ground-based observational data products, using advanced land surface modeling and data assimilation techniques, in order to generate optimal fields of land surface states and fluxes (Rodell et al., 2004a).
#
## Importing necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Analysing datetime
import datetime as dt
from datetime import datetime
# Plotting geographical data
import folium
import rasterio as rio
# File system manangement
import os
# Suppress warnings
import warnings
warnings.filterwarnings("ignore")
# # Analysing the various datasets
# We have been given access to the Data for Puerto Rico from July 2018 to July 2019. This data has been Exported from Earth Engine.
# ## 1. Exploring the Global Power Plant Database for Puerto Rico
# The given `gppd_120_pr.csv` consists of all the power plants that belong to Puerto Rico, an unincorporated territory of the United States located in the northeast Caribbean Sea. The **latitude** of Puerto Rico is **18.200178**, and the longitude is **-66.664513**. The island has been chosen for the analysis since [there are fewer confounding factors from nearby areas](https://www.kaggle.com/c/ds4g-environmental-insights-explorer). Puerto Rico also offers a unique fuel mix and distinctive energy system layout that should make it easier to isolate pollution attributable to power generation in the remote sensing data.
# Total power plants in Puerto Rico
global_power_plants = pd.read_csv(
"../input/ds4g-environmental-insights-explorer/eie_data/gppd/gppd_120_pr.csv"
)
global_power_plants.head().T
# Number of different power plants
global_power_plants.shape
# Before analysing further, let's understand what some of the attributes mean.[[source](http://datasets.wri.org/dataset/globalpowerplantdatabase)]
# >
# * **capacity_mw** - electrical generating capacity in megawatts
# * **commissioning_year** - year of plant operation, weighted by unit-capacity when data is available
# * **estimated_generation_gwh** - estimated annual electricity generation in gigawatt-hours
# * **generation_gwh_2013** - electricity generation in gigawatt-hours for the year 2013
# * **gppd_idnr** - 10 or 12 character identifier for the power plant
# * **name** - name or title of the power plant
# * **primary_fuel** - energy source used in primary electricity generation or export
# * **wepp_id** - a reference to a unique plant identifier in the widely-used PLATTS-WEPP database
# * **year_of_capacity_data** - year the capacity information was reported
# * **source** - entity reporting the data
# * **owner** - majority shareholder of the power plant
# ### Kinds of Power Plants based on primary Fuel used
# Let's check the different kinds of Power Plants based on primary Fuel used.
sns.barplot(
x=global_power_plants["primary_fuel"].value_counts().index,
y=global_power_plants["primary_fuel"].value_counts(),
)
plt.ylabel("Count")
# ### How old are the plants
# Power plants built decades ago tend to pollute more since they do not meet the newer anti-pollution requirements.
global_power_plants["commissioning_year"].value_counts()
# Well, a lot of power plants do not have their commissioning year recorded. The oldest plants date back to 1942 and the most recent to 2012.
# ### The different sources of data
fig = plt.gcf()
fig.set_size_inches(10, 6)
colors = ["dodgerblue", "plum", "#F0A30A", "#8c564b", "orange", "green", "yellow"]
global_power_plants["source"].value_counts(ascending=True).plot(
kind="barh", color=colors, linewidth=2, edgecolor="black"
)
# The majority of the data came from CEPR, followed by PREPA (Puerto Rico Electric Power Authority)
# ### Who owns the Power Plants
# Owner - majority shareholder of the power plant
fig = plt.gcf()
fig.set_size_inches(10, 6)
colors = ["dodgerblue", "plum", "#F0A30A", "#8c564b", "orange", "green", "yellow"]
global_power_plants["owner"].value_counts(ascending=True).plot(
kind="barh", color=colors
)
# PREPA is a government agency that owns the electricity transmission and distribution systems for the main island, Vieques, and Culebra, as well as 80% of the electricity generating capacity([source](https://www.eia.gov/state/analysis.php?sid=RQ#25))
# ### Total Installed Capacity
# The total installed capacity of a power plant refers to the maximum output of electricity it can produce under ideal conditions; this is not necessarily the actual amount of electricity produced. It is usually expressed in megawatts (MW).
# Total capacity of all the plants
total_capacity_mw = global_power_plants["capacity_mw"].sum()
print("Total Installed Capacity: " + "{:.2f}".format(total_capacity_mw) + " MW")
capacity = (
global_power_plants.groupby(["primary_fuel"])["capacity_mw"].sum()
).to_frame()
capacity = capacity.sort_values("capacity_mw", ascending=False)
capacity["percentage_of_total"] = (capacity["capacity_mw"] / total_capacity_mw) * 100
capacity
fig = plt.gcf()
fig.set_size_inches(10, 6)
colors = ["dodgerblue", "plum", "#F0A30A", "#8c564b", "orange", "green", "yellow"]
capacity["percentage_of_total"].plot(kind="bar", color=colors)
# Oil-run plants constitute about 68% of Puerto Rico’s total installed capacity, natural gas accounts for about 18%, coal for about 7%, while renewables supply around 5.5%.
# ### Estimated generation
# Electricity generation, on the other hand, refers to the amount of electricity that is produced over a specific period of time. This is usually measured in kilowatt-hours, megawatt-hours or gigawatt-hours.
# Total generation of all the plants
total_gen_mw = global_power_plants["estimated_generation_gwh"].sum()
print("Total Generatation: " + "{:.2f}".format(total_gen_mw) + " GW")
generation = (
global_power_plants.groupby(["primary_fuel"])["estimated_generation_gwh"].sum()
).to_frame()
generation = generation.sort_values("estimated_generation_gwh", ascending=False)
generation["percentage_of_total"] = (
generation["estimated_generation_gwh"] / total_gen_mw
) * 100
generation
# - More than 90% of the estimated generation comes from fossil-fuel-powered plants, while only a minority share can be attributed to plants fueled by renewable resources (a quick recomputation follows).
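# A minimal sketch behind the statement above: group the primary fuels into fossil vs renewable and recompute the share of estimated generation. The fuel labels below are assumptions; adjust them to the actual values present in `primary_fuel`.
fossil_fuels = ["Oil", "Gas", "Coal"]  # assumed labels
fuel_group = np.where(
    global_power_plants["primary_fuel"].isin(fossil_fuels), "Fossil", "Renewable"
)
gen_share = (
    global_power_plants.groupby(fuel_group)["estimated_generation_gwh"].sum()
    / total_gen_mw
    * 100
)
print(gen_share.round(2))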
# ## A geographical view of the various Power Plants
# We can use the power plant dataset to visualise the existing locations of the various power plants. We will extract the latitudes and longitudes from the `.geo` column
#
# Code source: https://www.kaggle.com/paultimothymooney/overview-of-the-eie-analytics-challenge
import folium
from folium import plugins
def plot_points_on_map(
dataframe,
begin_index,
end_index,
latitude_column,
latitude_value,
longitude_column,
longitude_value,
zoom,
):
df = dataframe[begin_index:end_index]
location = [latitude_value, longitude_value]
plot = folium.Map(location=location, zoom_start=zoom, tiles="Stamen Terrain")
for i in range(0, len(df)):
popup = folium.Popup(str(df.primary_fuel[i : i + 1]))
folium.Marker(
[df[latitude_column].iloc[i], df[longitude_column].iloc[i]],
popup=popup,
icon=folium.Icon(
color="white",
icon_color="red",
icon="bolt",
prefix="fa",
),
).add_to(plot)
return plot
def overlay_image_on_puerto_rico(file_name, band_layer, lat, lon, zoom):
band = rio.open(file_name).read(band_layer)
m = folium.Map([lat, lon], zoom_start=zoom)
folium.raster_layers.ImageOverlay(
image=band,
bounds=[
[
18.6,
-67.3,
],
[17.9, -65.2],
],
colormap=lambda x: (1, 0, 0, x),
).add_to(m)
return m
def split_column_into_new_columns(
dataframe, column_to_split, new_column_one, begin_column_one, end_column_one
):
for i in range(0, len(dataframe)):
dataframe.loc[i, new_column_one] = dataframe.loc[i, column_to_split][
begin_column_one:end_column_one
]
return dataframe
global_power_plants = split_column_into_new_columns(
global_power_plants, ".geo", "latitude", 50, 66
)
global_power_plants = split_column_into_new_columns(
global_power_plants, ".geo", "longitude", 31, 48
)
global_power_plants["latitude"] = global_power_plants["latitude"].astype(float)
a = np.array(global_power_plants["latitude"].values.tolist())
global_power_plants["latitude"] = np.where(a < 10, a + 10, a).tolist()
lat = 18.200178
lon = -66.664513 # Puerto Rico's co-ordinates
plot_points_on_map(global_power_plants, 0, 425, "latitude", lat, "longitude", lon, 9)
# ## 2. Exploring the Sentinel 5P OFFL NO2 dataset
# This dataset provides offline high-resolution imagery of NO2 concentrations in the troposphere and the stratosphere. Nitrogen oxides are predominantly released during the burning of fossil fuels, but also by other processes such as wildfires, lightning, and microbiological processes in soils. This dataset is named `s5p_no2` and consists of 387 `.tif` files. Before analysing the NO2 emissions, let us look at a single image and see what information it contains.
# ### Analysing images using the Rasterio module
# [Rasterio](https://automating-gis-processes.github.io/CSC18/lessons/L6/reading-raster.html) is a module for reading and writing several different raster formats in Python.
# A [raster image](https://www.computerhope.com/jargon/r/raster.htm) is an image format made up of pixels, each of which has one or more numbers associated with it; these numbers define the location, size, or color of the pixels. Raster images are commonly .BMP, .GIF, .JPEG, .PNG, and .TIFF files. Today, almost all of the images you see on the Internet and images taken by a digital camera are raster images.
# Let’s start with inspecting one of the files we downloaded:
image = "/kaggle/input/ds4g-environmental-insights-explorer/eie_data/s5p_no2/s5p_no2_20180701T161259_20180707T175356.tif"
# Opening the file
raster = rio.open(image)
# All Metadata for the whole raster dataset
raster.meta
# * Driver : Data Format
# * dtype : data type
# * width and Height : The dimensions of the image are : 475 X 148
# * count : There are 12 bands in the image
# * crs : the Coordinate Reference System, which describes how the spatial data are mapped onto the earth's surface. A particular CRS can be referenced by its EPSG code (e.g., epsg:4326). The EPSG registry is a structured dataset of CRSs and coordinate transformations ([epsg-registry.org](http://www.epsg-registry.org/), [spatialreference.org](http://spatialreference.org/))
# * transform : Affine transform (how raster is scaled, rotated, skewed, and/or translated)
#
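# A small illustration of the affine transform described above, assuming `raster` is the dataset opened earlier: map a pixel (row, col) index to map coordinates via dataset.xy().
centre_row, centre_col = raster.height // 2, raster.width // 2
x, y = raster.xy(centre_row, centre_col)  # x/y are lon/lat for a geographic (EPSG:4326) raster
print(f"Pixel ({centre_row}, {centre_col}) maps to ({x:.4f}, {y:.4f})")
print(raster.transform)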
from rasterio.plot import show
show(raster)
# Plotting band 4 with a red colormap.
show((raster, 4), cmap="Reds")
# ## Bands
# The satellites cover the full earth in 13 bands, with a revisit every 5 days.
# 
# source: https://arxiv.org/pdf/1709.00029.pdf
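# A quick check of what this particular file actually contains: the band count and any band descriptions stored in the GeoTIFF (these may all be None if the exporter did not set them).
print("Band count:", raster.count)
print("Band descriptions:", raster.descriptions)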
# Calculating the dimensions of the image on earth in metres
sat_data = raster
width_in_projected_units = sat_data.bounds.right - sat_data.bounds.left
height_in_projected_units = sat_data.bounds.top - sat_data.bounds.bottom
print(
"Width: {}, Height: {}".format(width_in_projected_units, height_in_projected_units)
)
print("Rows: {}, Columns: {}".format(sat_data.height, sat_data.width))
# ### Converting the pixel co-ordinates to longitudes and latitudes
# Upper left pixel
row_min = 0
col_min = 0
# Lower right pixel. Rows and columns are zero indexing.
row_max = sat_data.height - 1
col_max = sat_data.width - 1
# Transform coordinates with the dataset's affine transformation.
# Note: the affine transform maps (col, row) to (x, y), so columns come first.
topleft = sat_data.transform * (col_min, row_min)
botright = sat_data.transform * (col_max, row_max)
print("Top left corner coordinates: {}".format(topleft))
print("Bottom right corner coordinates: {}".format(botright))
# ### Bands
# The image we are inspecting is a multispectral image; as the metadata above shows, it contains 12 bands, and each band is stored as a numpy array.
print(sat_data.count)
# sequence of band indexes
print(sat_data.indexes)
# ### Visualising the Satellite Imagery
# We will use matplotlib to visualise the image since it essentially consists of arrays.
# Load the 12 bands into 2d arrays
b01, b02, b03, b04, b05, b06, b07, b08, b09, b10, b11, b12 = sat_data.read()
# Displaying the second band.
fig = plt.imshow(b02)
plt.show()
fig = plt.imshow(b03)
fig.set_cmap("gist_earth")
plt.show()
fig = plt.imshow(b04)
fig.set_cmap("inferno")
plt.colorbar()
plt.show()
# Displaying band 8 with the 'winter' colormap.
fig = plt.imshow(b08)
fig.set_cmap("winter")
plt.colorbar()
plt.show()
|
# # Lead Scoring : Logistic Regression Case Study
# #### Steps :
# - 1.Introduction
# - 2.Python Libraries
# - 3.Reading and Understanding the data
# - 4.Data Cleaning
# - 5.Exploratory Data Analysis
# - 6.Data Preparation
# - 7.Model Building
# - 8.Model Evaluation : Train dataset
# - 9.Making Predictions on test data set
# # 1.Introduction
# ## Problem Statement
# - An education company named X Education sells online courses to industry professionals. On any given day, many professionals who are interested in the courses land on their website and browse for courses.
#
# - The company markets its courses on several websites, search engines, and even social media sometimes. Once these people land on the website, they might browse the courses, fill out a form for the course, or watch some videos. When these people fill out a form with their email address or phone number, they are classified as leads. Moreover, the company also gets leads through past referrals. Once these leads are acquired, employees from the sales team start making calls, writing emails, etc. Through this process, some of the leads get converted into successful sales, while most of the leads do not. The typical lead to successful sale conversion rate at X education is around 30%.
#
# - Now, although X Education gets a lot of leads, its lead-to-sale conversion rate is very poor. For example, if they acquire 100 leads in a day, only about 30 of them are converted into successful sales. To make this process more efficient, the company wishes to identify the most promising leads, also known as ‘Hot Leads’. If they successfully identify this set of leads, the lead conversion rate would go up, as the sales team would focus on communicating with the potential leads rather than making calls to everyone. The CEO, in particular, has given a ballpark estimate of the target lead conversion rate as being around 80%.
# ## Data
# - You were given a leads dataset from the past that contained approximately 9000 data points. This dataset consists of various attributes such as Lead Source, Total Time Spent on the Website, Total Visits, Last Activity, etc., which may or may not be useful in ultimately deciding whether a lead will be converted or not. The target variable, in this case, is the column ‘Converted’, which tells whether a past lead was converted or not, where 1 means it was converted and 0 means it wasn’t converted.
# - Another thing that you also need to check out is the levels present in the categorical variables. Many of the categorical variables have a level called ‘Select’ which needs to be handled because it is as good as a null value.
# ## Business Goal
# - 1.Build a logistic regression model to assign a lead score between 0 and 100 to each of the leads, which can be used by the company to target potential leads. A higher score would mean that the lead is hot, i.e., most likely to convert, whereas a lower score would mean that the lead is cold and will mostly not get converted.
# - 2.There are some more problems presented by the company that your model should be able to adjust to if the company’s requirements change in the future, so you will need to handle these as well. These problems are provided in a separate doc file. Please fill it out based on the logistic regression model you got in the first step. Also, make sure you include this in your final PowerPoint presentation, where you’ll make recommendations.
#
# ## 2. Python libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# supress warnings
import warnings
warnings.filterwarnings("ignore")
# ## 3.Reading & Understanding the data
pd.set_option("display.max_columns", None)
lead = pd.read_csv("/Users/sakshimunde/Downloads/Lead Scoring Assignment/Leads.csv")
lead.head()
# let's see dimension
lead.shape
# checking numerical columns statistically
lead.describe()
lead.info()
# - There are null values in the dataset. Let's inspect the null values first.
# ## 4.Data Cleaning
# There are a few columns which have 'Select' as one of the categories.
# This is because the person has not filled in that field, so we will replace it with NaN
lead = lead.replace("Select", np.nan)
lead.head()
# column wise null values
round(lead.isnull().sum() / len(lead.index), 3) * 100
# Row wise null values
round(lead.isnull().sum(axis=1) / lead.shape[1], 2) * 100
# let's see rows with more than 50% of null values
len(lead[lead.isnull().sum(axis=1) / lead.shape[1] > 0.5])
# - There is no row that has null values greater than 50%
# - There are 17 columns that have null values. 7 columns have more than 45% null values.
# Checking duplicates
lead.duplicated(subset="Prospect ID").sum()
# - "Prospect ID" and "lead number" are variables that are just indicative of id number of the contacted people and can be dropped.We will also drop columns that have null values more than 45%. Thus there are other variables that are not useful for further analysis, we will drop those as well.
# dropping unnecessary columns
lead.drop(
[
"Prospect ID",
"Lead Number",
"How did you hear about X Education",
"Tags",
"Lead Profile",
"Lead Quality",
"Asymmetrique Activity Index",
"Asymmetrique Profile Index",
"Asymmetrique Activity Score",
"Asymmetrique Profile Score",
"Last Notable Activity",
],
axis=1,
inplace=True,
)
# let's see number of columns in our dataset after dropping unnecessary vars
len(lead.columns)
# #### Segregating Numerical and Categorical values
# categorical vars
cat_col = lead.select_dtypes(exclude=["number"]).columns.values
cat_col
# numerical columns
numeric_col = lead.select_dtypes(include=["number"]).columns.values
numeric_col
# #### Categorical columns null values treatment
# Checking unique value of categorical columns
for value in cat_col:
df = lead[value].nunique()
print(value)
print(df)
# - There are some columns that have only one category, like 'Magazine', 'Receive More Updates About Our Courses', etc. These columns don't add any value to the model, so we will drop them.
# dropping colums that has only one category value
lead.drop(
[
"Magazine",
"Receive More Updates About Our Courses",
"Update me on Supply Chain Content",
"Get updates on DM Content",
"I agree to pay the amount through cheque",
],
axis=1,
inplace=True,
)
len(lead.columns)
# now checking null values
round(lead.isnull().sum() / len(lead.index), 3) * 100
# ###### Country
# country
lead.Country.value_counts(normalize=True) * 100
# - 96% of the data is mapped to India, so the Country column is heavily skewed. It is therefore not useful for modelling and we will drop it.
# dropping country column
lead.drop("Country", axis=1, inplace=True)
# ###### Specialization
# Specialization
lead.Specialization.value_counts()
# - It may be possible that the lead doesn't have any specialization, or may be a student with no work experience. So we will create a new category 'Others' to replace the null values.
# we will impute nan values with 'others'
lead["Specialization"] = lead.Specialization.replace(np.nan, "Others")
# ###### What is your current occupation
lead["What is your current occupation"].value_counts(normalize=True) * 100
# - 85% of the values are 'Unemployed'. If we impute null values with 'Unemployed', the data will become even more skewed, so we will impute null values with 'Unknown'.
# impute null values with 'unknown' in 'what is your current occupation'
lead["What is your current occupation"] = lead[
"What is your current occupation"
].replace(np.nan, "Unknown")
lead["What is your current occupation"].value_counts()
# ###### What matters most to you in choosing a course
lead["What matters most to you in choosing a course"].value_counts()
# - As the data is skewed ,we will delete the column
# drop 'what matters most to you in choosing a course' variable
lead.drop("What matters most to you in choosing a course", axis=1, inplace=True)
# ##### City
lead.City.value_counts(normalize=True) * 100
# - About 40% of the values in the City column are null. We could impute NaN with the mode, but this would make the data more skewed. Since X Education is an online teaching platform, the City column is not very useful, so we will drop it.
# drop city column
lead.drop("City", axis=1, inplace=True)
# ###### Lead Source
lead["Lead Source"].value_counts(normalize=True) * 100
# we will impute nan values by 'google'
lead["Lead Source"] = lead["Lead Source"].replace(np.nan, "Google")
lead["Lead Source"] = lead["Lead Source"].replace("google", "Google")
lead["Lead Source"].value_counts(normalize=True) * 100
# ###### Last Activity
lead["Last Activity"].value_counts(normalize=True) * 100
# Impute nan values with email opened
lead["Last Activity"] = lead["Last Activity"].replace(np.nan, "Email Opened")
lead["Last Activity"].value_counts(normalize=True) * 100
# ### Numerical columns null value treatment
# ##### Total Visits
sns.boxplot(lead["TotalVisits"])
lead["TotalVisits"].median()
# there are many outliers, so we will impute null values with the median, not the mean
lead["TotalVisits"].fillna(lead["TotalVisits"].median(), inplace=True)
lead["TotalVisits"].isnull().sum()
# ##### Page Views Per Visit
sns.boxplot(lead["Page Views Per Visit"])
# we will impute with the median, not the mean, as there are many outliers
lead["Page Views Per Visit"].fillna(lead["Page Views Per Visit"].median(), inplace=True)
lead["Page Views Per Visit"].isnull().sum()
# Checking null values
lead.isnull().sum()
# # 5.Exploratory Data Analysis
lead.head()
# let's see converted lead
(lead.Converted.sum() / len(lead.Converted)) * 100
# - Converted is the target variable; it indicates whether a lead has converted or not.
# - 38.5% of leads converted.
lead.columns
# Renaming column headers that has longer headers
lead.rename(
columns={
"What is your current occupation": "Occupation",
"Through Recommendations": "Recommendation",
"A free copy of Mastering The Interview": "Free Copy",
},
inplace=True,
)
lead.columns
# ### Univariate Analysis - categorical
# ### Lead Origin
# categorical columns
cat_col = lead.select_dtypes(exclude="number").columns.values
cat_col
# Lead origin : actual lead and converted lead to customers
lead_origin_count = lead["Lead Origin"].value_counts()
lead_origin_percentage = lead["Lead Origin"].value_counts(normalize=True) * 100
lead_conversion_rate = lead.groupby("Lead Origin")["Converted"].mean() * 100
result = pd.concat(
[lead_origin_count, lead_origin_percentage, lead_conversion_rate], axis=1
)
result.columns = ["lead_origin_count", "lead_origin_percentage", "lead_conversion_rate"]
result
lead_conversion_rate = lead_conversion_rate.reset_index()
# Plotting to see the distribution of conversion
plt.figure(figsize=[13, 6])
plt.subplot(1, 2, 1)
sns.countplot(x="Lead Origin", hue="Converted", data=lead)
plt.title("Lead Origin")
plt.xlabel("Lead origin", fontdict={"color": "navy", "size": 20})
plt.ylabel("Count", fontdict={"color": "navy", "size": 15})
plt.xticks(rotation=90)
# percentage of converted leads
plt.subplot(1, 2, 2)
sns.barplot(x="Lead Origin", y="Converted", data=lead_conversion_rate)
plt.title("Lead orgin (converted %)")
plt.xlabel("Lead origin", fontdict={"color": "navy", "size": 20})
plt.ylabel("Perecent of converted leads[%]", fontdict={"color": "navy", "size": 15})
plt.xticks(rotation=90)
plt.show()
# - Lead origin refers to the source or channel through which a lead was generated. For example, a lead may have originated from a website form, a cold call, a referral, or a social media ad. By analysing the lead origin, a business can gain insight into which channels are most effective for generating leads and allocate its resources accordingly.
# ----
# - Most of the leads originated from "Landing Page Submission" and "API" (around 52% and 38% respectively), where around 31% of leads got converted to customers.
# - "Lead Import" has very few leads, and the conversion rate is also very low.
# - The "Quick Add Form" has a 100% lead conversion rate, but there is only 1 lead.
# - The "Lead Add Form" has a very high conversion rate of 92%.
# - However, it is important to consider that the overall number of leads generated from this source is very low. Even though the conversion rate is high, the number of conversions may not be significant, so we need to generate more leads from the "Lead Add Form".
# - So, to improve the overall lead conversion rate, we need to focus on improving the lead conversion of "Landing Page Submission" and "API".
# ### Lead Source
# Countplot
plt.figure(figsize=[15, 6])
sns.countplot(x="Lead Source", data=lead, hue="Converted")
plt.xticks(rotation=90)
plt.show()
# Lead soucre count
lead["Lead Source"].value_counts()
# we will combine smaller 'lead sources' as 'other sources'
lead["Lead Source"] = lead["Lead Source"].replace(
[
"bing",
"Click2call",
"Social Media",
"Live Chat",
"Press_Release",
"Pay per Click Ads",
"blog",
"WeLearn",
"welearnblog_Home",
"youtubechannel",
"testone",
"NC_EDM",
],
"Others",
)
# let's see total leads from lead source and leads conversion rate
lead_source_count = lead["Lead Source"].value_counts()
lead_source_percentage = lead["Lead Source"].value_counts(normalize=True) * 100
lead_source_conversion_rate = lead.groupby("Lead Source")["Converted"].mean() * 100
# concatenate
result = pd.concat(
[lead_source_count, lead_source_percentage, lead_source_conversion_rate], axis=1
)
result.columns = [
"lead_source_count",
"lead_source_percentage",
"lead_source_conversion_rate",
]
result
lead_source_conversion_rate = lead_source_conversion_rate.reset_index()
# Running the function again to check the updated
plt.figure(figsize=[15, 6])
plt.subplot(1, 2, 1)
sns.countplot(x="Lead Source", data=lead, hue="Converted")
plt.title("Lead source")
plt.xticks(rotation=90)
# percentage of converted leads
plt.subplot(1, 2, 2)
sns.barplot(x="Lead Source", y="Converted", data=lead_source_conversion_rate)
plt.xticks(rotation=90)
plt.title("Lead source (converted%)")
plt.show()
# - The maximum number of leads is generated from Google and Direct Traffic.
# - The conversion rates of leads coming through Reference and the Welingak Website are high.
# - So the "Reference" and "Welingak Website" options should be explored more to increase leads.
# - We should also focus on improving the overall conversion rate of leads from sources like Google, Olark Chat, Direct Traffic and Organic Search, as they generate a significant number of leads.
# ### Do not email & Do not call
# conversion rate of people who said 'ok' to receive 'mail' and 'call' from the company
print(lead.groupby("Do Not Email")["Converted"].mean() * 100)
print("\n")
# call conversion rate
conver_rate = (lead.groupby("Do Not Call")["Converted"].mean() * 100).reset_index()
print(conver_rate)
# plotting
# Do not email
sns.countplot(x="Do Not Email", hue="Converted", data=lead)
plt.title("DO NOT EMAIL")
plt.show()
# - The majority of people agreed to receive email. People who agreed have a conversion rate of around 40%.
# - People who opted not to receive email have a lower conversion rate.
# ### Last Activity
lead["Last Activity"].value_counts()
# we will keep considerable 'last activity' and club all other activities to 'Other activity'
lead["Last Activity"] = lead["Last Activity"].replace(
[
"Had a Phone Conversation",
"Approached upfront",
"View in browser link Clicked",
"Email Received",
"Email Marked Spam",
"Visited Booth in Tradeshow",
"Resubscribed to emails",
],
"Other Activity",
)
# now lets see total leads and conversion rate of leads from 'last activity'
last_activity_count = lead["Last Activity"].value_counts()
last_activity_perc = lead["Last Activity"].value_counts(normalize=True) * 100
conversion_rate = lead.groupby("Last Activity")["Converted"].mean() * 100
result = pd.concat([last_activity_count, last_activity_perc, conversion_rate], axis=1)
result.columns = ["last_activity_count", "last_activity_perc", "conversion_rate"]
result.reset_index()
conversion_rate = conversion_rate.reset_index()
# Last activity
plt.figure(figsize=[10, 5])
plt.subplot(1, 2, 1)
sns.countplot(x="Last Activity", hue="Converted", data=lead)
plt.title("Last activity")
plt.xticks(rotation=90)
# last activity converted
plt.subplot(1, 2, 2)
sns.barplot(x="Last Activity", y="Converted", data=conversion_rate)
plt.xticks(rotation=90)
plt.title("Last activity(converted%)")
plt.show()
# - Most of the leads have 'Email Opened' as their last activity.
# - The conversion rate of leads with 'SMS Sent' as the last activity is almost 60%.
# - After combining the smaller 'last activities' into 'Other Activity', that group's lead conversion rate is very high, around 80%.
# ### Specialization
# let's see total leads and conversion rate of leads from specialization
Speci_lead_count = lead.Specialization.value_counts()
Speci_lead_perc = lead.Specialization.value_counts(normalize=True) * 100
Speci_conver_rate = lead.groupby("Specialization")["Converted"].mean() * 100
# concatenate
result = pd.concat([Speci_lead_count, Speci_lead_perc, Speci_conver_rate], axis=1)
result.columns = ["Speci_lead_count", "Speci_lead_perc", "Speci_conver_rate"]
result
Speci_conver_rate = Speci_conver_rate.reset_index()
# plot
plt.figure(figsize=[12, 5])
plt.subplot(1, 2, 1)
sns.countplot(x="Specialization", hue="Converted", data=lead)
plt.xticks(rotation=90)
plt.title("Specialization")
# conversion rate
plt.subplot(1, 2, 2)
sns.barplot(
x="Specialization",
y="Converted",
data=Speci_conver_rate,
order=Speci_conver_rate.sort_values("Converted", ascending=False)["Specialization"],
)
plt.xticks(rotation=90)
plt.title("Specialization (converted%)")
plt.show()
# - Specialization was not specified by most of the leads, and around 38% of leads fall in this category.
# - Leads with Finance Management, Human Resource Management and Marketing Management specializations are numerous, and their conversion rate is around 45%.
# ### Occupation
# let's see total leads and converted leads rate from occupation
lead_perc = lead.Occupation.value_counts(normalize=True) * 100
lead_conver_rate = lead.groupby("Occupation")["Converted"].mean() * 100
# concatenate them
result = pd.concat([lead_perc, lead_conver_rate], axis=1)
result.columns = ["lead_perc", "lead_conver_rate"]
result
lead_conver_rate = lead_conver_rate.reset_index()
plt.figure(figsize=[12, 5])
plt.subplot(1, 2, 1)
sns.countplot(x="Occupation", hue="Converted", data=lead)
plt.xticks(rotation=90)
plt.title("Occupation")
# conversion rate
plt.subplot(1, 2, 2)
sns.barplot(
x="Occupation",
y="Converted",
data=lead_conver_rate,
order=lead_conver_rate.sort_values("Converted", ascending=False)["Occupation"],
)
plt.xticks(rotation=90)
plt.title("Occupation(converted%)")
plt.show()
# - There are very few leads from the Housewife category, but their conversion rate is 100%.
# - Most of the leads are from the Unemployed category, but their conversion rate is low, so we should focus more on converting unemployed leads.
# - Leads with "Working Professional", "Businessman" and "Other" occupations have high conversion rates, so we should focus on these occupations to generate more leads.
# #### Search ,Newspaper Article, X Education Forums, Newspaper, Digital Advertisement,Recommendation, Free Copy
#
# search
count = lead["Search"].value_counts()
count
plt.pie(count, autopct="%1.1f%%", labels=["No", "Yes"])
plt.title("Search")
plt.show()
def pieplot(col):
counts = lead[col].value_counts()
plt.pie(counts, autopct="%1.1f%%", labels=["NO", "Yes"])
plt.title(col)
return counts
columns = [
"Do Not Call",
"Newspaper Article",
"X Education Forums",
"Newspaper",
"Digital Advertisement",
"Recommendation",
"Free Copy",
]
plt.figure(figsize=[12, 8])
i = 1
for each_col in columns:
plt.subplot(2, 4, i)
pieplot(each_col)
i = i + 1
plt.tight_layout()
plt.show()
# free copy conversion rate of lead
free_copy_perc = lead["Free Copy"].value_counts(normalize=True) * 100
conver_rate = lead.groupby("Free Copy")["Converted"].mean() * 100
r = pd.concat([free_copy_perc, conver_rate], axis=1)
r
# - Search, Do Not Call, Newspaper Article, X Education Forums, Newspaper, Digital Advertisement: indicate whether the customer saw an ad via the listed item.
# - Recommendation : indicates whether the customer came in through recommendations.
# - Most entries are 'No', so no inference can be drawn from these columns.
# - Free Copy : indicates whether the customer wants a free copy of 'Mastering the Interview' or not.
# - Search, Newspaper Article, X Education Forums, Newspaper, Digital Advertisement and Recommendation are heavily skewed and unlikely to add any value to the model, so we will drop them (a quick skew check follows).
# - Free Copy doesn't add much value either, as the conversion rate is almost the same for both groups.
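# A minimal skew check behind the bullets above (the 90% threshold is an arbitrary choice): for each flag column, report the share of its most common value.
flag_cols = ["Search", "Do Not Call", "Newspaper Article", "X Education Forums",
             "Newspaper", "Digital Advertisement", "Recommendation", "Free Copy"]
skew_share = {c: round(lead[c].value_counts(normalize=True).max() * 100, 1) for c in flag_cols}
print(skew_share)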
# let's drop this column
lead.drop(
[
"Search",
"Do Not Call",
"Newspaper Article",
"X Education Forums",
"Newspaper",
"Digital Advertisement",
"Recommendation",
"Free Copy",
],
axis=1,
inplace=True,
)
lead.columns
len(lead.columns)
# ### Univariate analysis - Numerical
numeric_col
# boxplot of totalvisits
plt.figure(figsize=[10, 5])
plt.subplot(1, 3, 1)
sns.boxplot(x="TotalVisits", data=lead)
# Total time spent on website
plt.subplot(1, 3, 2)
sns.boxplot(x="Total Time Spent on Website", data=lead)
# page views per visit
plt.subplot(1, 3, 3)
sns.boxplot(x="Page Views Per Visit", data=lead)
plt.tight_layout()
plt.show()
lead.describe()
# Checking the detailed percentile values
lead.describe(percentiles=[0.1, 0.05, 0.25, 0.75, 0.90, 0.95, 0.99])
# - We can see there are many outliers in "TotalVisits" and "Page Views Per Visit". We will cap the outliers at the 95th percentile.
percentiles = lead["TotalVisits"].quantile(0.95)
percentiles
# capping the TotalVisits data at the 95th percentile (using .loc to avoid chained assignment)
lead.loc[lead["TotalVisits"] >= percentiles, "TotalVisits"] = percentiles
# lets see number of rows getting capped
lead["TotalVisits"][lead["TotalVisits"] >= percentiles]
# capping 'page views per visit' at 95th percentile
percentiles = lead["Page Views Per Visit"].quantile(0.95)
lead["Page Views Per Visit"][lead["Page Views Per Visit"] >= percentiles] = percentiles
# lets see number of rows getting capped
lead["Page Views Per Visit"][lead["Page Views Per Visit"] >= percentiles]
# - The numbers of rows capped for the "TotalVisits" and "Page Views Per Visit" columns are 467 and 604 respectively.
# - The number of capped rows is quite high, which is why we cap them rather than delete them.
# Total visits
plt.figure(figsize=[10, 5])
plt.subplot(1, 3, 1)
sns.boxplot(x="TotalVisits", data=lead)
# Total time spent on website
plt.subplot(1, 3, 2)
sns.boxplot(x="Total Time Spent on Website", data=lead)
# page views per visit
plt.subplot(1, 3, 3)
sns.boxplot(x="Page Views Per Visit", data=lead)
plt.tight_layout()
plt.show()
# ## Bivariate analysis
sns.pairplot(lead[numeric_col], hue="Converted")
plt.show()
# - Data is not normally distributed.
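# A quick numeric companion to the observation above: skewness of the numeric columns (values far from 0 indicate a strongly skewed distribution).
print(lead[numeric_col].skew().round(2))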
# # 6.Data Preparation
# ###### Converting Binary categories
lead.head()
lead.nunique()
# applying map func to convert yes/no to 1/0
lead["Do Not Email"] = lead[["Do Not Email"]].apply(
lambda x: x.map({"Yes": 1, "No": 0})
)
lead["Do Not Email"].value_counts()
# ### Dummy vars
# for categorical vars with multiple levels ,create dummy vars(one-hot encoded)
dummy = pd.get_dummies(
lead[
["Lead Origin", "Lead Source", "Last Activity", "Specialization", "Occupation"]
],
drop_first=True,
)
# adding result to main dataframe
lead = pd.concat([lead, dummy], axis=1)
lead.head()
# we have created dummies for below vars so we can drop them
lead = lead.drop(
["Lead Origin", "Lead Source", "Last Activity", "Specialization", "Occupation"],
axis=1,
)
lead.head()
lead.info()
# ### Heatmap
# Visualising the data using barplot
plt.figure(figsize=[30, 20])
sns.heatmap(lead.corr(), annot=True, cmap="RdYlGn", fmt=".2f")
plt.show()
# - There are 51 columns in the heatmap, which makes it difficult to interpret; a compact view of the correlations with the target follows.
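# Since the full heatmap is hard to read, a small sketch showing only the features most correlated (in absolute value) with the target 'Converted'.
target_corr = lead.corr()["Converted"].drop("Converted").abs().sort_values(ascending=False)
print(target_corr.head(10).round(2))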
# # Train Test Split
y = lead["Converted"]
y.head()
X = lead.drop(["Converted"], axis=1)
X.head()
from sklearn.model_selection import train_test_split
# splitting data into train and test
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.3, random_state=100
)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# # Feature Scaling
# - Scaling helps us in faster convergence of gradient descent.
# - The standard scaler centres each feature at mean 0 with unit variance (see the quick sanity check below)
# - The formula for standardising a value in a dataset is given by:
# - (X − μ)/σ
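# A quick sanity check of the formula above on a toy column: standardising by hand with (X - mu) / sigma should match sklearn's StandardScaler (which uses the population std).
import numpy as np
from sklearn.preprocessing import StandardScaler
toy = np.array([[1.0], [2.0], [3.0], [4.0]])
by_hand = (toy - toy.mean()) / toy.std()
print(np.allclose(by_hand, StandardScaler().fit_transform(toy)))  # True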
# import standard scaler
from sklearn.preprocessing import StandardScaler
# Creating an object of the class
scaler = StandardScaler()
# fit and transform data
X_train[
["TotalVisits", "Total Time Spent on Website", "Page Views Per Visit"]
] = scaler.fit_transform(
X_train[["TotalVisits", "Total Time Spent on Website", "Page Views Per Visit"]]
)
X_train.head()
# # 7.Model Building
# ## Feature selection using RFE
# - Once we build a model and look at its summary statistics, many of the variables are likely to turn out insignificant, and hence we need to do some feature elimination.
# - Since the number of features is huge, let's first start with an automated feature selection technique (RFE) and then move to manual feature elimination (using p-values and VIFs)
# import logistic regression & rfe
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
# creating an object of class logistic regression
lr = LogisticRegression()
# Create and object of class RFE
rfe = RFE(lr, n_features_to_select=20)
rfe.fit(X_train, y_train)
# Checking the output of RFE
list(zip(X_train.columns, rfe.support_, rfe.ranking_))
# select 20 columns
rfe_col = X_train.columns[rfe.support_]
rfe_col
# let's see columns which eleminated after rfe
X_train.columns[~rfe.support_]
# ## Manual Feature Reduction
# ## Model 1
X_train_rfe = X_train[rfe_col]
# adding constant
import statsmodels.api as sm
X_train_sm = sm.add_constant(X_train_rfe)
# building logistic regression model and fitting it
logm1 = sm.GLM(y_train, X_train_sm, sm.families.Binomial()).fit()
logm1
logm1.summary()
# # VIF
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif["Features"] = X_train_rfe.columns
# VIF VALUES
vif["VIF"] = [
variance_inflation_factor(X_train_rfe.values, i)
for i in range(X_train_rfe.shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
# sorting
vif = vif.sort_values(by="VIF", ascending=False)
vif
# - We will drop Occupation_Housewife feature due to high p value i.e., 0.99.
# ## Model 2
rfe_col_2 = rfe_col.drop("Occupation_Housewife")
rfe_col
X_train_rfe = X_train[rfe_col_2]
# add constant
X_train_sm = sm.add_constant(X_train_rfe)
# build model
logm2 = sm.GLM(y_train, X_train_sm, sm.families.Binomial()).fit()
logm2.summary()
# # VIF
vif = pd.DataFrame()
vif["Features"] = X_train_rfe.columns
vif["VIF"] = [
variance_inflation_factor(X_train_rfe.values, i)
for i in range(X_train_rfe.shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# - We will drop the 'Specialization_Retail Management' feature due to its high p-value (0.209). The VIF values are within acceptable limits.
# # Model3
# dropping Specialization_Retail Management
rfe_col_3 = rfe_col_2.drop("Specialization_Retail Management")
X_train_rfe = X_train[rfe_col_3]
# add constant
X_train_sm = sm.add_constant(X_train_rfe)
# build model
logm3 = sm.GLM(y_train, X_train_sm, sm.families.Binomial()).fit()
logm3.summary()
# # VIF
vif = pd.DataFrame()
vif["Features"] = X_train_rfe.columns
vif["VIF"] = [
variance_inflation_factor(X_train_rfe.values, i)
for i in range(X_train_rfe.shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# - 'Lead Source_Facebook' is not significant. We will drop 'Lead Source_Facebook' due to high p value:0.204.
# # Model4
# dropping Lead Source_Facebook
rfe_col_4 = rfe_col_3.drop("Lead Source_Facebook")
X_train_rfe = X_train[rfe_col_4]
# add constant
X_train_sm = sm.add_constant(X_train_rfe)
# build model4
logm4 = sm.GLM(y_train, X_train_sm, sm.families.Binomial()).fit()
logm4.summary()
# # VIF
vif = pd.DataFrame()
vif["Features"] = X_train_rfe.columns
vif["VIF"] = [
variance_inflation_factor(X_train_rfe.values, i)
for i in range(X_train_rfe.shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# - 'Specialization_Rural and Agribusiness' is insignificant. We will drop 'Specialization_Rural and Agribusiness' due to high p value i.e.,0.174.
# # Model 5
rfe_col_5 = rfe_col_4.drop("Specialization_Rural and Agribusiness")
X_train_rfe = X_train[rfe_col_5]
# add constant
X_train_sm = sm.add_constant(X_train_rfe)
# build model 5
logm5 = sm.GLM(y_train, X_train_sm, sm.families.Binomial()).fit()
logm5.summary()
# # VIF
vif = pd.DataFrame()
vif["Features"] = X_train_rfe.columns
vif["VIF"] = [
variance_inflation_factor(X_train_rfe.values, i)
for i in range(X_train_rfe.shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# - All remaining variables have significant p-values and acceptable VIF values.
# finding predictive value on train data set
y_train_pred = logm5.predict(X_train_sm).values.reshape(-1)
y_train_pred
# creating a dataframe
y_train_pred_final = pd.DataFrame(
{"converted": y_train.values, "converted_prob": y_train_pred}
)
y_train_pred_final["Prospect ID"] = y_train.index
y_train_pred_final.head()
# creating a new column predicted and taking 0.5 as a threshold value
y_train_pred_final["Predicted"] = y_train_pred_final.converted_prob.map(
lambda x: 1 if x > 0.5 else 0
)
y_train_pred_final.head()
# # 8.Model Evaluation : Train dataset
# ##### Confusion matrix & Accuracy score
# import metrics
from sklearn import metrics
# confusion matrix
confusion = metrics.confusion_matrix(
y_train_pred_final.converted, y_train_pred_final.Predicted
)
confusion
# accuracy score
accuracy = metrics.accuracy_score(
y_train_pred_final.converted, y_train_pred_final.Predicted
)
# predicted negative positive
# Actual
# negative : 3548 454
# positive : 717 1749
# ### Metrics beyond simple accuracy
TN = confusion[0, 0] # true negative
FP = confusion[0, 1] # false positive
FN = confusion[1, 0] # false negative
TP = confusion[1, 1] # true positive
# sensitivity ,specificity ,precision, recall, True positive rate(TPR), False positive rate(FPR),
# positive prediction value, Negative prediction value
sensi = TP / (TP + FN) # actual positive
speci = TN / (TN + FP) # actual negative
precision = TP / (TP + FP) # predicted positive
recall = TP / (TP + FN) # actual positive
TPR = TP / (TP + FN) # actual positive
TNR = TN / (TN + FP) # actual negative
FPR = FP / (FP + TN)  # false positive rate: false positives over actual negatives
FNR = FN / (FN + TP) # actual false negative
posi_pred_val = TP / (TP + FP) # predicted positive
neg_pred_val = TN / (TN + FN) # negative predicted
print("Accuracy :", round(accuracy * 100, 2), "%")
print("Sensitivity :", round(sensi * 100, 2), "%")
print("Specificity :", round(speci * 100, 2), "%")
print("Precision :", round(precision * 100, 2), "%")
print("Recall :", round(recall * 100, 2), "%")
print("TPR :", round(TPR * 100, 2), "%")
print("TNR :", round(TNR * 100, 2), "%")
print("FPR :", round(FPR * 100, 2), "%")
print("FNR :", round(FNR * 100, 2), "%")
print("Positive predicted value :", round(posi_pred_val * 100, 2), "%")
print("negative predicted value :", round(neg_pred_val * 100, 2), "%")
# # Finding the optimal cutoff point
# let's create columns with different probability cutoffs
numbers = [float(x / 10) for x in range(10)]
for i in numbers:
y_train_pred_final[i] = y_train_pred_final.converted_prob.map(
lambda x: 1 if x > i else 0
)
y_train_pred_final.head()
# - accuracy : (TP+TN)/(TP+TN+FP+FN).
# Let's calculate sensitivity ,specificity, accuarcy for various cutoffs
cut_off = pd.DataFrame(
columns=["Probability", "accuracy", "Sensitivity", "Specificity"]
)
num = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for i in num:
confusion1 = metrics.confusion_matrix(
y_train_pred_final.converted, y_train_pred_final[i]
)
total = sum(sum(confusion1))
# now lets find accuracy, sensitivity,specificity
accu = (confusion1[0, 0] + confusion1[1, 1]) / total
sensi = confusion1[1, 1] / (confusion1[1, 1] + confusion1[1, 0])
speci = confusion1[0, 0] / (confusion1[0, 0] + confusion1[0, 1])
cut_off.loc[i] = [i, accu, sensi, speci]
print(cut_off)
# Let's plot accuracy ,sensitivity ,specificity for various probabilities
cut_off.plot.line(x="Probability", y=["accuracy", "Sensitivity", "Specificity"])
plt.axvline(x=0.35, color="r", linestyle="--")
plt.xticks(np.arange(0, 1, step=0.05), size=8)
plt.show()
# - From the above graph 0.35 seems to be a cutoff point.
# predicted value for optimum threshold i.e., 0.35 is
y_train_pred_final["final_predicted"] = y_train_pred_final.converted_prob.map(
lambda k: 1 if k > 0.35 else 0
)
y_train_pred_final.head(20)
y_train_pred_final.drop(
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], axis=1, inplace=True
)
y_train_pred_final.head()
# # Lead Score
# lets assign leads score for the leads in the Train dataset
y_train_pred_final["Lead Score"] = (y_train_pred_final.converted_prob * 100).astype(
"int64"
)
# or y_train_pred_final['lead score'] = y_train_pred_final.converted_prob.map(lambda x : round(x*100))
y_train_pred_final.sort_values(by="converted_prob", ascending=False)
# - Higher lead score have a higher conversion chance and the customers with lower lead score have a lower conversion chance.
# ### Confusion matrix
conf_matrix = metrics.confusion_matrix(
y_train_pred_final["converted"], y_train_pred_final["final_predicted"]
)
sns.heatmap(conf_matrix, annot=True, cmap="Blues", fmt="g")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
# predicted not converted converted
# actual
# not converted       3251          751    # 3251 leads didn't convert and were predicted correctly
# converted 476 1990 # 1990 leads converted to customers
# let's check the accuracy
accuracy = metrics.accuracy_score(
y_train_pred_final["converted"], y_train_pred_final.final_predicted
)
accuracy
TP = conf_matrix[1, 1] # true positive
TN = conf_matrix[0, 0] # true negatives
FP = conf_matrix[0, 1] # false positives
FN = conf_matrix[1, 0] # false negatives
# Sensitivity,Specificity,True Positive Rate (TPR) ,False Positive Rate (FPR)
sensi = TP / (TP + FN)
speci = TN / (TN + FP)
TPR = TP / (TP + FN)
FPR = FP / (FP + TN)
FNR = FN / (FN + TP)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
print("Sensitivity :", sensi * 100)
print("Specificity :", speci * 100)
print("TPR :", TPR * 100)
print("FPR :", FPR * 100)
print("FNR :", FNR * 100)
print("Precision :", precision * 100)
print("Recall :", recall * 100)
# - A high precision indicates a low false positive rate, while a high recall indicates a low false negative rate. A balanced approach is needed to ensure that both false positives and false negatives are minimized, depending on the specific context and consequences of false predictions in your problem domain.
# F1 score
metrics.f1_score(y_train_pred_final.converted, y_train_pred_final.final_predicted)
# Classification report : precision ,recall and f1 score
print(
metrics.classification_report(
y_train_pred_final.converted, y_train_pred_final.final_predicted
)
)
# - At the 0.35 threshold, the accuracy and sensitivity of our model are 81.02% and 80.7%. Sensitivity here indicates how many leads the model identifies correctly out of all the leads that actually convert. More than 80% is what the CEO has requested in this case study.
# - The F1 score and precision are higher for 0's than for 1's. This indicates that the model is better at predicting leads that will not convert (i.e., filtering out leads who will not convert) than leads that will convert. This indirectly improves the model's performance in identifying the correct leads to be contacted.
# ### ROC Curve
# - An ROC curve demonstrates several things:
# - It shows the tradeoff between sensitivity and specificity (any increase in sensitivity will be accompanied by a decrease in specificity).
# - The closer the curve follows the left-hand border and then the top border of the ROC space, the more accurate the test.
# - The closer the curve comes to the 45-degree diagonal of the ROC space, the less accurate the test.
def draw_roc(actual, probs):
fpr, tpr, threshold = metrics.roc_curve(actual, probs, drop_intermediate=False)
auc_score = metrics.roc_auc_score(actual, probs)
plt.plot(fpr, tpr, label="ROC curve(area = %0.2f)" % auc_score)
plt.plot([0, 1], [0, 1])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.title("Receiver operating characteristic example")
plt.legend(loc="lower right")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.show()
return None
# roc curve
fpr, tpr, threshold = metrics.roc_curve(
y_train_pred_final.converted,
y_train_pred_final.converted_prob,
drop_intermediate=False,
)
draw_roc(y_train_pred_final.converted, y_train_pred_final.converted_prob)
# - ROC curve area is 0.89, which indicates that the model is good.
# ### Precision-Recall Trade off
# - Precision: Probability that a predicted 'Yes' is actually a 'Yes'. OR out of all leads which are predicted as 1, how many have truly converted.
# - For instance, if 10 points were predicted to be positive, and of these, only 9 are actually positive, then the precision is 0.9.
# - Recall : Probability that an actual 'Yes' case is predicted correctly. OR out of all leads that have converted, how many of them were correctly identified as 1.
# - Continuing from the earlier example, if 18 datapoints were actually positive, then the recall would be 9/18 or 0.5.
# - What do the values of precision and recall mean? How should you interpret them? Suppose a model has a precision of 0.9 and a recall of 0.5. It can be said that the datapoints that are predicted as positive by the model will most likely be positive, but the model will be able to correctly predict only half of the actual positives.
# - Precision-Recall trade-off point is used to decide the cut-off point especially when there is huge imbalance in data.
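# The arithmetic of the worked example above, written out as a tiny check: 10 predicted positives of which 9 are truly positive, and 18 actual positives in total.
tp, predicted_positives, actual_positives = 9, 10, 18
print("precision =", tp / predicted_positives)  # 0.9
print("recall    =", tp / actual_positives)     # 0.5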
from sklearn.metrics import precision_recall_curve
p, r, threshold = precision_recall_curve(
y_train_pred_final.converted, y_train_pred_final.converted_prob
)
plt.plot(threshold, p[:-1], "g")
plt.plot(threshold, r[:-1], "r")
plt.axvline(x=0.42, color="b")
plt.show()
# - Based on the precision-recall trade-off curve, the cutoff point seems to be 0.42
# Plotting the train dataset again with 0.42 as cutoff
y_train_pred_final["final_predicted_2"] = y_train_pred_final.converted_prob.map(
lambda k: 1 if k > 0.42 else 0
)
y_train_pred_final.head()
# confusion matrix
confusion_m = metrics.confusion_matrix(
y_train_pred_final.converted, y_train_pred_final.final_predicted_2
)
confusion_m
# accuracy score
metrics.accuracy_score(
y_train_pred_final.converted, y_train_pred_final.final_predicted_2
)
# sensitivity,specificity ,FPR,FNR ,Precision,recall
sen = confusion_m[1, 1] / (confusion_m[1, 1] + confusion_m[1, 0])
spec = confusion_m[0, 0] / (confusion_m[0, 0] + confusion_m[0, 1])
FPR = confusion_m[0, 1] / (confusion_m[0, 1] + confusion_m[0, 0])
FNR = confusion_m[1, 0] / (confusion_m[1, 0] + confusion_m[1, 1])
precision = confusion_m[1, 1] / (confusion_m[1, 1] + confusion_m[0, 1])
recall = confusion_m[1, 1] / (confusion_m[1, 1] + confusion_m[1, 0])
print("Sensitivity : ", sen * 100)
print("Specificity : ", spec * 100)
print("FPR : ", FPR * 100)
print("FNR : ", FNR * 100)
print("Precision : ", precision * 100)
print("Recall : ", recall * 100)
# classification report
print(
metrics.classification_report(
y_train_pred_final.converted, y_train_pred_final.final_predicted_2
)
)
# - Using the precision-recall trade-off cutoff point of 0.42, the model output changed in the following way:
# - True positive number has decreased
# - True negative number has increased
# - False positive number has decreased
# - False negative number has increased
# - Basically, the CEO wants to correctly identify the people who will convert. Thus, we cannot use the precision-recall trade-off cutoff, as it reduces the true positives (sensitivity), and we will use 0.35 as the cutoff point instead.
# # 9.Making Predictions on test data set
X_test.head()
# ### Feature scaling
X_test[
["TotalVisits", "Total Time Spent on Website", "Page Views Per Visit"]
] = scaler.transform(
X_test[["TotalVisits", "Total Time Spent on Website", "Page Views Per Visit"]]
)
rfe_col_5
X_test_rfe = X_test[rfe_col_5]
# add constant
X_test_sm = sm.add_constant(X_test_rfe)
logm5
# predicting y test
y_test_pred = logm5.predict(X_test_sm)
# create a data frame
y_test_pred_final = pd.DataFrame({"converted": y_test, "converted_prob": y_test_pred})
y_test_pred_final["Prospect ID"] = y_test.index
y_test_pred_final.head()
# We will use 0.35 cutoff point
y_test_pred_final["final_predicted"] = y_test_pred_final.converted_prob.map(
lambda k: 1 if k > 0.35 else 0
)
y_test_pred_final.head()
# confusion metrics
conf_met = metrics.confusion_matrix(
y_test_pred_final.converted, y_test_pred_final.final_predicted
)
conf_met
TP = conf_met[1, 1] # true positive
TN = conf_met[0, 0] # true negatives
FP = conf_met[0, 1] # false positives
FN = conf_met[1, 0] # false negatives
#
sens = TP / (TP + FN)
spec = TN / (TN + FP)
TPR = TP / (TP + FN)
FPR = FP / (FP + TN)
FNR = FN / (FN + TP)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
print("Sensitivity :", sens * 100)
print("Specificity :", spec * 100)
print("TPR :", TPR * 100)
print("FPR :", FPR * 100)
print("FNR :", FNR * 100)
print("Precision :", precision * 100)
print("Recall :", recall * 100)
# coefficients (intercept and beta values) of our model
logm5.params
# let's see them in descending order (sorting the Series keeps each coefficient paired with its feature name)
logm5.params.sort_values(ascending=False)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from collections import Counter
from IPython.core.display import display, HTML
sns.set_style("darkgrid")
df = pd.read_csv("/kaggle/input/glass/glass.csv")
df.head()
print(df.iloc[3, 3])
corr = df.corr()
# Plot figsize
fig, ax = plt.subplots(figsize=(10, 8))
# Generate Heat Map, allow annotations and place floats in map
sns.heatmap(corr, cmap="coolwarm", annot=True, fmt=".2f")
# Apply xticks
plt.xticks(range(len(corr.columns)), corr.columns)
# Apply yticks
plt.yticks(range(len(corr.columns)), corr.columns)
# show plot
plt.show()
print(corr)
print(corr.iloc[1, 2])  # corr is a DataFrame; use .iloc for positional access
print(corr.iloc[3, 2])
df.drop(columns=["Type"])  # drop the target column (not assigned back, so df itself is unchanged)
corr[["Mg"]].idxmin()
print(corr[["K"]].idxmin())
|
# codes from Rodrigo Lima @rodrigolima82
from IPython.display import Image
Image(
url="https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcRH1ZkhpgLMpW8mwLLLXs8IGaYaIRQSlTgyuN1luLQ0KFXqdp43",
width=400,
height=400,
)
# Image behance.net - Russian Premier League identity.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
nRowsRead = 1000 # specify 'None' if want to read whole file
df = pd.read_csv(
"../input/russian-premier-league/repository/ilikeevb--football-prediction-29a122c/data/RPL.csv",
delimiter=";",
encoding="cp1251",
nrows=nRowsRead,
)
df.dataframeName = "RPL.csv"
nRow, nCol = df.shape
print(f"There are {nRow} rows and {nCol} columns")
df.head()
df.dtypes
df["Год"].plot.hist()
plt.show()
df["Удары"].plot.hist()
plt.show()
df["Пропущено"].plot.hist()
plt.show()
df["Точные навесы"].plot.box()
plt.show()
df["Минуты"].plot.box()
plt.show()
sns.pairplot(df, x_vars=["Забито"], y_vars="Передачи", markers="+", height=4)
plt.show()
dfcorr = df.corr()
dfcorr
sns.heatmap(dfcorr, annot=True, cmap="winter")
plt.show()
fig, axes = plt.subplots(1, 1, figsize=(14, 6))
sns.boxplot(x="Пропущено", y="Минуты", data=df, showfliers=False)
fig, axes = plt.subplots(1, 1, figsize=(14, 6))
sns.boxplot(x="Удары", y="Удары в створ", data=df, showfliers=False)
fig, axes = plt.subplots(1, 1, figsize=(14, 6))
sns.boxplot(x="Точные навесы", y="Навесы", data=df, showfliers=False)
g = sns.jointplot(x="Часть", y="Минуты", data=df, kind="kde", color="m")
g.plot_joint(plt.scatter, c="w", s=30, linewidth=1, marker="+")
g.ax_joint.collections[0].set_alpha(0)
g.set_axis_labels("$Часть$", "$Минуты$")
# word cloud
from wordcloud import WordCloud, ImageColorGenerator
text = " ".join(str(each) for each in df.Победитель)
# Create and generate a word cloud image:
wordcloud = WordCloud(
max_words=200, colormap="Set3", background_color="black"
).generate(text)
plt.figure(figsize=(10, 6))
plt.figure(figsize=(15, 10))
# Display the generated image:
plt.imshow(wordcloud, interpolation="Bilinear")
plt.axis("off")
plt.figure(1, figsize=(12, 12))
plt.show()
# word cloud
from wordcloud import WordCloud, ImageColorGenerator
text = " ".join(str(each) for each in df.Проигравший)
# Create and generate a word cloud image:
wordcloud = WordCloud(
max_words=200, colormap="Set3", background_color="green"
).generate(text)
plt.figure(figsize=(10, 6))
plt.figure(figsize=(15, 10))
# Display the generated image:
plt.imshow(wordcloud, interpolation="Bilinear")
plt.axis("off")
plt.figure(1, figsize=(12, 12))
plt.show()
nRowsRead = 1000 # specify 'None' if want to read whole file
df1 = pd.read_csv(
"../input/russian-premier-league/data/RPL.csv",
delimiter=";",
encoding="cp1251",
nrows=nRowsRead,
)
df1.dataframeName = "RPL.csv"
nRow, nCol = df1.shape
print(f"There are {nRow} rows and {nCol} columns")
df1.head()
# word cloud
from wordcloud import WordCloud, ImageColorGenerator
text = " ".join(str(each) for each in df1.Соперник)
# Create and generate a word cloud image:
wordcloud = WordCloud(max_words=200, colormap="Set3", background_color="blue").generate(
text
)
plt.figure(figsize=(10, 6))
plt.figure(figsize=(15, 10))
# Display the generated image:
plt.imshow(wordcloud, interpolation="Bilinear")
plt.axis("off")
plt.figure(1, figsize=(12, 12))
plt.show()
|
# All Imports
import numpy as np
import pandas as pd
import sklearn as sk
import matplotlib.pyplot as plt
import seaborn as sb
import re
# SuicideByState = pd.read_csv("../input/suicidebystate/SuicideByState.csv")
conv = pd.read_csv("../input/statedata/SBS.csv")
conv.head(3)
conv.dtypes
colormap = plt.cm.RdBu
plt.figure(figsize=(14, 12))
plt.title("Correlation Suicide", y=1.2, size=15)
sb.heatmap(
conv.corr(),
linewidths=0.1,
vmax=1.0,
square=True,
cmap=colormap,
linecolor="white",
annot=True,
)
sb.pairplot(conv)
sb.pairplot(conv, vars=["SuicideRate", "lackOfFirearmReg"])
Polynomial = np.polynomial.Polynomial
X = conv["SuicideRate"]
Y = conv["lackOfFirearmReg"]
pfit, stats = Polynomial.fit(X, Y, 1, full=True)
plt.plot(X, Y, "o")
plt.title("How lax gun laws impact suicide rate")
plt.xlabel("Suicide Rate")
plt.ylabel("Lack of Gun Regulation")
plt.plot(X, pfit(X))
np.corrcoef(X, Y)[0, 1]
Polynomial = np.polynomial.Polynomial
X = conv["SuicideRate"]
Y = conv["PopDensity "]
pfit, stats = Polynomial.fit(X, Y, 1, full=True)
plt.plot(X, Y, "o")
plt.title("S vs PD with outliers")
plt.xlabel("Suicide Rate")
plt.ylabel("Population Density")
plt.plot(X, pfit(X))
plt.ylim(-0.1, 1)
np.corrcoef(X, Y)[0, 1]
Q1 = Y.quantile(0.25)
Q3 = Y.quantile(0.75)
IQR = Q3 - Q1
filter = (Y >= Q1 - 1.5 * IQR) & (Y <= Q3 + 1.5 * IQR)
X = X.loc[filter]
Y = Y.loc[filter]
Y.size
pfit, stats = Polynomial.fit(X, Y, 1, full=True)
plt.plot(X, Y, "o")
plt.title("S vs PD removal of outliers")
plt.xlabel("Suicide Rate")
plt.ylabel("Population Density")
plt.plot(X, pfit(X))
plt.ylim(-0.1, 1)
np.corrcoef(X, Y)[0, 1]
Polynomial = np.polynomial.Polynomial
X = conv["SuicideHotlineCalls "]
Y = conv["Population"]
pfit, stats = Polynomial.fit(X, Y, 1, full=True)
plt.plot(X, Y, "o")
plt.title("Population vs Suicide Hotline Calls")
plt.xlabel("Suicide Hotline Calls")
plt.ylabel("Population")
plt.plot(X, pfit(X))
np.corrcoef(X, Y)[0, 1]
Polynomial = np.polynomial.Polynomial
X = conv["CrisisCenters"]
Y = conv["Population"]
pfit, stats = Polynomial.fit(X, Y, 1, full=True)
plt.plot(X, Y, "o")
plt.title("Population vs Crisis Centers")
plt.xlabel("Crisis Centers")
plt.ylabel("Population")
plt.plot(X, pfit(X))
np.corrcoef(X, Y)[0, 1]
Polynomial = np.polynomial.Polynomial
X = conv["CrisisCenters"]
Y = conv["SuicideRate"]
pfit, stats = Polynomial.fit(X, Y, 1, full=True)
plt.plot(X, Y, "o")
plt.title("SuicideRate vs Crisis Centers")
plt.xlabel("Crisis Centers")
plt.ylabel("SuicideRate")
plt.plot(X, pfit(X))
np.corrcoef(X, Y)[0, 1]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from glob import glob
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import fnmatch
import keras
from time import sleep
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import (
Dense,
Conv2D,
MaxPool2D,
Dropout,
Flatten,
BatchNormalization,
MaxPooling2D,
Activation,
)
from keras.optimizers import RMSprop, Adam
from tensorflow.keras.callbacks import EarlyStopping
from keras import backend as k
from keras.applications.vgg16 import VGG16
print(os.listdir("../input/ai-yanxi"))
train = pd.read_csv("../input/ai-yanxi/train.csv", header=None)
train.columns = ["ID", "Category"]
# train_bboxes = pd.read_csv("../input/ai-yanxi/train_bboxes.csv")
path_train = "../input/ai-yanxi/train/"
path_test = "../input/ai-yanxi/test/"
total_images_train = os.listdir(path_train)
total_images_test = os.listdir(path_test)
train["dir"] = path_train + train["ID"].map(str) + ".jpg"
train.head()
# imagePatches = glob('../input/ai-yanxi/train/*.jpg', recursive=True)
# print(len(imagePatches))
# os.path.splitext(imagePatches[0])[1]
# imagePatches
### show 8547.jpeg,7653.jpeg,...(which are location of pneumonia figure)
# print(total_images_train[0])
# len(total_images_train)
# train["dir"]=path_train+train["ID"].map(str)+".jpg"
image = cv2.imread(path_train + "5.jpg")
plt.imshow(image)
print(image.shape)
plt.imshow(image[:, :, 2])
# Get few samples for the classes
class0_samples = (train[train["Category"] == 0]["dir"].iloc[:5]).tolist()
class1_samples = (train[train["Category"] == 1]["dir"].iloc[:5]).tolist()
class2_samples = (train[train["Category"] == 2]["dir"].iloc[:5]).tolist()
class3_samples = (train[train["Category"] == 3]["dir"].iloc[:5]).tolist()
class4_samples = (train[train["Category"] == 4]["dir"].iloc[:5]).tolist()
# Concat the data in a single list and del the above two list
samples = (
class0_samples + class1_samples + class2_samples + class3_samples + class4_samples
)
del class0_samples, class1_samples, class2_samples, class3_samples, class4_samples
# Plot the data
f, ax = plt.subplots(5, 5, figsize=(30, 25))
for i in range(25):
img = plt.imread(samples[i])
ax[i // 5, i % 5].imshow(img, cmap="gray")
if i < 5:
ax[i // 5, i % 5].set_title("class0")
elif i < 10:
ax[i // 5, i % 5].set_title("class1")
elif i < 15:
ax[i // 5, i % 5].set_title("class2")
elif i < 20:
ax[i // 5, i % 5].set_title("class3")
else:
ax[i // 5, i % 5].set_title("class4")
ax[i // 5, i % 5].axis("off")
ax[i // 5, i % 5].set_aspect("auto")
plt.show()
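# Load every training image, resize it to 224x224, and keep the whole set in memory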
x = []
for i in range(train.shape[0]):
full_size_image = cv2.imread(path_train + str(i) + ".jpg")
im = cv2.resize(full_size_image, (224, 224), interpolation=cv2.INTER_CUBIC)
x.append(im)
if i % 2500 == 0:
print(i)
x = np.array(x)
y = np.array(train["Category"])
y = to_categorical(y, num_classes=5)
import keras
from keras.models import Sequential, Input, Model
from keras.layers import (
InputLayer,
Conv2D,
MaxPooling2D,
MaxPooling1D,
GlobalAveragePooling2D,
Dense,
Dropout,
Flatten,
Input,
LSTM,
TimeDistributed,
)
from keras.layers import BatchNormalization, LeakyReLU
model = Sequential()
# model.add(InputLayer(input_shape=(224,224,3)))
model.add(Conv2D(32, (7, 7), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.15))
model.add(Conv2D(64, (5, 5), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.15))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.15))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.15))
model.add(GlobalAveragePooling2D())
model.add(Dense(1000, activation="relu"))
model.add(Dense(5, activation="softmax"))
model.build(input_shape=(None, 224, 224, 3))
model.summary()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
from sklearn.model_selection import train_test_split
x_train, x_valid, y_train, y_valid = train_test_split(
x, y, test_size=0.05, random_state=101
)
# print(y_train.shape)
# print(y_train.shape)
# del x, y
from keras.callbacks import ModelCheckpoint
mcp = ModelCheckpoint(
filepath="model_check_path.hdf5",
monitor="val_accuracy",
save_best_only=True,
save_weights_only=False,
)
# hist = model.fit(x_train,y_train,batch_size = 32, epochs = 20, verbose=1, validation_split=0.2)
hist = model.fit(
x_train,
y_train,
batch_size=64,
epochs=20,
verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[mcp],
)
print(hist.history.keys())
model.load_weights("model_check_path.hdf5")  # Important (learned the hard way): reload the best checkpointed weights before evaluating!
model.evaluate(x_valid, y_valid)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_facecolor("w")
ax.grid(False)
ax.plot(hist.history["accuracy"], color="red")
ax.plot(hist.history["val_accuracy"], color="green")
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="lower right")
plt.show()
x_test = []
for i in range(len(total_images_test)):
full_size_image = cv2.imread(path_test + str(i) + ".jpg")
im = cv2.resize(full_size_image, (224, 224), interpolation=cv2.INTER_CUBIC)
x_test.append(im)
if len(x_test) % 1000 == 0:
print(len(x_test))
x_test = np.array(x_test)
print(x_test.shape)
predictions = model.predict(x_test)
predict = np.argmax(predictions, axis=1)
# idpre = pd.DataFrame({
# 'Id':total_images_test,
# 'pre':predict
# })
idpre = pd.DataFrame({"ID": np.arange(len(total_images_test)), "pre": predict})
idpre.to_csv("idpre4.csv", index=False, header=False)
# # The VGG16 model below was also tried; its results were not very good
# from keras.applications.vgg16 import VGG16
# from keras.preprocessing import image
# from keras.applications.vgg16 import preprocess_input
# from keras.layers import Input, Flatten, Dense
# from keras.models import Model
# import numpy as np
# #Get back the convolutional part of a VGG network trained on ImageNet
# model_vgg16_conv = VGG16(weights='imagenet', include_top=False)
# model_vgg16_conv.summary()
# #Create your own input format (here 224x224x3)
# input = Input(shape=(224,224,3),name = 'image_input')
# #Use the generated model
# output_vgg16_conv = model_vgg16_conv(input)
# #Add the fully-connected layers
# point = Flatten(name='flatten')(output_vgg16_conv)
# point = Dense(4096, activation='relu', name='fc1')(point)
# point = Dense(4096, activation='relu', name='fc2')(point)
# point = Dense(5, activation='softmax', name='predictions')(point)
# #Create your own model
# my_model = Model(input=input, output=point)
# #In the summary, weights and layers from VGG part will be hidden, but they will be fit during the training
# my_model.summary()
# #Then training with your data !
# my_model.compile(optimizer='adam', loss = 'categorical_crossentropy', metrics=['accuracy'])
# from keras.callbacks import ModelCheckpoint
# check = ModelCheckpoint(filepath='model_vg_check_path.hdf5',monitor="val_accuracy", save_best_only=True, save_weights_only=False)
# #hist = model.fit(x_train,y_train,batch_size = 32, epochs = 20, verbose=1, validation_split=0.2)
# # hist = my_model.fit(x,y,batch_size = 64, epochs = 5, verbose=1, validation_split=0.1,callbacks=[check])
# hist = my_model.fit(x_train,y_train,batch_size = 64, epochs = 10, verbose=1, validation_data=(x_valid,y_valid),callbacks=[check])
# model = Sequential()
# model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
# model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
# model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
# model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
# model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
# model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
# model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
# model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
# model.add(Flatten())
# model.add(Dense(units=4096,activation="relu"))
# model.add(Dense(units=4096,activation="relu"))
# model.add(Dense(units=5, activation="softmax"))
# model.summary()
# model.compile(optimizer='adam', loss = 'categorical_crossentropy', metrics=['accuracy'])
|
from sklearn import datasets
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
np.random.seed(0)
X, y = datasets.make_circles(n_samples=1000, factor=0.3, noise=0.15)
plt.subplots(1, 1, figsize=(5, 5))
# plt.subplot(1,2,3, projection='polar')
plt.title("Original Data")
reds = y == 0
blues = y == 1
plt.scatter(X[reds, 0], X[reds, 1], c="red", s=20, edgecolor="k")
plt.scatter(X[blues, 0], X[blues, 1], c="blue", s=20, edgecolor="k")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=1, stratify=y
)
k = np.bincount(y)
m = np.bincount(y_test)
n = np.bincount(y_train)
fig = plt.figure()
ax1 = fig.add_axes([0, 0, 0.5, 0.5], aspect=1)
ax1.pie(m, labels=m)
ax2 = fig.add_axes([0.5, 0.0, 0.5, 0.5], aspect=1)
ax2.pie(n, labels=n)
ax3 = fig.add_axes([1, 0.0, 0.5, 0.5], aspect=1)
ax3.pie(k, labels=k)
ax1.set_title("y_test")
ax2.set_title("y_train")
ax3.set_title("y")
plt.show()
pass
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_scale = sc.transform(X_train)
X_test_scale = sc.transform(X_test)
from sklearn.linear_model import Perceptron
ppn = Perceptron(eta0=0.1, random_state=1)
ppn.fit(X_train_scale, y_train)
y_pred = ppn.predict(X_test_scale)
print("Misclassified examples: %d" % (y_test != y_pred).sum())
from sklearn.metrics import accuracy_score
print("Prediction Accuracy: %.3f" % accuracy_score(y_test, y_pred))
print("Training Accuracy: %.3f" % ppn.score(X_train_scale, y_train))
print("Test Accuracy: %.3f" % ppn.score(X_test_scale, y_test))
from mlxtend.plotting import plot_decision_regions
plot_decision_regions(X, y, clf=ppn, legend=2)
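# Note: the two classes form concentric circles, so they are not linearly separable and a linear perceptron is expected to do poorly here.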
# ## SVM with RBF kernel
from sklearn.svm import SVC
svm = SVC(kernel="rbf", C=1, gamma=100)
svm.fit(X_train_scale, y_train)
svc_y_pred = svm.predict(X_test_scale)
print("False Negative: %d" % (y_test != svc_y_pred).sum())
print("Prediction Accuracy SVC: %.3f" % accuracy_score(y_test, svc_y_pred))
print("Training Accuracy: %.3f" % svm.score(X_train_scale, y_train))
print("Test Accuracy: %.3f" % svm.score(X_test_scale, y_test))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, svc_y_pred)
print(cm)
plot_decision_regions(X, y, clf=svm, legend=2)
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_axes([0, 0, 0.5, 0.5], aspect=1)
plot_decision_regions(X_test_scale, y_test, clf=ppn, legend=2)
ax2 = fig.add_axes([0.5, 0.0, 0.5, 0.5], aspect=1)
plot_decision_regions(X_test_scale, y_test, clf=svm, legend=2)
ax1.set_title("Perceptron")
ax2.set_title("SVC-RBF")
plt.show()
pass
# ## Multi-layer Perceptron
from sklearn.neural_network import MLPClassifier
clf_mlp = MLPClassifier(
solver="lbfgs", alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1
)
clf_mlp.fit(X_train_scale, y_train)
mlf_y_pred = clf_mlp.predict(X_test_scale)
print("False Negative: %d" % (y_test != mlf_y_pred).sum())
print("Prediction Accuracy SVC: %.3f" % accuracy_score(y_test, mlf_y_pred))
plot_decision_regions(X_test_scale, y_test, clf=clf_mlp, legend=2)
[coef.shape for coef in clf_mlp.coefs_]
clf_mlp.predict_proba(X_test_scale)
# ## Multiple Models
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import matplotlib.gridspec as gridspec
import itertools
clf_pct = Perceptron()
clf_rfc = RandomForestClassifier(random_state=1, n_estimators=100)
clf_nb = GaussianNB()
clf_svc = SVC(gamma=100)
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(10, 8))
labels = ["Perceptron", "Random Forest", "Naive Bayes", "SVM"]
for clf, lab, grd in zip(
[clf_pct, clf_rfc, clf_nb, clf_svc], labels, itertools.product([0, 1], repeat=2)
):
clf.fit(X, y)
ax = plt.subplot(gs[grd[0], grd[1]])
fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2)
plt.title(lab)
plt.show()
|
import pandas as pd
curve = pd.read_csv("../input/curve.csv")
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
curve.head()
def fit_poly(degree):
p = np.polyfit(curve.x, curve.y, deg=degree)
curve["fit"] = np.polyval(p, curve.x)
    sn.regplot(x=curve.x, y=curve.y, fit_reg=False)
return plt.plot(curve.x, curve.fit, label="fit")
fit_poly(3)
plt.xlabel("x values")
plt.ylabel("y values")
from sklearn.model_selection import train_test_split
from sklearn import metrics
train_X, test_X, train_y, test_y = train_test_split(
curve.x, curve.y, test_size=0.40, random_state=100
)
rmse_df = pd.DataFrame(columns=["degree", "rmse_train", "rmse_test"])
def get_rmse(y, y_fit):
return np.sqrt(metrics.mean_squared_error(y, y_fit))
for i in range(1, 15):
# fitting model
p = np.polyfit(train_X, train_y, deg=i)
rmse_df.loc[i - 1] = [
i,
get_rmse(train_y, np.polyval(p, train_X)),
get_rmse(test_y, np.polyval(p, test_X)),
]
rmse_df
plt.plot(rmse_df.degree, rmse_df.rmse_train, label="RMSE_TRAIN", c="red")
plt.plot(rmse_df.degree, rmse_df.rmse_test, label="RMSE_TEST", c="green")
plt.xlabel("Degree")
plt.ylabel("RMSE")
plt.legend()
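# Watch for the degree where the test RMSE starts to rise while the train RMSE keeps falling - that is where the polynomial begins to overfit.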
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train = pd.read_csv("/kaggle/input/Kannada-MNIST/train.csv")
test = pd.read_csv("/kaggle/input/Kannada-MNIST/test.csv")
X = train.iloc[:, 1:].values  # pixel columns
y = train.iloc[:, 0].values  # the digit label lives in the first column
# reshape data
X = X.reshape(X.shape[0], 28, 28) / 255.0
X = X.reshape(X.shape[0], 28, 28, 1)
test = test.iloc[:, 1:].values
test = test.reshape(test.shape[0], 28, 28) / 255.0
test = test.reshape(test.shape[0], 28, 28, 1)
# import keras libraries
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense, Dropout
# initialize
classifier = Sequential()
# layer1
classifier.add(Convolution2D(28, (3, 3), input_shape=(28, 28, 1), activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Dropout(0.25))
# layer2
classifier.add(Convolution2D(32, padding="same", kernel_size=3, activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# layer3
classifier.add(Convolution2D(64, padding="same", kernel_size=3, activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# flattening
classifier.add(Flatten())
classifier.add(Dense(units=128, activation="relu"))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=10, activation="softmax"))
# compile
classifier.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# fit model
classifier.fit(X, y, batch_size=12, epochs=20)
# predict
y_pred = classifier.predict(test)
import numpy as np
results = np.argmax(y_pred, axis=1)
data_out = pd.DataFrame({"id": range(len(test)), "label": results})
data_out.to_csv("try.csv", index=None)
|
#
#
# Dataviz - Data Science Specialization Program - FACENS
# # Exercise 1 - First contact with Kaggle
# (graded)
# * **Due date:** by the end of class
# * **Professor:** Matheus Mota
# * **Student: Diogo Henrique da Silva**
# * **Student ID (RA): 173235**
# ## Question 1
# **Problem statement:** This notebook is associated with the *Kaggle Dataset* called "Exercício 1". This *Kaggle Dataset* contains two CSV files (anv.csv and BR_eleitorado_2016_municipio). Choose one of the available, already familiar datasets, at your discretion. Once the csv is chosen, pick at least 7 and at most 12 variables (columns) that you consider relevant. For each of your chosen variables, provide:
# ### Question 1 - Item A - Variable classification
# Classify all the chosen variables and build a dataframe with your answer.
# Example:
import pandas as pd
df = pd.read_csv(
"../input/dataviz-facens-20182-aula-1-exerccio-2/anv.csv", delimiter=","
)
df.head(1)
variaveis = [
    ["aeronave_tipo_veiculo", "Qualitative Nominal"],
    ["aeronave_modelo", "Qualitative Nominal"],
    ["aeronave_operador_categoria", "Qualitative Nominal"],
    ["aeronave_motor_tipo", "Qualitative Nominal"],
    ["total_fatalidades", "Quantitative Discrete"],
    ["aeronave_fase_operacao", "Qualitative Nominal"],
    ["aeronave_ano_fabricacao", "Quantitative Discrete"],
]
variaveis = pd.DataFrame(variaveis, columns=["Variable", "Classification"])
variaveis
# ### Question 1 - Item B - Frequency table
# Build a frequency table for each of the **qualitative variables** you chose (if you did not choose any, leave this question blank). A tip: Pandas' *value_counts()* function can be very useful. =)
#
variaveis
tp_veic = pd.DataFrame(df["aeronave_tipo_veiculo"].value_counts())
tp_veic
ae_mod = pd.DataFrame(df["aeronave_modelo"].value_counts())
ae_mod
op_cat = pd.DataFrame(df["aeronave_operador_categoria"].value_counts())
op_cat
f_op = pd.DataFrame(df["aeronave_fase_operacao"].value_counts())
f_op
m_tipo = pd.DataFrame(df["aeronave_motor_tipo"].value_counts())
m_tipo
# ### Question 1 - Item C - Graphical representation
# For each variable, produce one or more charts using matplotlib that describe its behavior / characteristics. Remember that these charts must be compatible with the variable's classification.
import matplotlib.pyplot as plt
variaveis
tit = "Top 5 - Ocorrências por Tipo de Veículo"
df["aeronave_tipo_veiculo"].value_counts().head(5).plot(kind="bar", title=tit)
tit = "Top 3 - Ocorrências por Modelo de Aeronave"
df["aeronave_modelo"].value_counts().head(3).plot(kind="barh", title=tit)
tit = "Top 5 - Ocorrências por Categoria do Operador"
df["aeronave_operador_categoria"].value_counts().head(5).plot(kind="bar", title=tit)
tit = "Top 3 - Ocorrências por Tipo de Motor"
df["aeronave_motor_tipo"].value_counts().head(3).plot(kind="barh", title=tit)
tit = "Top 5 - Ocorrências por Fase de Operação"
df["aeronave_fase_operacao"].value_counts().head(5).plot(kind="bar", title=tit)
# # Tips
# - Example of how to access the CSVs available in the dataset
# > df = pd.read_csv('../input/anv.csv', delimiter=',')
df = pd.read_csv(
"../input/dataviz-facens-20182-aula-1-exerccio-2/anv.csv", delimiter=","
)
df.head(1)
|
li = ["abc", 34, 4.34, 23]
print(li)
st = "Hello World"
print(st)
st = "Hello World"
print(st)
st = """This is a multi-line string that uses triple quotes. """
print(st)
tu = (23, "abc", 4.56, (2, 3), "def")
print(tu[1])
print(tu[-1])
|
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LinearRegression
df = pd.read_csv(
"../input/nfl-combine-data/combine_data_since_2000_PROCESSED_2018-04-26.csv"
)
df.head()
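# Undrafted players have no Pick value; fill with 260 so they sort after every drafted player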
df["Pick"] = df.Pick.fillna(260)
df
# Predict AV based on factors
# Predict drafted
# drafted variable
# clustering for positions
df.dtypes
df.isna().sum()
# Height vs Weight
sns.regplot(x="Wt", y="Ht", data=df)
qb = df[df["Pos"] == "QB"]
activeqb = df[df["AV"] > 4]
from scipy import stats
def r2(Ht, AV):
return stats.pearsonr(Ht, AV)[0] ** 2
sns.jointplot(x="Ht", y="AV", data=activeqb, kind="reg", stat_func=r2)
# corrplot
plt.figure(figsize=(8, 9))
corr = df.corr()
sns.heatmap(corr, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5}, cmap="binary")
plt.figure(figsize=(20, 9))
sns.violinplot(x="Round", y="AV", data=df, palette="Set3", bw=0.2, cut=1, linewidth=1)
df.Pos.value_counts().iloc[:10].plot(kind="barh")
df.Pos.unique()
line = df[
(df["Pos"] == "OT")
| (df["Pos"] == "OG")
| (df["Pos"] == "EDGE")
| (df["Pos"] == "NT")
| (df["Pos"] == "DT")
| (df["Pos"] == "DE")
| (df["Pos"] == "C")
]
oskill = df[
(df["Pos"] == "QB")
| (df["Pos"] == "RB")
| (df["Pos"] == "WR")
| (df["Pos"] == "TE")
| (df["Pos"] == "FB")
]
special = df[(df["Pos"] == "K") | (df["Pos"] == "P") | (df["Pos"] == "LS")]
db = df[
(df["Pos"] == "SS")
| (df["Pos"] == "FS")
| (df["Pos"] == "S")
| (df["Pos"] == "CB")
| (df["Pos"] == "DB")
]
lb = df[
(df["Pos"] == "OLB")
| (df["Pos"] == "ILB")
| (df["Pos"] == "LB")
| (df["Pos"] == "EDGE")
]
sns.violinplot(x="Pos", y="Ht", data=line, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Lineman Height by Position")
plt.show()
sns.violinplot(x="Pos", y="Wt", data=line, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Lineman Weight by Position")
plt.show()
sns.violinplot(x="Pos", y="Ht", data=oskill, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Offensive Skill Height by Position")
plt.show()
sns.violinplot(x="Pos", y="Wt", data=oskill, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Offensive Skill Weight by Position")
plt.show()
sns.violinplot(
x="Pos", y="Ht", data=special, palette="Set3", bw=0.2, cut=1, linewidth=1
)
plt.title("Special Teamer Height by Position")
plt.show()
sns.violinplot(
x="Pos", y="Wt", data=special, palette="Set3", bw=0.2, cut=1, linewidth=1
)
plt.title("Special Teamer Weight by Position")
plt.show()
sns.violinplot(x="Pos", y="Ht", data=db, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Defensive Back Height by Position")
plt.show()
sns.violinplot(x="Pos", y="Wt", data=db, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Defensive Back Weight by Position")
plt.show()
sns.violinplot(x="Pos", y="Ht", data=lb, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Linebacker Height by Position")
plt.show()
sns.violinplot(x="Pos", y="Wt", data=lb, palette="Set3", bw=0.2, cut=1, linewidth=1)
plt.title("Linebacker Weight by Position")
plt.show()
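# Impute missing combine drill results with the median for that position group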
df["Forty"] = df.groupby(["Pos"]).Forty.apply(lambda x: x.fillna(x.median()))
df["Vertical"] = df.groupby(["Pos"]).Vertical.apply(lambda x: x.fillna(x.median()))
df["BenchReps"] = df.groupby(["Pos"]).BenchReps.apply(lambda x: x.fillna(x.median()))
df["BroadJump"] = df.groupby(["Pos"]).BroadJump.apply(lambda x: x.fillna(x.median()))
df["Cone"] = df.groupby(["Pos"]).Cone.apply(lambda x: x.fillna(x.median()))
df["Shuttle"] = df.groupby(["Pos"]).Shuttle.apply(lambda x: x.fillna(x.median()))
df["Pick"] = df["Pick"].astype(int)
df = df.dropna(subset=["Cone", "Shuttle"])
# linear regression to predict AV. Try Ht, Wt, Forty, Vertical, Bench, Broad, Cone, Shuttle, Pick
X = df[
[
"Ht",
"Wt",
"Forty",
"Vertical",
"BenchReps",
"BroadJump",
"Cone",
"Shuttle",
"Pick",
]
]
Y = df[["AV"]]
model = LinearRegression().fit(X, Y)
import statsmodels.api as sm
model = sm.OLS(Y, X).fit()
predictions = model.predict(X)
model.summary()
X = df[["Wt", "Vertical", "Cone", "Pick"]]
Y = df[["AV"]]
model = LinearRegression().fit(X, Y)
model = sm.OLS(Y, X).fit()
predictions = model.predict(X)
model.summary()
brady = df[df["Player"] == "Tom Brady"]
brady.head()
X_predict = brady[["Wt", "Vertical", "Cone", "Pick"]]  # features for the player we want a prediction for
y_predict = model.predict(X_predict)
# Tom Brady's actual AV minus the model's predicted AV
brady["AV"] - y_predict
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from keras.layers import Conv2D, MaxPool2D, Flatten
data = pd.read_csv("/kaggle/input/Kannada-MNIST/train.csv")
data.head()
from collections import Counter
Counter(data["label"])
data.shape
x_train = (data.iloc[:, 1:].values).astype("float32")
y_train = (data.iloc[:, 0].values).astype("int")
y_train = keras.utils.to_categorical(y_train)
x_train = x_train / 255.0
import sklearn
from sklearn.model_selection import train_test_split
x_train = x_train.reshape(-1, 28, 28, 1)
X_train, X_test, Y_train, y_test = train_test_split(x_train, y_train)
from keras.models import Sequential
import keras
from keras.layers import Dense
from keras.callbacks import EarlyStopping
Modelnew = Sequential()
b = EarlyStopping(patience=3, monitor="val_loss")
from keras.layers import Dropout
Modelnew.add(
Conv2D(filters=64, kernel_size=3, activation="relu", input_shape=(28, 28, 1))
)
Modelnew.add(Conv2D(filters=32, kernel_size=3, activation="relu"))
Modelnew.add(Dropout(0.2))
Modelnew.add(Flatten())
Modelnew.add(Dense(10, activation="softmax"))
Modelnew.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
Modelnew.fit(
X_train,
Y_train,
validation_data=(X_test, y_test),
epochs=15,
batch_size=32,
callbacks=[b],
)
Modelnew.evaluate(X_test, y_test)
pred = Modelnew.predict(X_test)
Y_pred_classes = np.argmax(pred, axis=1)
Y_Act_Classes = np.argmax(y_test, axis=1)
from sklearn.metrics import confusion_matrix, auc, f1_score, classification_report
confusion_matrix(Y_Act_Classes, Y_pred_classes)
test_data = pd.read_csv("/kaggle/input/Kannada-MNIST/test.csv")
testdata = test_data.iloc[:, 1:]
testdata
test_data = testdata.values.reshape(-1, 28, 28, 1) / 255.0  # scale pixels the same way as the training data
predicted_classes = np.argmax(Modelnew.predict(test_data), axis=1)  # predict_classes was removed in newer Keras
submission = pd.read_csv("../input/Kannada-MNIST/sample_submission.csv")
submission["label"] = predicted_classes
submission.to_csv("submission.csv", index=False)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
file_path = "/kaggle/input/bitcoin-historical-data/bitstampUSD_1-min_data_2012-01-01_to_2021-03-31.csv"
df = pd.read_csv(file_path)
df.head()
# Convert the Timestamp column to a datetime object and set it as the index
df["Timestamp"] = pd.to_datetime(df["Timestamp"], unit="s")
df.set_index("Timestamp", inplace=True)
# Resample the data to get daily average prices
daily_data = df.resample("D").mean()
# Display the daily data
daily_data.head()
# Check for missing data
daily_data.isna().sum()
# Fill missing data with the previous valid value
daily_data.fillna(method="ffill", inplace=True)
# Check for missing data again
daily_data.isna().sum()
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Weighted_Price"])
plt.title("Bitcoin Daily Weighted Price (2012-2021)")
plt.xlabel("Year")
plt.ylabel("Price (USD)")
plt.show()
# Calculate daily returns
daily_data["Returns"] = daily_data["Weighted_Price"].pct_change() * 100
# Plot the daily returns
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Returns"])
plt.title("Bitcoin Daily Returns (2012-2021)")
plt.xlabel("Year")
plt.ylabel("Returns")
plt.show()
# Calculate the rolling 30-day volatility
daily_data["Volatility"] = daily_data["Returns"].rolling(window=30).std()
# Plot the rolling 30-day volatility
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Volatility"])
plt.title("Bitcoin 30-Day Rolling Volatility (2012-2021)")
plt.xlabel("Year")
plt.ylabel("Volatility")
plt.show()
# Calculate moving averages (30, 90, and 180 days)
daily_data["30_day_MA"] = daily_data["Weighted_Price"].rolling(window=30).mean()
daily_data["90_day_MA"] = daily_data["Weighted_Price"].rolling(window=90).mean()
daily_data["180_day_MA"] = daily_data["Weighted_Price"].rolling(window=180).mean()
# Plot the moving averages
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Weighted_Price"], label="Weighted_Price")
plt.plot(daily_data["30_day_MA"], label="30-day MA")
plt.plot(daily_data["90_day_MA"], label="90-day MA")
plt.plot(daily_data["180_day_MA"], label="180-day MA")
plt.title("Bitcoin Moving Averages (2012-2021)")
plt.xlabel("Year")
plt.ylabel("Price (USD)")
plt.legend()
plt.show()
# Calculate cumulative returns
daily_data["Cumulative_Returns"] = (1 + daily_data["Returns"]).cumprod()
# Plot the cumulative returns
plt.figure(figsize=(12, 6))
plt.plot(daily_data["Cumulative_Returns"])
plt.title("Bitcoin Cumulative Returns (2012-2021)")
plt.xlabel("Year")
plt.ylabel("Cumulative Returns")
plt.show()
# Calculate correlation between daily returns and daily trading volume
correlation = daily_data["Returns"].corr(daily_data["Volume_(Currency)"])
print(f"Correlation between daily returns and daily trading volume: {correlation:.4f}")
|
# # Titanic Machine Learning problem
# Importing Modules
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
titanic = pd.read_csv("/kaggle/input/titanic/train.csv")
titanic.shape
titanic.head()
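# Drop PassengerId; Survived stays in X for now and is split out as Y below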
X = titanic.iloc[:, 1 : titanic.shape[1]]
X
# # Preprocessing training data
# Deleting unnecessary columns
del X["Name"]
del X["Ticket"]
del X["Cabin"]
del X["Fare"]
X
# Dealing with null entries
mean = X["Age"].mean()
X["Age"].fillna(mean, inplace=True)
X.isnull().sum()
X.dropna(inplace=True)
X.isnull().sum()
X
# Y is training output
Y = X.iloc[:, 0]
X = X.iloc[:, 1 : X.shape[1]]
print(X.shape)
print(Y.shape)
# converting male to 0 and female to 1
genders = {"male": 0, "female": 1}
data = [X]
for i in data:
i["Sex"] = i["Sex"].map(genders)
# converting embarkment
data1 = data
embarked = {"S": 0, "Q": 1, "C": 2}
for i in data1:
i["Embarked"] = i["Embarked"].map(embarked)
X
# # Preprocessing test data
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
# Removing unnecessary columns from test data
del test_data["Name"]
del test_data["Ticket"]
del test_data["Cabin"]
del test_data["Fare"]
# Dealing with null entries of test data
mean = test_data["Age"].mean()
test_data["Age"].fillna(mean, inplace=True)
test_data.isnull().sum()
# converting male to 0 and female to 1 for testing data
genders = {"male": 0, "female": 1}
data = [test_data]
for i in data:
i["Sex"] = i["Sex"].map(genders)
# converting embarkment for testing data
data1 = data
embarked = {"S": 0, "Q": 1, "C": 2}
for i in data1:
i["Embarked"] = i["Embarked"].map(embarked)
data_test = test_data.iloc[:, 1 : test_data.shape[1]]
data_test
# Feature scaling the training and testing data.
scaler = preprocessing.StandardScaler()
scaler.fit(X)
x_train = scaler.transform(X)
x_test = scaler.transform(data_test)
# Classifier
clf = RandomForestClassifier(max_depth=6, random_state=0)
clf.fit(x_train, Y)
print(clf.score(x_train, Y))
y_pred = clf.predict(x_test)
y_pred
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": y_pred})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
|
import pandas as pd
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import warnings
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.preprocessing import image, image_dataset_from_directory
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow import keras
import tensorflow
import tensorflow as tf
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
"/kaggle/input/alzheimers-dataset-4-class-of-images/Alzheimer_s Dataset/train",
validation_split=0.2,
subset="training",
seed=1337,
image_size=[180, 180],
batch_size=16,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
"/kaggle/input/alzheimers-dataset-4-class-of-images/Alzheimer_s Dataset/train",
validation_split=0.2,
subset="validation",
seed=1337,
image_size=[180, 180],
batch_size=16,
)
test_ds = tf.keras.preprocessing.image_dataset_from_directory(
"/kaggle/input/alzheimers-dataset-4-class-of-images/Alzheimer_s Dataset/test",
seed=1337,
image_size=[180, 180],
batch_size=16,
)
classnames = train_ds.class_names
len(classnames), train_ds.class_names
NUM_IMAGES = []
for label in classnames:
dir_name = (
"/kaggle/input/alzheimers-dataset-4-class-of-images/Alzheimer_s Dataset/train/"
+ label[:-2]
+ "ed"
)
NUM_IMAGES.append(len([name for name in os.listdir(dir_name)]))
NUM_IMAGES, classnames
# Performing Image Augmentation to have more data samples
from tensorflow.keras.preprocessing.image import ImageDataGenerator as IDG
IMG_SIZE = 180
IMAGE_SIZE = [180, 180]
DIM = (IMG_SIZE, IMG_SIZE)
ZOOM = [0.99, 1.01]
BRIGHT_RANGE = [0.8, 1.2]
HORZ_FLIP = True
FILL_MODE = "constant"
DATA_FORMAT = "channels_last"
WORK_DIR = (
"/kaggle/input/alzheimers-dataset-4-class-of-images/Alzheimer_s Dataset/train"
)
work_dr = IDG(
rescale=1.0 / 255,
brightness_range=BRIGHT_RANGE,
zoom_range=ZOOM,
data_format=DATA_FORMAT,
fill_mode=FILL_MODE,
horizontal_flip=HORZ_FLIP,
)
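# A batch size larger than the dataset lets a single next() call return every training image at once (needed for SMOTE below)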
train_data_gen = work_dr.flow_from_directory(
directory=WORK_DIR, target_size=DIM, batch_size=6500, shuffle=False
)
train_data, train_labels = train_data_gen.next()
# before oversampling
print(train_data.shape, train_labels.shape)
# Performing over-sampling of the data, since the classes are imbalanced
# after oversampling
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=42)
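# SMOTE expects a 2-D feature matrix, so flatten each image, oversample, then reshape back to (H, W, 3)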
train_data, train_labels = sm.fit_resample(
train_data.reshape(-1, IMG_SIZE * IMG_SIZE * 3), train_labels
)
train_data = train_data.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
print(train_data.shape, train_labels.shape)
# Splitting the data into train, test, and validation sets
from sklearn.model_selection import train_test_split
train_data, test_data, train_labels, test_labels = train_test_split(
train_data, train_labels, test_size=0.2, random_state=42
)
train_data, val_data, train_labels, val_labels = train_test_split(
train_data, train_labels, test_size=0.2, random_state=42
)
from tensorflow.keras.applications.inception_v3 import InceptionV3
inception_model = InceptionV3(
input_shape=(180, 180, 3), include_top=False, weights="imagenet"
)
for layer in inception_model.layers:
layer.trainable = False
from tensorflow.keras.layers import (
SeparableConv2D,
BatchNormalization,
GlobalAveragePooling2D,
)
custom_inception_model = Sequential(
[
inception_model,
Dropout(0.5),
GlobalAveragePooling2D(),
Flatten(),
BatchNormalization(),
Dense(512, activation="relu"),
BatchNormalization(),
Dropout(0.5),
Dense(256, activation="relu"),
BatchNormalization(),
Dropout(0.5),
Dense(128, activation="relu"),
BatchNormalization(),
Dropout(0.5),
Dense(64, activation="relu"),
Dropout(0.5),
BatchNormalization(),
Dense(4, activation="softmax"),
],
name="inception_cnn_model",
)
# Defining a custom callback function to stop training our model when accuracy goes above 99%
from tensorflow.keras.callbacks import ReduceLROnPlateau
class MyCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if logs.get("acc") > 0.99:
print("\nReached accuracy threshold! Terminating training.")
self.model.stop_training = True
my_callback = MyCallback()
# ReduceLROnPlateau to stabilize the training process of the model
rop_callback = ReduceLROnPlateau(monitor="val_loss", patience=3)
METRICS = [
tf.keras.metrics.CategoricalAccuracy(name="acc"),
tf.keras.metrics.AUC(name="auc"),
]
CALLBACKS = [my_callback, rop_callback]
custom_inception_model.compile(
optimizer="rmsprop", loss=tf.losses.CategoricalCrossentropy(), metrics=METRICS
)
# Fit the training data to the model and validate it using the validation data
EPOCHS = 20
history = custom_inception_model.fit(
train_data,
train_labels,
validation_data=(val_data, val_labels),
callbacks=CALLBACKS,
epochs=EPOCHS,
)
fig, ax = plt.subplots(1, 2, figsize=(20, 3))
ax = ax.ravel()
for i, met in enumerate(["acc", "loss"]):
ax[i].plot(history.history[met])
ax[i].plot(history.history["val_" + met])
ax[i].set_title("Model {}".format(met))
ax[i].set_xlabel("epochs")
ax[i].set_ylabel(met)
ax[i].legend(["train", "val"])
test_scores = custom_inception_model.evaluate(test_data, test_labels)
# print("Training Accuracy: %.2f%%"%(train_scores[1] * 100))
# print("Validation Accuracy: %.2f%%"%(val_scores[1] * 100))
print("Testing Accuracy: %.2f%%" % (test_scores[1] * 100))
pred_labels = custom_inception_model.predict(test_data)
# Plot the confusion matrix to understand the classification in detail
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
pred_ls = np.argmax(pred_labels, axis=1)
test_ls = np.argmax(test_labels, axis=1)
conf_arr = confusion_matrix(test_ls, pred_ls)
plt.figure(figsize=(8, 6), dpi=80, facecolor="w", edgecolor="k")
ax = sns.heatmap(
conf_arr,
cmap="Greens",
annot=True,
fmt="d",
xticklabels=classnames,
yticklabels=classnames,
)
plt.title("Alzheimer's Disease Diagnosis")
plt.xlabel("Prediction")
plt.ylabel("Truth")
plt.show()
export_dir = "/kaggle/working/"
tf.saved_model.save(custom_inception_model, export_dir)
import pathlib
tflite_model_name = "alzahimer.tflite"
# Convert the model.
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
# save model as tflite for mobile app (Flutter)
tflite_model_file = pathlib.Path(tflite_model_name)
tflite_model_file.write_bytes(tflite_model)
import tensorflow as tf
# save model as hdf5 for web app (streamlit)
tf.keras.models.save_model(custom_inception_model, "alzahimer.hdf5")
|
# ## Exploratory Analysis
#
# Importing required libraries.
import pandas as pd
import numpy as np
import seaborn as sns # visualisation
import matplotlib.pyplot as plt # visualisation
sns.set(color_codes=True)
df = pd.read_csv("/kaggle/input/AgricultureInKarnataka.csv")
# To display the top 5 rows
df.head(5)
# Let's take a quick look at what the data looks like:
# To display the bottom 5 rows
df.tail(5)
# Checking the data type
df.dtypes
df.columns
# Total number of rows and columns
print(df.shape)
# Used to count the number of rows
df.count()
# Finding the null values.
print(df.isnull().sum())
|
import numpy as np
import os
import cv2
import zipfile
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
import random
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Flatten, Dropout, Activation
from keras.layers import BatchNormalization
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from keras.models import save_model, load_model
from keras.preprocessing.image import ImageDataGenerator
from mlxtend.plotting import plot_confusion_matrix
training_data = "../working/train/train"
testing_data = "../working/test/test"
for dirname, _, filenames in os.walk("/kaggle/input/"):
for filename in filenames:
print(os.path.join(dirname, filename))
train_data = []
RESIZE = 100
X = []
y = []
def create_training_data():
for img in tqdm(os.listdir(training_data)):
try:
img_array = cv2.imread(
os.path.join(training_data, img), cv2.IMREAD_GRAYSCALE
)
img2 = cv2.resize(img_array, (RESIZE, RESIZE))
img2 = (img2 - img2.mean()) / img2.std()
if img[:3] == "dog":
class_num = 0
else:
class_num = 1
X.append(img2)
y.append(class_num)
except Exception as e:
pass
create_training_data()
X = np.array(X).reshape(-1, RESIZE, RESIZE, 1)
y = np.asarray(y)
(X_train, X_val, y_train, y_val) = train_test_split(
X, y, test_size=0.3, random_state=42
)
aug_train = ImageDataGenerator(
rotation_range=20,
zoom_range=0.15,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.15,
horizontal_flip=True,
fill_mode="nearest",
)
generator_val = ImageDataGenerator()
aug_train.fit(X_train)
generator_val.fit(X_val)
K.set_image_data_format("channels_last")
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(RESIZE, RESIZE, 1)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(2, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"]
)
model.summary()
# earlystop = EarlyStopping(patience=5)
history = model.fit(
    aug_train.flow(X_train, y_train, batch_size=32),
    validation_data=generator_val.flow(X_val, y_val, batch_size=32),
    epochs=100,
)
# callbacks=[earlystop]
LABELS = ["DOG", "CAT"]
test_data = []
RESIZE = 100
X_test = []
X_id = []
def create_test_data():
for img in tqdm(os.listdir(testing_data)):
try:
img_array = cv2.imread(
os.path.join(testing_data, img), cv2.IMREAD_GRAYSCALE
)
            img2 = cv2.resize(img_array, (RESIZE, RESIZE))
            img2 = (img2 - img2.mean()) / img2.std()  # apply the same per-image standardization as the training set
            X_test.append(img2)
img_num = img.split(".")[0]
X_id.append(np.array(img_num))
except Exception as e:
pass
create_test_data()
X_test = np.array(X_test).reshape(-1, RESIZE, RESIZE, 1)
arr_test = model.predict(X_test.astype(float))
submission = pd.DataFrame({"id": X_id, "label": arr_test[:, 0]})
submission.head()
filename = "Prediction1.csv"
submission.to_csv(filename, index=False)
print("Saved file: " + filename)
test_predicted_label = np.argmax(arr_test, axis=1)
fig = plt.figure(figsize=(20, 20))
for counter, img in enumerate(X_test[:40]):
ax = fig.add_subplot(10, 4, counter + 1)
ax.imshow(X_test[counter, :, :, 0], cmap="gray")
plt.title(LABELS[test_predicted_label[counter]])
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
plt.tight_layout()
plt.show()
|
# ## PTC Advanced Workshop
# Welcome to the SQL Scavenger Hunt!
# Over the course of the next few hours, we're going to be using different SQL commands to help us find the data to uncover a puzzling medical mystery. This handbook gives a brief introduction to SQL and BigQuery. We'll help you hit the ground running with a few example commands.
# ### What is SQL?
# Before you jump right in, there are probably a few questions you have. What is SQL and why should you use it? Well, the answer is fairly straightforward.
# SQL (short for “Structured Query Language”, and said like either "see-quill" or "S-Q-L" ) is a programming language that allows you to interact with databases. For many databases out there, SQL is the *only* way to access the information in them and, as a result, it's an important skill for any data scientist or aspiring data scientist. (You don't need to take my word on this one: in our survey of data scientists we found that SQL was [the third most popular software tool for data science](https://www.kaggle.com/surveys/2017), right after Python and R.)
# > **Why learn SQL?**: If you're currently looking for a data science job, being able to show that you're comfortable with SQL will open up more job opportunities for you. If you're currently *doing* data science, brushing up on your SQL skills will help you access more data sources and make it easier to get a subset of a database to work with locally. Plus, it's fun! :)
# ### What is BigQuery
# [BigQuery](https://cloud.google.com/bigquery/) is a Google Cloud product for storing and accessing very large databases very quickly. We've recently started making [some BigQuery datasets](https://www.kaggle.com/datasets?filetype=bigQuery) accessible via Kaggle. Since SQL is the easiest way to access these data in these datasets they make the perfect playground to help you get comfortable with this language.
# > Because the datasets on BigQuery can be very large, there are some restrictions on how much data you can access. The good news is that **each Kaggle user can scan 5TB every 30 days for free.** The bad news is that If you go over your quota you're going to have to wait for it to reset.
# Don't worry, through this workshop, we'll teach you how to be careful when looking at BigQuery data to make sure you don't accidentally go over your quota.
# ### Getting Started
# In order to use a BigQuery Dataset, you can start by going to the [Datasets page](https://www.kaggle.com/datasets?fileType=bigQuery) and selecting one of the many available ones. Once you've chosen one, head to the dataset page and start a new kernel with it by clicking the new kernel button, like with any other dataset. Currently, BigQuery datasets are only usable with Python kernels.
# To make life easier, we can alternatively use a helper package called bq_helper, and pre-load it into our kernels. It has some helper functions for getting the data out of BigQuery that will speed up our process a lot. You can use `bq_helper` by importing it:
# importing bq_helper package
import bq_helper
# Now that we've added the correct package, we can import our dataset! From the above [Datasets page](https://www.kaggle.com/datasets?fileType=bigQuery), you must have seen many datasets. To use them, we need to create a helper for our BigQuery dataset.
# Using our helper package, we can assign the value of a variable to be any BigQuery dataset, like so:
# import the dataset using our bq_helper package
crime_dataset = bq_helper.BigQueryHelper(
active_project="bigquery-public-data", dataset_name="chicago_crime"
)
# Now that the helper object has been created, we can start interacting with our database!
# ### Useful BigQuery Methods
# Now before we start querying our database with SQL, we can use a few BigQuery methods to help simplify the process. We can start by looking at the schema.
# > **Schema**: A description of how data is organized within a dataset.
# Using the `BigQueryHelper.list_tables()` method, list all the tables in the crime_dataset.
# > Hint: Type your_dataset_name.list_tables()
# lists all tables within the crime_dataset
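# One possible answer (a minimal sketch, using the crime_dataset helper created above):
crime_dataset.list_tables()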
# Now that we know what tables are in this dataset, we can get information on the columns in a specific table. In this example, we're looking at the information on the "crime" table.
# print information on all the columns in the "crime" table in the Chicago crime dataset
crime_dataset.table_schema("crime")
# Each SchemaField tells us about a specific column. In order, the information is:
# * The name of the column
# * The datatype in the column
# * [The mode of the column](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.mode) (NULLABLE means that a column allows NULL values, and is the default)
# * A description of the data in that column
# ## Check the size of your query before you run it
# ____
# BigQuery datasets can be very, very large. The [biggest dataset we've got on Kaggle so far](https://www.kaggle.com/github/github-repos) is 3 terabytes. Since the monthly quota for BigQuery queries is 5 terabytes, you can easily go past your 30-day quota by running just a couple of queries!
# > **What's a query?** A query is small piece of SQL code that specifies what data would you like to scan from a databases, and how much of that data you would like returned. (Note that your quota is on data *scanned*, not the amount of data returned.)
# One way to help avoid this is to estimate how big your query will be before you actually execute it. You can do this with the `BigQueryHelper.estimate_query_size()` method. For the rest of this notebook, I'll be using an example query that pulls the FBI code for every Chicago crime reported in 2020. Let's see how much data it would scan if we actually ran it.
# this query looks in the crime table in the chicago_crime
# dataset, then gets the fbi_code column from every row where
# the year column is 2020.
query = """SELECT fbi_code
FROM `bigquery-public-data.chicago_crime.crime`
WHERE year = 2020 """
# check how big this query will be
crime_dataset.estimate_query_size(query)
# Running this query will scan around 8 MB of data. Not too bad for our first query!
# > **Important:** When you're writing your query, make sure that the name of the table (next to FROM) is in back ticks (\`), not single quotes ('). The reason for this is that the names of BigQuery tables contain periods in them, which in SQL are special characters. Putting the table name in back ticks protects the table name, so it's treated as a single string instead of being run as code.
# ### Safely run a query
# ___
# Now that we know how to check the size of the query (and make sure we're not scanning several terabytes of data!) we're ready to actually run our first query. You have two methods available to help you do this:
# * *`BigQueryHelper.query_to_pandas(query)`*: This method takes a query and returns a Pandas dataframe.
# * *`BigQueryHelper.query_to_pandas_safe(query, max_gb_scanned=1)`*: This method takes a query and returns a Pandas dataframe only if the size of the query is less than the upperSizeLimit (1 gigabyte by default).
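# A minimal sketch (the variable name is just for illustration), reusing the `query` string
# defined above: the "safe" method only runs the query if its estimated scan size stays under
# max_gb_scanned, so it is the better default while you are exploring.
fbi_codes = crime_dataset.query_to_pandas_safe(query, max_gb_scanned=0.1)
fbi_codes.head()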
# ### Avoiding common mistakes 🤭
# ____
# Big data is great! Until working at a bigger scale suddenly it makes your problems bigger too, like [this poor professor whose experiment racked up an unexpected $1000 bill](https://www.wired.com/2012/04/aws-bill-in-minutes/). Although Kaggle isn't charging for accessing BigQuery datasets, following these best practices can help you avoid trouble down the line. If you'd like to learn more, you can check out [all the BigQuery best practices here](https://cloud.google.com/bigquery/docs/best-practices).
# * *Avoid using the asterisk (\*) in your queries.* As you might have seen before in regular expressions, the asterisk means “everything”. While this may be okay with smaller datasets, if you send a query to a 4 terabyte dataset and ask for “everything” you're going to scan far more than you bargained for (or than a kernel can handle).
# * *For initial exploration, look at just part of the table instead of the whole thing.* If you're just curious to see what data's in a table, preview it instead of scanning the whole table. We've included a method, `BigQueryHelper.head()`, in our helper package to help with this. Like `head()` in Pandas or R, it will just return the first few rows for you to look at.
# * *Double-check the size of complex queries.* If you're planning on running what might be a large query, either estimate the size first or run it using the `BigQueryHelper.query_to_pandas_safe()` method.
# * *Be cautious about joining tables.* In particular, avoid joining a table with itself (i.e. a self-join) and try to avoid joins that return a table that's larger than the ones you're joining together. (If you want to double-check yourself, you can try the join on just the heads of the tables involved.)
# * *Don't rely on LIMIT*: One of the things that can be confusing when working with BigQuery datasets is the difference between the data you *scan* and the data you actually *get back* especially since it's the first one that actually counts against your quota. When you do something like select a column with LIMIT = 10, you'll only get 10 results back... but you'll actually be scanning the whole column. It's not a big deal if your table has 1000 rows, but it's a much bigger deal if it has 10,000,000 rows!
# ### Now let's get started 🚀
# ## Exercises Part I
# ### SELECT, FROM & WHERE
# In this section, we're going to learn how to use SELECT, FROM and WHERE to get data from a specific column based on the value of another column. For the purposes of this explanation, we'll be using this imaginary database, `pet_records` which has just one table in it, called `pets`, which looks like this:
# 
# ### SELECT ... FROM
# ___
# The most basic SQL query is to select a single column from a specific table. To do this, you need to tell SELECT which column to select and then specify what table that column is from using from.
# > **Do you need to capitalize SELECT and FROM?** No, SQL doesn't care about capitalization. However, it's customary to capitalize your SQL commands and it makes your queries a bit easier to read.
# So, if we wanted to select the "Name" column from the pets table of the pet_records database (if that database were accessible as a BigQuery dataset on Kaggle , which it is not, because I made it up), we would do this:
# SELECT Name
# FROM `bigquery-public-data.pet_records.pets`
# Which would return the highlighted data from this figure.
# 
# ### WHERE ...
# ___
# When you're working with BigQuery datasets, you're almost always going to want to return only certain rows, usually based on the value of a different column. You can do this using the WHERE clause, which will only return the rows where the WHERE clause evaluates to true.
# Let's look at an example:
# SELECT Name
# FROM `bigquery-public-data.pet_records.pets`
# WHERE Animal = "Cat"
# This query will only return the entries from the "Name" column that are in rows where the "Animal" column has the text "Cat" in it. Those are the cells highlighted in blue in this figure:
# 
# ### Example: What are all the U.S. cities in the OpenAQ dataset?
# ___
# Now that you've got the basics down, let's work through an example with a real dataset. Today we're going to be working with the OpenAQ dataset, which has information on air quality around the world. (The data in it should be current: it's updated weekly.)
# To help get you situated, I'm going to run through a complete query first. Then it will be your turn to get started running your queries!
# First, I'm going to set up everything we need to run queries and take a quick peek at what tables are in our database.
# import package with helper functions
import bq_helper
# create a helper object for this dataset
open_aq = bq_helper.BigQueryHelper(
active_project="bigquery-public-data", dataset_name="openaq"
)
# print all the tables in this dataset (there's only one!)
open_aq.list_tables()
# print the first couple rows of the "global_air_quality" dataset
open_aq.head("global_air_quality")
# Great, everything looks good! Now that I'm set up, I'm going to put together a query. I want to select all the values from the "city" column for the rows where the "country" column is "US" (for "United States").
# > **What's up with the triple quotation marks (""")?** These tell Python that everything inside them is a single string, even though we have line breaks in it. The line breaks aren't necessary, but they do make it much easier to read your query.
# query to select all the items from the "city" column where the
# "country" column is "us"
my_query = """SELECT city
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US'
"""
# the query_to_pandas_safe will only return a result if it's less
# than one gigabyte (by default)
us_cities = open_aq.query_to_pandas_safe(my_query)
# What five cities have the most measurements taken there?
us_cities.city.value_counts().head()
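# Related to the LIMIT note at the top: a minimal sketch of checking how much data a query would scan before running it, using bq_helper's estimate_query_size helper. The estimate is essentially the same with or without a LIMIT clause, which is why LIMIT does not protect your quota.
# estimated scan size of the query above, in GB
open_aq.estimate_query_size(my_query)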
# ### Try it yourself!
# - How do I select all pollutants which have a value of exactly 0?
# ```
# query1 = """SELECT pollutant
# FROM `bigquery-public-data.openaq.global_air_quality`
# WHERE value = 0
# """
# ```
# write your own code here (click above if you're stuck)
query1 = """
"""
pollutants = open_aq.query_to_pandas_safe(query1)
pollutants.head()
# ## Exercises Part II
# ### GROUP BY... HAVING and COUNT
# Now that we know how to select the content of a column, we're ready to learn how to group your data and count things within those groups. This can help you answer questions like:
# * How many of each kind of fruit has our store sold?
# * How many species of animal has the vet office treated?
# To do this, we're going to learn about three new techniques: GROUP BY, HAVING and COUNT. Once again, we're going to use this 100% made up table of information on various pets, which has three columns: one with the unique ID number for each pet, one with the name of the pet and one with the species of the animal (rabbit, cat or dog).
# 
# ### COUNT
# ___
# COUNT(), as you may have guessed from the name, returns a count of things. If you pass it the name of a column, it will return the number of entries in that column. So if we SELECT the COUNT() of the ID column, it will return the number of IDs in that column.
# SELECT COUNT(ID)
# FROM `bigquery-public-data.pet_records.pets`
#
# This query, based on the table above, will return 4 because there are 4 IDs in this table.
#
# ### GROUP BY
# ___
# GROUP BY takes the name of one or more columns and tells SQL that we want to treat rows that have the same value in those columns as a single group when we apply aggregate functions like COUNT().
# > An **aggregate function** takes in many values and returns one. Here, we're learning about COUNT(), but there are other aggregate functions like SUM() and AVG().
# Note that because it tells SQL how to apply aggregate functions, it doesn't make sense to use GROUP BY without an aggregate function like COUNT().
# Let's look at an example. We want to know how many of each type of animal we have in our table. We can get this information by using GROUP BY to group together rows that have the same value in the “Animal” column, while using COUNT() to find out how many ID's we have in each group. You can see the general idea in this image:
# 
# The query that will get us this information looks like this:
# SELECT Animal, COUNT(ID)
# FROM `bigquery-public-data.pet_records.pets`
# GROUP BY Animal
# This query will return a table with two columns (Animal & COUNT(ID)) and three rows (one for each distinct Animal).
# One thing to note is that if you SELECT a column that you don't 1) pass to GROUP BY or 2) use as input to an aggregate function, you'll get an error. So this query won't work, because the Name column isn't passed to either an aggregate function or a GROUP BY clause:
# # NOT A VALID QUERY! "Name" isn't passed to GROUP BY
# # or an aggregate function
# SELECT Name, Animal, COUNT(ID)
# FROM `bigquery-public-data.pet_records.pets`
# GROUP BY Animal
#
# If you make this error, you'll get the error message `SELECT list expression references column (column's name) which is neither grouped nor aggregated at`.
# ### GROUP BY ... HAVING
# ___
# Another option you have when using GROUP BY is to specify that you want to ignore groups that don't meet certain criteria. So this query, for example, will only include groups that have more than one ID in them:
# SELECT Animal, COUNT(ID)
# FROM `bigquery-public-data.pet_records.pets`
# GROUP BY Animal
# HAVING COUNT(ID) > 1
# The only group that this query will return information on is the one in the cells highlighted in blue in this figure:
# 
# As a result, this query will return a table with only one row, since there is only one group remaining. It will have two columns: one for "Animal", which will have "Cat" in it, and one for COUNT(ID), which will have 2 in it.
# ### Example: Which Hacker News comments generated the most discussion?
# ___
# Now we're ready to work through an example on a real dataset. Today, we're going to be using the Hacker News dataset, which contains information on stories & comments from the Hacker News social networking site. I want to know which comments on the site generated the most replies.
# First, just like yesterday, we need to get our environment set up. I already know that I want the "comments" table, so I'm going to look at the first couple of rows of that to get started.
# import package with helper functions
import bq_helper
# create a helper object for this dataset
hacker_news = bq_helper.BigQueryHelper(
active_project="bigquery-public-data", dataset_name="hacker_news"
)
# print the first couple rows of the "comments" table
hacker_news.head("comments")
# By looking at the table, I learned that the "parent" column has information on the comment that each comment was a reply to and the "id" column has the unique id used to identify each comment. So I can group by the "parent" column and count the "id" column in order to figure out the number of comments that were made as responses to a specific comment.
# Because I'm more interested in popular comments than unpopular comments, I'm also only going to return the groups that have more than ten IDs in them. In other words, I'm only going to look at comments that received more than ten replies.
# query to pass to BigQuery
query = """SELECT parent, COUNT(id)
FROM `bigquery-public-data.hacker_news.comments`
GROUP BY parent
HAVING COUNT(id) > 10
"""
# the query_to_pandas_safe method will cancel the query if
# it would use too much of your quota, with the limit set
# to 1 GB by default
popular_stories = hacker_news.query_to_pandas_safe(query)
# To view the first few lines, we can run:
popular_stories.head()
# # Scavenger Hunt Time🕵🏻
# Phew! That was quite a bit to digest. Now it's your turn: get ready to write your own queries against the dataset below and find the answers.
# import package with helper functions
import bq_helper
# create a helper object for this dataset
world_bank_dataset = bq_helper.BigQueryHelper(
active_project="bigquery-public-data", dataset_name="world_bank_health_population"
)
world_bank_dataset.list_tables()
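# To get started, here is a minimal sketch of one possible first query. The table name `health_nutrition_population` and the columns `country_name`, `indicator_name`, `value` and `year` are assumptions about this dataset, so check the output of list_tables() and head() before relying on them.
# hypothetical starter query -- verify the table and column names first
starter_query = """SELECT country_name, indicator_name, value
                   FROM `bigquery-public-data.world_bank_health_population.health_nutrition_population`
                   WHERE year = 2016
                """
starter_df = world_bank_dataset.query_to_pandas_safe(starter_query)
starter_df.head()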
|
# # 0 - Preamble
# I start the notebook by importing the most commonly used libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# so that the plots can be displayed in the notebook
# Since each problem will use a different dataset, I will load and work on each dataset at the beginning of the corresponding solution. If they all used the same data, I could start working on it right away, which would make the later work easier and faster.
# # 1 - Regression
# Objective: Predict the total monthly rainfall volume in different cities for each month and year.
# Data: We have a total of 4 datasets, containing hourly data on different weather conditions in each city.
# Evaluation: RMSE is requested for evaluating the results.
# ## 1.1 - Data preparation:
# I will do a quick analysis of the given data in order to better identify its behavior and correlations, as well as any cleaning and changes that may be needed.
reg_test_data = pd.read_csv("../input/epistemicselecao/regression_features_test.csv")
reg_test_label = pd.read_csv("../input/epistemicselecao/regression_targets_test.csv")
reg_train_data = pd.read_csv("../input/epistemicselecao/regression_features_train.csv")
reg_train_label = pd.read_csv("../input/epistemicselecao/regression_targets_train.csv")
# I am using the ground truth as the label
# Starting the data analysis with reg_train_data
reg_train_data
# I will start by converting the time columns into a time-series index for all the data.
tempo_data = pd.to_datetime(reg_train_data[["year", "month", "day", "hour"]])
train_data = reg_train_data.set_index(tempo_data).drop(
["year", "month", "day", "hour"], axis=1
)
reg_train_label["day"] = 1
tempo_data = pd.to_datetime(reg_train_label[["year", "month", "day"]])
train_label = reg_train_label.set_index(tempo_data).drop(["year", "month"], axis=1)
tempo_data = pd.to_datetime(reg_test_data[["year", "month", "day", "hour"]])
test_data = reg_test_data.set_index(tempo_data).drop(
["year", "month", "day", "hour"], axis=1
)
reg_test_label["day"] = 1
tempo_data = pd.to_datetime(reg_test_label[["year", "month", "day"]])
test_label = reg_test_label.set_index(tempo_data).drop(["year", "month"], axis=1)
# I will start by removing any NaNs present
test_data = test_data.dropna()
test_label = test_label.dropna()
train_data = train_data.dropna()
train_label = train_label.dropna()
# Let's see how the data behaves
train_data.describe()
train_data.loc[(train_data["DEWP"] == -9999.0) | (train_data["HUMI"] == -9999.0)]
# We can see that this practically never happens (4 times in data with more than 100k rows). We can assume it is a momentary failure in data collection. Looking at the day in question:
train_data.loc[(train_data["city"] == 2)]["2013-12-14"]
# I will simply replace them with the neighboring values (13.2 for DEWP and 100 for HUMI). If there were more cases, a deeper study could be done, but given how rarely this occurs, I think a simple approach is better (considering how small it really is, it might not even be necessary).
train_data.loc[train_data.DEWP == -9999.0, ["DEWP", "HUMI"]] = 13.2, 100
train_data.loc[(train_data["city"] == 2)]["2013-12-14"]
# Now checking the test data
test_data.describe()
# Fortunately, the same does not happen here.
train_data = pd.get_dummies(train_data, columns=["cbwd"])
test_data = pd.get_dummies(test_data, columns=["cbwd"])
train_data
# ## 1.2 Data analysis
# Now I will analyze the correlations in the data. For reg_train_data we have the precipitation value at each timestamp, which is what we want, but we need the total monthly precipitation. So I will build 3 correlation views: one directly on the current (hourly) data, one averaging each day, and one averaging each month. First I will create those aggregated datasets for the correlation.
temp_dic = {}
for i in list(train_data):
temp_dic[i] = np.mean
temp_dic["precipitation"] = np.sum
# since we want to find the total precipitation volume, we keep its sum instead of the mean used for the other columns
diaria = pd.DataFrame()
diaria = train_data.loc[(train_data["city"] == 0)].resample("D").agg(temp_dic)
for city in range(1, 5):
diaria = diaria.append(
train_data.loc[(train_data["city"] == city)].resample("D").agg(temp_dic)
)
# resample the data to the desired frequency using the dictionary of aggregation functions created above
diaria.dropna(inplace=True)
diaria
mensal = pd.DataFrame()
mensal = train_data.loc[(train_data["city"] == 0)].resample("M").agg(temp_dic)
for city in range(1, 5):
mensal = mensal.append(
train_data.loc[(train_data["city"] == city)].resample("M").agg(temp_dic)
)
# analogous to the previous step, but with a different frequency
mensal.dropna(inplace=True)
mensal
corr_matrix = pd.DataFrame()
corr_matrix["/hora"] = train_data.corr(method="pearson").precipitation
corr_matrix["/dia"] = diaria.corr(method="pearson").precipitation
corr_matrix["/mes"] = mensal.corr(method="pearson").precipitation
corr_matrix
# We can see here that generating daily and monthly data was effective. Several variables had their correlation coefficients amplified in this process.
# The main hourly features are: DEWP, HUMI, PRES (absolute values close to 0.1)
# For daily values: DEWP, HUMI, PRES, TEMP are the main predictors, with season and cbwd_NE worth considering
# For monthly values: DEWP, HUMI, PRES, TEMP are excellent predictors, with season, cbwd_NW and cbwd_SE being good predictors, and lws, cbwd_NE and cbwd_cv worth considering
# Due to the low correlation values for the hourly data, I will not pursue that analysis further. This is expected for weather conditions, since these variables tend to affect future situations more than the instantaneous one.
# I will start the analysis with the monthly values; in particular, I will give extra weight to the main predictors (DEWP, HUMI, PRES, TEMP) by duplicating their columns, and I will not include the predictors lws, cbwd_NE and cbwd_cv. Then the regression begins.
# Before that, though, we must apply the same processing to the test data. I will only do the monthly version; the daily one I will do if needed.
temp_dic = {}
for i in list(test_data):
temp_dic[i] = np.mean
# This time we do not have the 'precipitation' column
teste_mensal = pd.DataFrame()
teste_mensal = test_data.loc[(test_data["city"] == 0)].resample("M").agg(temp_dic)
for city in range(1, 5):
teste_mensal = teste_mensal.append(
test_data.loc[(test_data["city"] == city)].resample("M").agg(temp_dic)
)
# analogous to the previous step, but with a different frequency
teste_mensal.dropna(inplace=True)
# # 1.3 Regression with random forest
treino_mes = pd.concat(
[
mensal[["season", "DEWP", "HUMI", "PRES", "TEMP", "cbwd_NW", "cbwd_SE"]],
mensal[["DEWP", "HUMI", "PRES", "TEMP"]].rename(
columns={"DEWP": "DEWP2", "HUMI": "HUMI2", "PRES": "PRES2", "TEMP": "TEMP2"}
),
],
axis=1,
)
# When duplicating the columns, I rename them to avoid possible confusion
target_mes = mensal["precipitation"]
treino_mes
# before we can apply the random forest we must normalize the data
treino_mes = (treino_mes - treino_mes.mean(axis=0)) / treino_mes.std(axis=0)
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=50, max_depth=40)
rf.fit(treino_mes, target_mes)
teste_mes = pd.concat(
[
teste_mensal[["season", "DEWP", "HUMI", "PRES", "TEMP", "cbwd_NW", "cbwd_SE"]],
teste_mensal[["DEWP", "HUMI", "PRES", "TEMP"]].rename(
columns={"DEWP": "DEWP2", "HUMI": "HUMI2", "PRES": "PRES2", "TEMP": "TEMP2"}
),
],
axis=1,
)
# When duplicating the columns, I rename them to avoid possible confusion
teste_mes
reg = rf.predict(teste_mes)
# Adding this value to the data frame:
teste_mensal["monthly_precipitation"] = reg
teste_mensal
# Comparing with the label, using sort_values so that both end up in the same order:
test_label = test_label.rename_axis("MyIdx").sort_values(
by=["city", "MyIdx"], ascending=[True, True]
)
test_label
# Now computing the error with MSE
from sklearn.metrics import mean_squared_error
verdade = test_label["monthly_precipitation"]
pred = teste_mensal["monthly_precipitation"]
mean_squared_error(verdade, pred)
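# The task statement asks for RMSE, so here is the same comparison expressed as RMSE (simply the square root of the MSE above).
rmse = np.sqrt(mean_squared_error(verdade, pred))
rmse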
# The error came out quite high, which is strange at first. I will test using the daily values and then draw my conclusions.
temp_dic = {}
for i in list(test_data):
temp_dic[i] = np.mean
# This time we do not have the 'precipitation' column
teste_diaria = pd.DataFrame()
teste_diaria = test_data.loc[(test_data["city"] == 0)].resample("D").agg(temp_dic)
for city in range(1, 5):
teste_diaria = teste_diaria.append(
test_data.loc[(test_data["city"] == city)].resample("D").agg(temp_dic)
)
# resample the data to the desired frequency using the dictionary of aggregation functions created above
teste_diaria.dropna(inplace=True)
teste_diaria
treino_dia = pd.concat(
[
diaria[["season", "DEWP", "HUMI", "PRES", "TEMP"]],
diaria[["DEWP", "HUMI", "PRES"]].rename(
columns={
"DEWP": "DEWP2",
"HUMI": "HUMI2",
"PRES": "PRES2",
}
),
],
axis=1,
)
# When duplicating the columns, I rename them to avoid possible confusion. Note that this time we changed the chosen predictors as stated in
# 1.2: we removed the cbwd columns and did not duplicate TEMP
target_dia = diaria["precipitation"]
treino_dia
# before we can apply the random forest we must normalize the data
treino_dia = (treino_dia - treino_dia.mean(axis=0)) / treino_dia.std(axis=0)
rf = RandomForestRegressor(n_estimators=50, max_depth=40)
rf.fit(treino_dia, target_dia)
teste_dia = pd.concat(
[
teste_diaria[["season", "DEWP", "HUMI", "PRES", "TEMP"]],
teste_diaria[["DEWP", "HUMI", "PRES"]].rename(
columns={
"DEWP": "DEWP2",
"HUMI": "HUMI2",
"PRES": "PRES2",
}
),
],
axis=1,
)
reg = rf.predict(teste_dia)
teste_dia["daily_precipitation"] = reg
teste_dia["city"] = teste_diaria["city"]
teste_dia
temp_dic = {}
for i in list(teste_dia):
temp_dic[i] = np.mean
temp_dic["daily_precipitation"] = np.sum
# since we want to find the total precipitation volume, we keep its sum instead of the mean used for the other columns
dia_mes = pd.DataFrame()
dia_mes = teste_dia.loc[(teste_dia["city"] == 0)].resample("M").agg(temp_dic)
for city in range(1, 5):
dia_mes = dia_mes.append(
teste_dia.loc[(teste_dia["city"] == city)].resample("M").agg(temp_dic)
)
# analogous to the previous step, but with a different frequency
dia_mes.dropna(inplace=True)
dia_mes
test_label = test_label.rename_axis("MyIdx").sort_values(
by=["city", "MyIdx"], ascending=[True, True]
)
test_label
verdade = test_label["monthly_precipitation"]
pred = dia_mes["daily_precipitation"]
mean_squared_error(verdade, pred)
# Not only is the value very high, it is also very close to the previous one. Let's analyze the data now and understand why.
test_label.describe()
# From describe we can quickly spot one possible reason: the standard deviation of the real values is extremely high, on the order of 10^4, even exceeding their mean. We can verify this with a histogram of the data.
test_label["monthly_precipitation"].hist(bins=30, figsize=(6, 6))
plt.show()
dia_mes["daily_precipitation"].hist(bins=30, figsize=(6, 6))
plt.show()
teste_mensal["monthly_precipitation"].hist(bins=30, figsize=(6, 6))
plt.show()
# As we can see, the heavy variation in values drastically affects the regression outputs, which try to produce smooth curves while the real values are much more spiky, with drastic changes.
# # 2 - Classifiers
# Objective: Predict whether it rained on a given day in different cities, for each month and year.
# Data: We have a total of 4 datasets, containing hourly data on different weather conditions in each city.
# Evaluation: roc_auc scores are requested for evaluating the results.
# ## 2.1 - Data preparation:
# As before, I will do a quick analysis of the given data in order to better identify its behavior and correlations, as well as any cleaning and changes that may be needed.
train_class_data = pd.read_csv(
"../input/epistemicselecao/classification_features_train.csv"
)
train_class_label = pd.read_csv(
"../input/epistemicselecao/classification_targets_train.csv"
)
test_class_data = pd.read_csv(
"../input/epistemicselecao/classification_features_test.csv"
)
test_class_label = pd.read_csv(
"../input/epistemicselecao/classification_targets_test.csv"
)
train_class_data
# Keeping in mind that this is the same data as in the previous case, I will apply the same treatment, without any variations.
tempo_data = pd.to_datetime(train_class_data[["year", "month", "day", "hour"]])
train_data = train_class_data.set_index(tempo_data).drop(
["year", "month", "day", "hour"], axis=1
)
# unlike the previous case, this time the label includes the day column
tempo_data = pd.to_datetime(train_class_label[["year", "month", "day"]])
train_label = train_class_label.set_index(tempo_data).drop(
["year", "month", "day"], axis=1
)
tempo_data = pd.to_datetime(test_class_data[["year", "month", "day", "hour"]])
test_data = test_class_data.set_index(tempo_data).drop(
["year", "month", "day", "hour"], axis=1
)
# same as above
tempo_data = pd.to_datetime(test_class_label[["year", "month", "day"]])
test_label = test_class_label.set_index(tempo_data).drop(
["year", "month", "day"], axis=1
)
# I will start by removing any NaNs present
test_data = test_data.dropna()
test_label = test_label.dropna()
train_data = train_data.dropna()
train_label = train_label.dropna()
train_data.describe()
# The same error as in the previous case; I apply the same correction.
train_data.loc[train_data.DEWP == -9999.0, ["DEWP", "HUMI"]] = 13.2, 100
# Handling cbwd
train_data = pd.get_dummies(train_data, columns=["cbwd"])
test_data = pd.get_dummies(test_data, columns=["cbwd"])
# ## 2.2 Data analysis
# Although this is the same data, the objective is different. Before, we wanted to find total rainfall values for a month; this time we only want to know whether or not it rained on a given day. The monthly average obviously becomes useless, but the daily average does not lose all of its value.
# An important step is to turn precipitation into a boolean value. Since this is rain in mm/m^2 (I assume, as that is the standard for rain measurement), it is enough that precipitation > 0 to consider it rain.
# First generating the daily averages in the same way as before.
temp_dic = {}
for i in list(train_data):
temp_dic[i] = np.mean
temp_dic["precipitation"] = np.sum
# This time the np.sum on precipitation is to avoid zeros caused by small numbers.
diaria = pd.DataFrame()
diaria = train_data.loc[(train_data["city"] == 0)].resample("D").agg(temp_dic)
for city in range(1, 5):
diaria = diaria.append(
train_data.loc[(train_data["city"] == city)].resample("D").agg(temp_dic)
)
# resample the data to the desired frequency using the dictionary of aggregation functions created above
diaria.dropna(inplace=True)
diaria
train_data.loc[train_data["precipitation"] != 0, "precipitation"] = True
train_data.loc[train_data["precipitation"] == 0, "precipitation"] = False
diaria.loc[diaria["precipitation"] != 0, "precipitation"] = True
diaria.loc[diaria["precipitation"] == 0, "precipitation"] = False
# Replacing the precipitation values with booleans.
correlation = pd.DataFrame(train_data.corr(method="pearson").precipitation)
corr_D = pd.DataFrame(diaria.corr(method="pearson").precipitation)
correlation.join(corr_D, rsuffix="_Dia").sort_values(by="precipitation", ascending=True)
# We can see that the correlations with the daily values are higher.
# I will use the following predictors in both cases: PRES, DEWP and HUMI
# In addition, for the daily data I will also use TEMP and season and a duplicate of HUMI.
# Generating right away the data needed to make the classification in the next part easier.
diaria_teste = pd.DataFrame()
diaria_teste = (
test_data.loc[(test_data["city"] == 0)].resample("D").mean()
)  # does not need the dictionary since there is no precipitation column
for city in range(1, 5):
diaria_teste = diaria_teste.append(
test_data.loc[(test_data["city"] == city)].resample("D").mean()
)
# resample the data to the desired frequency using the dictionary of aggregation functions created above
diaria_teste.dropna(inplace=True)
diaria_teste
treino_class = train_data[["PRES", "DEWP", "HUMI"]]
target_class = train_data["precipitation"]
treino_dia = pd.concat(
[
diaria[["PRES", "DEWP", "HUMI", "season", "TEMP"]],
diaria[["HUMI"]].rename(columns={"HUMI": "HUMI2"}),
],
axis=1,
)
target_dia = diaria["precipitation"]
test_class = test_data[["PRES", "DEWP", "HUMI"]]
test_dia = pd.concat(
[
diaria_teste[["PRES", "DEWP", "HUMI", "season", "TEMP"]],
diaria_teste[["HUMI"]].rename(columns={"HUMI": "HUMI2"}),
],
axis=1,
)
# ## 2.3 Classification
# ### 2.3.1 Naive Bayes
# First building the dataframe of daily values
from sklearn.naive_bayes import BernoulliNB
bnb = BernoulliNB()
bnb.fit(treino_dia, target_dia)
choveu = bnb.predict(test_dia)
# Adding the predicted values to the dataframe together with the city, to better visualize and compare with the label table
test_dia_temp = pd.DataFrame()
test_dia_temp = test_dia
test_dia_temp["precipitation"] = choveu
test_dia_temp["city"] = diaria_teste[
"city"
]  # so we can check that we have the right location when comparing with the label
test_dia_temp.rename_axis("MyIdx", inplace=True)
test_dia_temp = test_dia_temp.reset_index(drop=False)
test_dia_temp
# Knowing that the label table has the days in order and the cities in the order 0,1,2,3,4, I turn the time-series index into a column so that both dataframes can be sorted the same way with sort_values(by = ['city', 'MyIdx'])
test_label = test_label.rename_axis("MyIdx").sort_values(
by=["city", "MyIdx"], ascending=[True, True]
)
test_label.dropna(inplace=True)
test_label_temp = test_label.reset_index(drop=False)
test_label_temp
# However, the two do not have the same size. This probably happened while cleaning the data: some rows were lost. Fortunately there were not many, only 11 rows. Doing a merge on MyIdx to match the days and on city to match the cities, we get:
result = pd.merge(test_dia_temp, test_label_temp, on=["MyIdx", "city"], sort=False)
result
# Evaluating the results with the requested metric
from sklearn.metrics import roc_auc_score
y_true = result["rain"]
y_pred = result["precipitation"]
roc_auc_score(y_true, y_pred)
# The result was not good at all. The system is basically guessing whether it rained or not.
# Let's check the hourly data.
bnb = BernoulliNB()
bnb.fit(treino_class, target_class)
choveu = bnb.predict(test_class)
test_class_temp = pd.DataFrame()
test_class_temp = test_class
test_class_temp.loc[:, "precipitation"] = choveu
test_class_temp.loc[:, "city"] = test_data[
"city"
]  # so we can check that we have the right location when comparing with the label
test_class_temp
# The data is hourly. Let's change it to daily.
hora_dia = pd.DataFrame()
hora_dia = (
test_class_temp.loc[(test_class_temp["city"] == 0)].resample("D").mean()
)  # does not need the dictionary since there is no precipitation column
for city in range(1, 5):
hora_dia = hora_dia.append(
test_class_temp.loc[(test_class["city"] == city)].resample("D").mean()
)
# resample the data to the desired frequency using the dictionary of aggregation functions created above
hora_dia.dropna(inplace=True)
hora_dia.loc[hora_dia["precipitation"] != 0, "precipitation"] = True
hora_dia.loc[hora_dia["precipitation"] == 0, "precipitation"] = False
hora_dia
# Again, 11 rows were lost; doing the merge:
hora_dia.rename_axis("MyIdx", inplace=True)
hora_dia_temp = hora_dia.reset_index(drop=False)
result = pd.merge(hora_dia, test_label_temp, on=["MyIdx", "city"], sort=False)
result
y_true = result["rain"]
y_pred = result["precipitation"]
roc_auc_score(y_true, y_pred)
# Maybe Naive Bayes is not well suited to this problem. Applying a KNN approach:
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=20)
knn.fit(treino_dia, target_dia)
test_dia.drop(["precipitation", "city"], axis=1, inplace=True)
predictions = knn.predict(test_dia)
y_true = result["rain"]
y_pred = predictions
roc_auc_score(y_true, y_pred)
# The result itself is not great, but it is much better than Naive Bayes. With better tuning of the features and models, better values could be reached. For example, turning variables like HUMI into 1 or 0, since humidity at 100% means it is raining. It would also be ideal to separate each city and run its classification separately, as sketched below.
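# A minimal sketch of the two ideas above (the 99% saturation threshold and k=20 are untuned assumptions): add a binary "saturated humidity" feature and fit one KNN per city on the daily data.
from sklearn.neighbors import KNeighborsClassifier

treino_dia_v2 = treino_dia.copy()
treino_dia_v2["HUMI_sat"] = (diaria["HUMI"].values >= 99).astype(int)  # assumed threshold
treino_dia_v2["city"] = diaria["city"].values
per_city_models = {}
for c in sorted(diaria["city"].unique()):
    mask = (treino_dia_v2["city"] == c).values
    m = KNeighborsClassifier(n_neighbors=20)
    # fit on this city's rows only, dropping the helper city column
    m.fit(treino_dia_v2.loc[mask].drop("city", axis=1), target_dia.values[mask])
    per_city_models[c] = m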
# # 3 - Clustering
# Objective: Cluster the dataset of the 5 cities according to their seasonal behavior, in an unsupervised way
# Data: Clustering dataset with several per-season variables for each city
# Evaluation: Unlike the others, the evaluation is open to several approaches, with sklearn's silhouette score given as an example.
# ## 3.1 - Data preparation:
# As before, I will do a quick analysis of the given data in order to better identify its behavior and correlations, as well as any cleaning and changes that may be needed.
cluster_data = pd.read_csv("../input/epistemicselecao/clustering_dataset.csv")
cluster_data
# The data is relatively compact, especially compared to the other datasets. We can quickly see in row 10 an unusually high average total seasonal rainfall, an outlier. We can also see that there are no NaNs and the data is complete. Great, no cleaning needed!
cluster_data.describe()
# To apply unsupervised clustering, I will use the KMeans method.
# In addition, I will apply min-max scaling to bring the data onto the same scale. However, the case with 200k average total seasonal rainfall will be set aside: since it is an outlier, it would heavily affect the min-max scaling, and as we only want to bring the scales closer together, I will treat the second largest value as the max. So I remove the outlier.
cluster_data_outlier = cluster_data.loc[10, :]
cluster_data = cluster_data.drop([10], axis=0)
# removing the outlier row so we can study the data better
cluster_data.describe()
labels = list(cluster_data)
labels.remove("city")
labels.remove("season")
# list of the columns that need min-max scaling
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
cluster_data_mm = pd.DataFrame()
cluster_data_mm = cluster_data.copy()
scaler.fit(cluster_data_mm[labels])
cluster_data_mm[labels] = scaler.transform(cluster_data_mm[labels])
cluster_data_mm.reset_index(drop=True, inplace=True)
cluster_data_mm
# Fortunately, the outlier did not show strange behavior in its other columns, only in the average total, as expected.
# Once the data is prepared we can start clustering
# ## 3.2 - Clustering with K Means
# Knowing that we have 4 seasons and 5 cities, I will do two different clusterings. The first will refer only to the seasons, hence 4 clusters. The second, to the cities, hence 5 clusters. Finally, I will use the elbow method to decide the number of clusters.
from sklearn.cluster import KMeans
# ### 3.2.1 - 4 seasons, without distinguishing cities
# Since we are ignoring the different cities, I create a dataframe without the city column
cluster_seasons = cluster_data_mm.drop("city", axis=1)
# Note that since the seasons will be the clusters, we can call them the "target"
cluster_seasons_target = cluster_seasons.loc[:, "season"]
cluster_seasons.drop("season", axis=1, inplace=True)
km = KMeans(n_clusters=4)
y_predicted = km.fit_predict(cluster_seasons)
y_predicted.size == cluster_seasons.shape[
0
]  # just checking that the sizes match
cluster_seasons["cluster"] = y_predicted
cluster_seasons
labels = list(cluster_seasons)
labels.remove("average_total_seasonal_precipitation")
labels.remove("cluster")
# I will plot against the average total rainfall
# When plotting, I will use average_total_seasonal_precipitation as the common axis, since it is the focus of the study. I will plot the clusters side by side with the real groups, i.e. plots whose groups are based on cluster_seasons_target
target_plot = pd.DataFrame()
target_plot = cluster_seasons.copy()
target_plot["season"] = (
cluster_seasons_target - 1
)  # season starts at 1; we want it to start at 0 like the cluster labels
cores = ["green", "red", "black", "blue"]
x = 1
f = plt.figure(figsize=(10, 50))
for y in labels:
for i in range(0, 4):
ax = f.add_subplot(12, 2, x)
df_temp = cluster_seasons[cluster_seasons.cluster == i]
ax.scatter(
df_temp[y], df_temp["average_total_seasonal_precipitation"], color=cores[i]
)
plt.title(y)
ax2 = f.add_subplot(12, 2, x + 1)
df_temp = target_plot[target_plot.season == i]
ax2.scatter(
df_temp[y], df_temp["average_total_seasonal_precipitation"], color=cores[i]
)
plt.title(y + "real")
x += 2
plt.show()
# The colors will not match between groupings with the same meaning, but we should look for similar groupings in the data. The model expects a much more uniform behavior than the real one. However, we can see that the extremes do show more similar behavior. I believe this happens because seasons like winter and summer have more pronounced weather conditions, while seasons like autumn and spring have more variation over their period.
from sklearn.metrics import silhouette_score
labels1 = list(cluster_seasons)
labels1.remove("cluster")
silhouette_score(cluster_seasons[labels1], cluster_seasons["cluster"])
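# A minimal sketch of checking other cluster counts with the same silhouette metric used above (an alternative to the elbow method mentioned in 3.2; the range of k and the random_state are arbitrary choices).
for k in range(2, 8):
    km_k = KMeans(n_clusters=k, random_state=0)
    labels_k = km_k.fit_predict(cluster_seasons[labels1])
    print(k, round(silhouette_score(cluster_seasons[labels1], labels_k), 3))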
|
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import librosa
import librosa.display
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # 1. Importing the Audio Files
# * audio1 - Beethoven's 5th Symphony
# * audio2 - Public Enemy - 911 is a Joke
def import_signal(path):
s, sr = librosa.core.load(path)
return s
def plot_signals(s):
fig, a = plt.subplots(1, figsize=(10, 8))
a.plot(s)
paths = ["/kaggle/input/audios/audio1.wav", "/kaggle/input/audios/audio2.wav"]
signals = []
for p in paths:
signals.append(import_signal(p))
signals = np.asarray(signals)
print(signals.shape)
for s in signals:
plot_signals(s)
# # 2. Short-Time Fourier Transform
# **hop_length** : int > 0 [scalar] - We use 1024
# number of audio samples between adjacent STFT columns.
# returns the magnitude S
def stft(signal):
S, phase = librosa.magphase(np.abs(librosa.stft(signal, hop_length=1024)))
return S
def plot_spect(index, S):
plt.figure(figsize=(10, 8))
librosa.display.specshow(
librosa.amplitude_to_db(S, ref=np.max), y_axis="log", x_axis="time"
)
title = "spectrogram audio {}".format(index + 1)
plt.title(title)
plt.colorbar(format="%+2.0f dB")
plt.tight_layout()
plt.show()
signals_stft = []
for s in signals:
signals_stft.append(stft(s))
signals_stft = np.asarray(signals_stft)
signals_stft.shape
for index, s in enumerate(signals_stft):
plot_spect(index, s)
# # 4. Features
# # **4.1 Centroid**
def get_centroid(S):
return librosa.feature.spectral_centroid(S=S)
def plot_centroids(c):
fig, a = plt.subplots(1, figsize=(10, 8))
a.plot(c)
centroids = []
for s in signals_stft:
c = get_centroid(s)
centroids.append(c[0])
centroids = np.asarray(centroids)
for c in centroids:
plot_centroids(c)
# # 4.2 Flatness
def get_flatness(S):
return librosa.feature.spectral_flatness(S=S)
def plot_flatness(f):
fig, a = plt.subplots(1, figsize=(10, 8))
a.axis([0, 500, 0, 0.125])
a.plot(f)
flatness = []
for s in signals_stft:
f = get_flatness(s)
flatness.append(f[0])
flatness = np.asarray(flatness)
for f in flatness:
plot_flatness(f)
# # 4.3 RMS
# RMSE is no longer present in librosa: librosa.feature.rmse was renamed to librosa.feature.rms
def get_rms(s):
    # the inputs here are magnitude spectrograms, so pass them as S rather than as a waveform
    return librosa.feature.rms(S=s)
def plot_rms(x, y):
print(y)
fig, a = plt.subplots(1, figsize=(10, 8))
a.axis([0, 4, 0, 2])
a.plot(x, y, "ro")
plt.xticks(np.arange(min(x), max(x) + 1, 1.0))
plt.xlabel("Áudio")
plt.ylabel("RMS")
plt.title("Valor RMS para os sinais de áudio")
for a, b in zip(x, y):
plt.text(
a,
b,
" " + str(b),
horizontalalignment="left",
verticalalignment="bottom",
)
rms_arr = []
indexes = []
for i, s in enumerate(signals_stft):
rms = get_rms(s)
rms_arr.append(round(rms[0][0], 3))
indexes.append(i + 1)
plot_rms(indexes, rms_arr)
|
#
# ## Objective
# **The challenge is to create a model that uses data from the first 24 hours of intensive care to predict patient survival. MIT's GOSSIS community initiative, with privacy certification from the Harvard Privacy Lab, has provided a dataset of more than 130,000 hospital Intensive Care Unit (ICU) visits from patients, spanning a one-year timeframe. This data is part of a growing global effort and consortium spanning Argentina, Australia, New Zealand, Sri Lanka, Brazil, and more than 200 hospitals in the United States.**
# ## Data Description
# MIT's GOSSIS community initiative, with privacy certification from the Harvard Privacy Lab, has provided a dataset of more than 130,000 hospital Intensive Care Unit (ICU) visits from patients, spanning a one-year timeframe. This data is part of a growing global effort and consortium spanning Argentina, Australia, New Zealand, Sri Lanka, Brazil, and more than 200 hospitals in the United States.
# The data includes:
# **Training data** for 91,713 encounters.
# **Unlabeled test data** for 39,308 encounters, which includes all the information in the training data except for the values for hospital_death.
# **WiDS Datathon 2020 Dictionary** with supplemental information about the data, including the category (e.g., identifier, demographic, vitals), unit of measure, data type (e.g., numeric, binary), description, and examples.
# **Sample submission files**
# ## Ensemble Learning :
# A collection of several models working together on a single dataset is called an Ensemble, and the method is called **Ensemble Learning.**
# **Ensemble methods combine several tree-based algorithms to achieve better predictive performance than a single tree-based algorithm. The main principle behind the ensemble model is that a group of weak learners come together to form a strong learner, thus increasing the accuracy of the model. When we try to predict the target variable using any machine learning technique, the main causes of the difference between actual and predicted values are noise, variance, and bias.**
# Pic Credit: medium.com
# ## Voting Classifier :
# **A Voting Classifier** is a machine learning model that trains an ensemble of numerous models and predicts an output (class) based on the aggregated votes or probabilities of those models. It simply aggregates the findings of each classifier passed into the Voting Classifier and predicts the output class with the highest share of the vote.
# The Voting Classifier supports two types of voting.
# **Hard Voting:** In hard voting, the predicted output class is the class with the majority of votes, i.e. the class that was predicted most often by the individual classifiers.
# **Soft Voting:** In soft voting, the output class is the prediction based on the average of probability given to that class.
# Pic Credit : iq.opengenus.org
# ## When to use Voting Classifier ?
# Voting classifier is a powerful method and can be a very good option when a single method shows bias towards a particular factor. This method can be used to derive a generalized fit of all the individual models.
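# A minimal sketch contrasting hard and soft voting on a small synthetic dataset (the toy data and the two base models are illustrative assumptions, separate from the competition pipeline built below).
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score

X_toy, y_toy = make_classification(n_samples=500, n_features=10, random_state=0)
base_models = [
    ("lr", LogisticRegression(max_iter=1000)),
    ("dt", DecisionTreeClassifier(max_depth=3, random_state=0)),
]
hard_vote = VotingClassifier(estimators=base_models, voting="hard")  # majority vote on predicted classes
soft_vote = VotingClassifier(estimators=base_models, voting="soft")  # average of predicted probabilities
print("hard voting CV accuracy:", cross_val_score(hard_vote, X_toy, y_toy, cv=5).mean().round(3))
print("soft voting CV accuracy:", cross_val_score(soft_vote, X_toy, y_toy, cv=5).mean().round(3))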
# ## Scikit-learn :
# Scikit-learn is the most useful library for machine learning in Python, and it contains a lot of efficient tools for machine learning and statistical modeling, including classification, regression, clustering and dimensionality reduction.
# ## Import the relevant libraries
# importing libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
VotingClassifier,
)
import matplotlib.pyplot as plt
import seaborn as sns
# roc curve and auc score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
# ## Read the dataset
# loading dataset
training_v2 = pd.read_csv("../input/widsdatathon2020/training_v2.csv")
test = pd.read_csv("../input/widsdatathon2020/unlabeled.csv")
# creating independent features X and dependant feature Y
y = training_v2["hospital_death"]
X = training_v2.drop("hospital_death", axis=1)
test = test.drop("hospital_death", axis=1)
# Remove Features with more than 75 percent missing values
train_missing = (X.isnull().sum() / len(X)).sort_values(ascending=False)
train_missing = train_missing.index[train_missing > 0.75]
X = X.drop(columns=train_missing)
test = test.drop(columns=train_missing)
categoricals_features = [
"hospital_id",
"ethnicity",
"gender",
"hospital_admit_source",
"icu_admit_source",
"icu_stay_type",
"icu_type",
"apache_3j_bodysystem",
"apache_2_bodysystem",
]
X = X.drop(columns=categoricals_features)
test = test.drop(columns=categoricals_features)
# Imputation transformer for completing missing values.
my_imputer = SimpleImputer()
new_data = pd.DataFrame(my_imputer.fit_transform(X))
test_data = pd.DataFrame(my_imputer.transform(test))  # reuse the imputer fitted on the training features
new_data.columns = X.columns
test_data.columns = test.columns
X = new_data
test = test_data
# ## Train/Test Split :
# The data is split into training data and test data. The training set contains a known output, and the model learns on this data in order to generalize to other data later on. We keep a test dataset (or subset) in order to test our model's predictions on it. This is achieved in the Scikit-Learn library using the train_test_split method.
# Split into training and validation set
X_train, valid_features, Y_train, valid_y = train_test_split(
X, y, test_size=0.25, random_state=1
)
# ## Gradient boosting Classifier :
# Gradient boosting is a machine learning technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. The intuition behind the gradient boosting algorithm is to repeatedly leverage the patterns in the residuals, strengthening a model with weak predictions until it becomes better, as sketched below.
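# A minimal sketch of the residual-fitting intuition described above, using two shallow regression trees on synthetic data (illustrative only; the actual classifier used below is scikit-learn's GradientBoostingClassifier).
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
x_demo = rng.uniform(-3, 3, size=(200, 1))
y_demo = np.sin(x_demo).ravel() + rng.normal(scale=0.1, size=200)
tree1 = DecisionTreeRegressor(max_depth=2).fit(x_demo, y_demo)
residuals = y_demo - tree1.predict(x_demo)  # what the first tree missed
tree2 = DecisionTreeRegressor(max_depth=2).fit(x_demo, residuals)  # fit the residuals
boosted = tree1.predict(x_demo) + tree2.predict(x_demo)  # combined prediction
print("MSE of tree1 alone :", ((y_demo - tree1.predict(x_demo)) ** 2).mean().round(4))
print("MSE after boosting :", ((y_demo - boosted) ** 2).mean().round(4))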
# Gradient Boosting Classifier
GBC = GradientBoostingClassifier(random_state=1)
# ## Random Forest Classifier
# Random forest consists of a large number of individual decision trees that operate as an ensemble. Each individual tree in the random forest spits out a class prediction, and the class with the most votes becomes our model's prediction. The fundamental concept of Random Forest is that a large number of relatively uncorrelated models operating as a committee will outperform any of the individual constituent models.
# Random Forest Classifier
RFC = RandomForestClassifier(n_estimators=100)
# Voting Classifier with soft voting
votingC = VotingClassifier(estimators=[("rfc", RFC), ("gbc", GBC)], voting="soft")
votingC = votingC.fit(X_train, Y_train)
predict_y = votingC.predict(valid_features)
# ## AUC - ROC Curve :
# AUC - ROC curve is a performance measurement for classification problems at various threshold settings. ROC is a probability curve and AUC represents the degree or measure of separability: it tells how capable the model is of distinguishing between classes. The higher the AUC, the better the model is at predicting 0s as 0s and 1s as 1s, and hence at distinguishing between patients with the disease and without it.
# The ROC curve is plotted with TPR against the FPR where TPR is on y-axis and FPR is on the x-axis.
#
def plot_roc_curve(fpr, tpr):
plt.plot(fpr, tpr, color="orange", label="ROC")
plt.plot([0, 1], [0, 1], color="darkblue", linestyle="--")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver Operating Characteristic (ROC) Curve")
plt.legend()
plt.show()
probs = votingC.predict_proba(valid_features)
probs = probs[:, 1]
auc = roc_auc_score(valid_y, probs)
fpr, tpr, thresholds = roc_curve(valid_y, probs)
plot_roc_curve(fpr, tpr)
print("AUC-ROC :", auc)
# ## Submissions :
# Submissions will be evaluated on the Area under the Receiver Operating Characteristic (ROC) curve between the predicted mortality and the observed target (hospital_death)
test1 = test.copy()
test1["hospital_death"] = votingC.predict(test)
test1[["encounter_id", "hospital_death"]].to_csv("submission5.csv", index=False)
test1[["encounter_id", "hospital_death"]].head()
|
# **Insights and Exploratory Data Analysis (EDA) on Novel Corona Virus 2020**
# Import all the required libraries
# Graphical Libraries
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import folium
from folium.plugins import MarkerCluster, MiniMap, Fullscreen
import branca
from IPython.display import IFrame, YouTubeVideo
# Manipulation
from datetime import date
import pandas as pd
import numpy as np
# Kaggle default
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# To supress warnings
import warnings
warnings.filterwarnings("ignore")
# To get the geolocation details
from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Nominatim
# **Coronaviruses (CoV):**
# "Coronaviruses (CoV) are a large family of viruses that cause illness ranging from the common cold to more severe diseases. Some coronaviruses transmit between animals, some between animals and people, and others from people to people." [[1].(https://www.canada.ca/en/public-health/services/diseases/coronavirus.html)]
# The Wall Street Journal has also released an informative article [[2](https://www.wsj.com/articles/what-we-know-about-the-wuhan-virus-11579716128)] on the virus. More information about the virus can be found at the WHO [[3](https://www.who.int/emergencies/diseases/novel-coronavirus-2019)] website. WHO has also published a video [[4](https://youtu.be/mOV1aBVYKGA)] on youtube to make people aware of the health emergency caused by the novel Coronavirus. The video is given below:
from IPython.display import IFrame, YouTubeVideo
YouTubeVideo("mOV1aBVYKGA", width=600, height=400)
# **Purpose of this notebook:**
# The purpose of this notebook is to provide insights into the data [[5](https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-dataset)] scraped from a dashboard [[6](https://gisanddata.maps.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6)] created by Johns Hopkins University. The data is made available in csv format by SRK [[7](https://www.kaggle.com/sudalairajkumar)].
# Please note that the virus and information available on it is relatively new which means that the information available now might change in the future [[4](https://youtu.be/mOV1aBVYKGA)]. It also implies that the insights and observations drafted in this notebook might change based on the changing dataset.
# Import the dataset
df = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
# Verify the first five rows for the sanity check
df.head()
# Verify the shape of the data
print("Shape of dataframe: ", df.shape)
# Convert the columns from float to int, and respective date columns for further analysis
df = df.astype(
{
"Confirmed": "int32",
"Deaths": "int32",
"Recovered": "int32",
"Last Update": "datetime64",
"Date": "datetime64",
}
)
df["Country"] = df["Country"].replace({"Mainland China": "China"})
# Get the data of the latest date from the dataset
maxDate = max(df["Date"])
df_lastDate = df[
df["Date"] > pd.Timestamp(date(maxDate.year, maxDate.month, maxDate.day))
]
# **Total number of Corona Virus cases worldwide:**
# Print the total number of observations on cases -Worldwide
print("\033[1mTotal Confirmed cases worldwide: ", df_lastDate.Confirmed.sum())
print("\033[1mTotal Death cases worldwide: ", df_lastDate.Deaths.sum())
print("\033[1mTotal Recovered cases worldwide: ", df_lastDate.Recovered.sum())
# **Analysis 1: Total number of Confirmed, Death and Recovered cases in each country**
# To view this information, the data is first processed and then displayed on an interactive table created using plotly [[8](https://plot.ly/)]. The table is scrollable and the columns can be rearranged in any order.
# Process data for each country
df_tempC = df_lastDate.groupby("Country").Confirmed.sum().to_frame()
df_tempD = df_lastDate.groupby("Country").Deaths.sum().to_frame()
df_tempR = df_lastDate.groupby("Country").Recovered.sum().to_frame()
# Merge the above data frames into one for convenient processing
df_temp = pd.merge(df_tempC, df_tempD, how="inner", left_index=True, right_index=True)
df_temp = pd.merge(df_temp, df_tempR, how="inner", left_index=True, right_index=True)
df_temp = df_temp.sort_values(["Confirmed"], ascending=[False])
# Create an interactive table based and fill the final data frame values
fig = go.Figure(
data=[
go.Table(
header=dict(
values=[
"<b>Country</b>",
"<b>Confirmed Cases</b>",
"<b>Death Cases</b>",
"<b>Recovered Cases</b>",
],
fill_color="paleturquoise",
align=["left", "center"],
font=dict(color="black", size=16),
height=40,
),
cells=dict(
values=[
df_temp.index,
df_temp.Confirmed.values,
df_temp.Deaths.values,
df_temp.Recovered.values,
],
fill_color="lavender",
align=["left", "center"],
font=dict(color="black", size=14),
height=23,
),
)
]
)
# Cosmetic changes
fig.update_layout(
title={
"text": "<b>Number of Confirmed, Death and Recovered cases in each country</b>",
"y": 0.92,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
"font": dict(size=22),
}
)
fig.update_layout(height=600)
fig.show()
print("\033[1mTotal number of countries affected:", len(df_temp.Confirmed.values))
# > **Observations based on Analysis 1:**
# >
# > 1. Most of the cases are found in China.
# > 2. A few other countries where a high number of cases has been observed are Thailand, Singapore and Japan.
# > 3. The total number of countries affected by the virus is 28.
# > 4. The number of recovered cases is much lower relative to the confirmed cases.
# **Analysis 2: Comparison of cases in China with the Rest of the World**
# To view this information, the data is processed and then displayed in interactive bar charts created using plotly [[8](https://plot.ly/)]. The exact number of cases can be viewed by hovering the mouse over each bar. Although the purpose of the bar chart is to compare the cases in China with the Rest of the World, the legend is selectable, so clicking on any legend item makes the chart show data specific to that selection. Since the confirmed case counts in China are much higher, bars with lower counts can also be selected to view data specific to them.
# Process cases of China and Rest of the World
chinaConfirmed = df_lastDate[df_lastDate.Country == "China"].Confirmed.sum()
notChinaConfirmed = df_lastDate[df_lastDate.Country != "China"].Confirmed.sum()
chinaDeaths = df_lastDate[df_lastDate.Country == "China"].Deaths.sum()
notChinaDeaths = df_lastDate[df_lastDate.Country != "China"].Deaths.sum()
chinaRecovered = df_lastDate[df_lastDate.Country == "China"].Recovered.sum()
notChinaRecovered = df_lastDate[df_lastDate.Country != "China"].Recovered.sum()
# yAxis labels for the figure
yAxisChina = [chinaConfirmed, chinaDeaths, chinaRecovered]
yAxisNotChina = [notChinaConfirmed, notChinaDeaths, notChinaRecovered]
x = ["Confirmed", "Death", "Recovered"]
fig = go.Figure(
go.Bar(
x=x,
y=[chinaConfirmed, chinaDeaths, chinaRecovered],
text=yAxisChina,
textposition="outside",
hovertemplate="%{x}: %{y} </br>",
name="China",
marker_color="rgb(55, 83, 109)",
)
)
fig.add_trace(
go.Bar(
x=x,
y=[notChinaConfirmed, notChinaDeaths, notChinaRecovered],
text=yAxisNotChina,
textposition="outside",
hovertemplate="%{x}: %{y} </br>",
name="Rest of the World",
marker_color="rgb(26, 118, 255)",
)
)
fig.update_layout(barmode="group", xaxis={"categoryorder": "category ascending"})
fig.update_layout(
title={
"text": "<b>Number of Confirmed, Death and Recovered cases in China and Rest of the World</b>",
"x": 0.1,
"xanchor": "left",
"font": dict(size=20, color="black"),
},
xaxis_tickfont_size=14,
legend=dict(
x=1, y=1, bgcolor="rgba(255, 255, 255, 0)", bordercolor="rgba(255, 255, 255, 0)"
),
barmode="group",
bargap=0.15 # gap between bars of adjacent location coordinates.
# bargroupgap=0.1 # gap between bars of the same location coordinate.
)
# Update xaxis properties
fig.update_xaxes(title_text="Type of Cases", titlefont_size=16, tickfont_size=15)
# Update yaxis properties
fig.update_yaxes(title_text="Number of Cases", titlefont_size=16, tickfont_size=15)
fig.show()
# > **Observations based on Analysis 2:**
# >
# > 1. The number of cases in China is extremely high compared to the rest of the world.
# > 2. Due to the lack of information on the virus, there is no vaccine to prevent it [[9](https://www.cdc.gov/coronavirus/2019-ncov/about/prevention-treatment.html)]. Hence, the recovery rate is low in China as well as in the rest of the world.
# > 3. Although the number of deaths is comparatively lower than the number of confirmed cases worldwide, the goal is to prevent any further rise in the death count.
# > 4. Despite global fears, the cases related to this virus are concentrated in China.
# **Analysis 3: A geographical analysis of Confirmed and Death cases worldwide**
# To view this information, the data is processed and then displayed by an interactive map created using folium [[10](https://python-visualization.github.io/folium/)]. A legend is also created to provide a precise understanding of the markers on the map. The exact number of cases can be viewed by clicking on each marker. The map can also be viewed in fullscreen mode by selecting the option given on the top right side of the map.
# Process and Merge the location details
df_lastDate["Province/State Copy"] = df_lastDate["Province/State"].fillna(" ")
df_lastDate["fullAddress"] = np.where(
(df_lastDate["Province/State Copy"] == " ")
| (df_lastDate["Province/State Copy"] == df_lastDate["Country"]),
df_lastDate["Country"],
df_lastDate["Province/State Copy"] + ", " + df_lastDate["Country"],
)
locator = Nominatim(user_agent="myGeocoder")
# put a delay of 1 second and fetch the geolocation details of each location
geocode = RateLimiter(locator.geocode, min_delay_seconds=1)
df_lastDate["location"] = df_lastDate["fullAddress"].apply(geocode)
df_lastDate["point"] = df_lastDate["location"].apply(
lambda loc: tuple(loc.point) if loc else None
)
df_lastDate[["latitude", "longitude", "altitude"]] = pd.DataFrame(
df_lastDate["point"].tolist(), index=df_lastDate.index
)
# Initialize map
intializeMap = folium.Figure(height=500)
custom_map = folium.Map(
location=[42, 12], zoom_start=2, tiles="cartodbpositron"
).add_to(intializeMap)
fullscreen = Fullscreen(
position="topright",
title="Fullscreen",
title_cancel="Exit Fullscreen",
force_separate_button=True,
).add_to(custom_map)
# Create a custom html to show legend on map
legend_html = """
{% macro html(this, kwargs) %}
<div style="
position: fixed;
bottom: 50px;
left: 50px;
width: 250px;
height: 80px;
z-index:9999;
font-size:14px;
">
<p>  <i class="fa fa-map-marker fa-2x" style="color:darkblue"></i> Confirmed cases</p>
<p>  <i class="fa fa-map-marker fa-2x" style="color:red"></i> Confirmed Deaths cases</p>
</div>
<div style="
position: fixed;
bottom: 50px;
left: 50px;
width: 150px;
height: 80px;
z-index:9998;
font-size:14px;
background-color: #ffffff;
filter: blur(8px);
-webkit-filter: blur(8px);
opacity: 0.7;
">
</div>
{% endmacro %}
"""
legend = branca.element.MacroElement()
legend._template = branca.element.Template(legend_html)
custom_map.get_root().add_child(legend)
# Add locations and styling on map
df_lastDate.apply(
lambda row: folium.Marker(
location=[row["latitude"], row["longitude"]],
popup=(
"<b>Province/Country:</b> "
+ row["fullAddress"]
+ "<br>"
+ "<b>Confirmed:</b> "
+ str(row["Confirmed"])
+ "<br>"
+ "<b>Deaths:</b> "
+ str(row["Deaths"])
+ "<br>"
+ "<b>Recovered:</b> "
+ str(row["Recovered"])
),
icon=folium.Icon(color="darkblue", icon="info-sign"),
color="rgb(55, 83, 109)",
fill_color="rgb(55, 83, 109)",
).add_to(custom_map)
if (row["Deaths"] == 0)
else folium.Marker(
location=[row["latitude"], row["longitude"]],
popup=(
"<b>Province/Country:</b> "
+ row["fullAddress"]
+ "<br>"
+ "<b>Confirmed:</b> "
+ str(row["Confirmed"])
+ "<br>"
+ "<b>Deaths:</b> "
+ str(row["Deaths"])
+ "<br>"
+ "<b>Recovered:</b> "
+ str(row["Recovered"])
),
icon=folium.Icon(color="red", icon="info-sign"),
color="rgb(26, 118, 255)",
fill_color="rgb(26, 118, 255)",
).add_to(custom_map),
axis=1,
)
custom_map
# > **Observations based on Analysis 3:**
# >
# > 1. The number of death cases is relatively lower than the confirmed cases globally.
# > 2. Considering the spread of the death markers on the map, death observations have so far been recorded mostly in China and nearby countries.
# **Analysis 4: A detailed view of cases within Chinese Provinces**
# To view this information, the data is processed and then displayed in interactive horizontal bar charts created using plotly [[8](https://plot.ly/)]. The exact number of cases can be viewed by hovering the mouse over each bar. Although the purpose of the bar chart is to compare all three types of cases, i.e. Confirmed, Death and Recovered, the legend is selectable, so clicking on any legend item makes the chart show data specific to that selection. Since the confirmed case counts in China are much higher, bars with lower counts can also be selected to view data specific to them.
# Process data of Chinese provinces
chinaConfirmed = df_lastDate[df_lastDate.Country == "China"].Confirmed[:10]
chinaDeath = df_lastDate[df_lastDate.Country == "China"].Deaths[:10]
chinaRecovered = df_lastDate[df_lastDate.Country == "China"].Recovered[:10]
chinaProvinceName = df_lastDate[df_lastDate.Country == "China"]["Province/State"][:10]
# Initialize the figure and start adding the traces
# China Confirmed cases
fig = go.Figure()
fig.add_trace(
go.Bar(
y=chinaProvinceName,
x=chinaConfirmed,
name="Confirmed",
hovertemplate="%{x}: %{y} </br>",
orientation="h",
marker=dict(color="yellow", line=dict(color="yellow", width=3)),
)
)
# China Death cases
fig.add_trace(
go.Bar(
y=chinaProvinceName,
x=chinaDeath,
name="Death",
hovertemplate="%{x}: %{y} </br>",
orientation="h",
marker=dict(color="red", line=dict(color="red", width=3)),
)
)
# China Recovered cases
fig.add_trace(
go.Bar(
y=chinaProvinceName,
x=chinaRecovered,
name="Recovered",
hovertemplate="%{x}: %{y} </br>",
orientation="h",
marker=dict(color="green", line=dict(color="green", width=3)),
)
)
# Cosmetic changes to figure
fig.update_layout(
title={
"text": "<b>Top 10 provinces of China having highest number of Corona Virus cases</b>",
"x": 0.15,
"xanchor": "left",
"font": dict(size=20, color="black"),
}
)
fig.update_layout(
legend_orientation="h",
legend=dict(x=0.25, y=-0.2, bgcolor="rgba(255, 255, 255, 0)", bordercolor="red"),
)
# Update xaxis properties
fig.update_xaxes(title_text="Number of cases", titlefont_size=18, tickfont_size=15)
# Update yaxis properties
fig.update_yaxes(title_text="China Provinces", titlefont_size=18, tickfont_size=15)
fig.update_layout(barmode="stack", height=600)
fig.show()
# > **Observations based on Analysis 4:**
# >
# > 1. The highest numbers of all three case types (Confirmed, Death and Recovered) have been observed in Hubei province, whose capital is Wuhan.
# > 2. The other provinces where cases have been observed are: Zhejiang, Guangdong, Henan and Hunan.
# > * Distance between Wuhan and Zhejiang: 410 miles
# > * Distance between Wuhan and Guangdong: 619 miles
# > * Distance between Wuhan and Henan: 307 miles
# > * Distance between Wuhan and Hunan: 309 miles
# > The distances between these provinces and Wuhan, together with the very high case count in Wuhan itself, support the widely reported finding that the virus started spreading in Wuhan [[11](https://jamanetwork.com/journals/jama/fullarticle/2760500)].
# > 3. Selecting only the Recovered and Death items in the legend of the above visualization also shows that death cases are almost entirely linked to Wuhan; in all other provinces, recovered cases far outnumber deaths.
# > Note: All distances are measured using Google Maps [[12](https://www.google.com/maps)].
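# As a rough cross-check of the distances quoted above (a sketch, not part of the original analysis),
# the snippet below computes straight-line geodesic distances with geopy, which appears to be used for
# geocoding earlier in this notebook. The provincial-capital coordinates are approximate assumptions,
# and road distances from Google Maps will naturally differ.
from geopy.distance import geodesic

wuhan = (30.59, 114.31)  # approximate coordinates for Wuhan (assumption)
capitals = {
    "Zhejiang (Hangzhou)": (30.27, 120.16),
    "Guangdong (Guangzhou)": (23.13, 113.26),
    "Henan (Zhengzhou)": (34.75, 113.63),
    "Hunan (Changsha)": (28.23, 112.94),
}
for name, coord in capitals.items():
    print(f"Wuhan -> {name}: {geodesic(wuhan, coord).miles:.0f} miles (straight line)")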
# **Analysis 5: A detailed day-by-day view of cases within China since Jan 22, 2020**
# To view this information, the data is processed and then displayed as interactive scatter plots created using plotly [[8](https://plot.ly/)]. The exact number of cases can be viewed by hovering over each point, and clicking a legend item restricts the plot to that case type. Hovering over individual dates shows the corresponding values for that day.
# Process the data of China based grouped by date
chinaTimelineC = (
df[df["Country"] == "China"].groupby(df["Date"].dt.date)["Confirmed"].sum()
)
chinaTimelineD = (
df[df["Country"] == "China"].groupby(df["Date"].dt.date)["Deaths"].sum()
)
chinaTimelineR = (
df[df["Country"] == "China"].groupby(df["Date"].dt.date)["Recovered"].sum()
)
# Create figure with subplots
fig = make_subplots(
rows=1,
cols=2,
vertical_spacing=0.1,
subplot_titles=("Confirmed Cases", "Death and Recovered Cases"),
)
# China confirmed cases
fig.add_trace(
go.Scatter(
name="Confirmed Cases",
y=chinaTimelineC.values,
x=chinaTimelineC.index,
text=chinaTimelineC.values,
textposition="top center",
mode="lines+markers",
hovertemplate="%{x}: %{y} </br>",
marker=dict(
color="yellow", size=10, line=dict(color="rgb(55, 83, 109)", width=3)
),
line=dict(color="rgb(55, 83, 109)", width=4),
),
row=1,
col=1,
)
# China death cases
fig.add_trace(
go.Scatter(
name="Death Cases",
y=chinaTimelineD.values,
x=chinaTimelineD.index,
text=chinaTimelineD.values,
textposition="bottom right",
hovertemplate="%{x}: %{y} </br>",
mode="lines+markers",
marker=dict(color="red", size=10, line=dict(color="rgb(55, 83, 109)", width=3)),
line=dict(color="rgb(55, 83, 109)", width=4),
),
row=1,
col=2,
)
# China recovered cases
fig.add_trace(
go.Scatter(
name="Recovered Cases",
y=chinaTimelineR.values,
x=chinaTimelineR.index,
text=chinaTimelineR.values,
textposition="bottom right",
hovertemplate="%{x}: %{y} </br>",
mode="lines+markers",
marker=dict(
color="rgb(0, 196, 0)",
size=10,
line=dict(color="rgb(55, 83, 109)", width=3),
),
line=dict(color="rgb(55, 83, 109)", width=4),
),
row=1,
col=2,
)
# Cosmetic changes to figure
fig.update_layout(
title={
"text": "<b>Comparision of Confirmed with Death and Recovered cases in China</b>",
"x": 0.2,
"xanchor": "left",
"font": dict(size=20, color="black"),
}
)
fig.update_layout(
legend_orientation="h",
legend=dict(x=0.25, y=-0.3, bgcolor="rgba(255, 255, 255, 0)", bordercolor="red"),
)
# Update xaxis properties
fig.update_xaxes(
title_text="Timeline", titlefont_size=16, tickfont_size=15, row=1, col=1
)
fig.update_xaxes(
title_text="Timeline", titlefont_size=16, tickfont_size=15, row=1, col=2
)
# Update yaxis properties
fig.update_yaxes(
title_text="Number of Cases", titlefont_size=16, tickfont_size=15, row=1, col=1
)
fig.update_yaxes(
title_text="Number of Cases", titlefont_size=16, tickfont_size=15, row=1, col=2
)
fig.show()
# > **Observations based on Analysis 5:**
# >
# > 1. Confirmed cases in China are growing exponentially.
# > 2. Roughly 3,000 to 4,000 new confirmed cases are being reported in China each day.
# > 3. Both recovered and death counts rose continuously up to Feb 1, 2020. With more information about the virus and better prevention and awareness mechanisms, recoveries have since pulled ahead of deaths, reaching roughly 890 recovered versus 490 deaths by Feb 4, 2020.
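# A small sketch (using chinaTimelineC computed above) to back observation 2: daily new confirmed
# cases in China, obtained by differencing the cumulative series.
daily_new_confirmed = chinaTimelineC.diff()
print(daily_new_confirmed.tail(7).round(0))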
# **Analysis 6: A detailed day-by-day view of cases worldwide excluding China since Jan 22, 2020**
# To view this information, the data is processed and then displayed as interactive scatter plots created using plotly [[8](https://plot.ly/)]. The exact number of cases can be viewed by hovering over each point, and clicking a legend item restricts the plot to that case type. Hovering over individual dates shows the corresponding values for that day.
# Process the data of Rest of the world grouped by date
notChinaTimelineC = (
df[df["Country"] != "China"].groupby(df["Date"].dt.date)["Confirmed"].sum()
)
notChinaTimelineD = (
df[df["Country"] != "China"].groupby(df["Date"].dt.date)["Deaths"].sum()
)
notChinaTimelineR = (
df[df["Country"] != "China"].groupby(df["Date"].dt.date)["Recovered"].sum()
)
# Create figure with subplots
fig = make_subplots(
rows=1,
cols=2,
vertical_spacing=0.1,
subplot_titles=("Confirmed Cases", "Death and Recovered Cases"),
)
# Rest of the World confirmed cases
fig.add_trace(
go.Scatter(
name="Confirmed Cases",
y=notChinaTimelineC.values,
x=notChinaTimelineC.index,
text=notChinaTimelineC.values,
textposition="top center",
mode="lines+markers",
hovertemplate="%{x}: %{y} </br>",
marker=dict(
color="yellow", size=10, line=dict(color="rgb(55, 83, 109)", width=3)
),
line=dict(color="rgb(26, 118, 255)", width=4),
),
row=1,
col=1,
)
# Rest of the World death cases
fig.add_trace(
go.Scatter(
name="Death Cases",
y=notChinaTimelineD.values,
x=notChinaTimelineD.index,
text=notChinaTimelineD.values,
textposition="bottom right",
mode="lines+markers",
hovertemplate="%{x}: %{y} </br>",
marker=dict(color="red", size=10, line=dict(color="rgb(55, 83, 109)", width=3)),
line=dict(color="rgb(26, 118, 255)", width=4),
),
row=1,
col=2,
)
# Rest of the World recovered cases
fig.add_trace(
go.Scatter(
name="Recovered Cases",
y=notChinaTimelineR.values,
x=notChinaTimelineR.index,
text=notChinaTimelineR.values,
textposition="bottom right",
mode="lines+markers",
hovertemplate="%{x}: %{y} </br>",
marker=dict(
color="rgb(0, 196, 0)",
size=10,
line=dict(color="rgb(55, 83, 109)", width=3),
),
line=dict(color="rgb(26, 118, 255)", width=4),
),
row=1,
col=2,
)
# Cosmetic changes to figure
fig.update_layout(
title={
"text": "<b>Comparision of Confirmed cases v/s Death and Recovered cases globally excluding China</b>",
"x": 0.09,
"xanchor": "left",
"font": dict(size=20, color="black"),
}
)
fig.update_layout(
legend_orientation="h",
legend=dict(x=0.25, y=-0.3, bgcolor="rgba(255, 255, 255, 0)", bordercolor="red"),
)
# Update xaxis properties
fig.update_xaxes(
title_text="Timeline", titlefont_size=16, tickfont_size=15, row=1, col=1
)
fig.update_xaxes(
title_text="Timeline", titlefont_size=16, tickfont_size=15, row=1, col=2
)
# Update yaxis properties
fig.update_yaxes(
title_text="Number of Cases", titlefont_size=16, tickfont_size=15, row=1, col=1
)
fig.update_yaxes(
title_text="Number of Cases", titlefont_size=16, tickfont_size=15, row=1, col=2
)
fig.show()
|
# # Who survived the Titanic
# The Titanic is well known to all: the ship sank in the Atlantic Ocean while carrying more than a thousand passengers.
# In this notebook, we study the data on the passengers who survived the accident.
# Based on this study, we then try to predict survival for the passengers whose outcome is unknown.
# **Table of Contents** for the analysis is quite simple:
# - Get Data and necessary Libraries
# - Perform Initial Assessment on Data
# - Feature Engineering
# - Missing Value Imputation
# - Outlier Detection
# - Data Grouping
# - One-Hot Encoding
# - Data Vizualization
# - Univariate Analysis *using Column Chart*
# - Bivariate and Trivariate Analysis *using Pivot Table*
# - Modeling
# - Prepare dataset (train-test split)
# - Build Baseline Models (Logistics Regression, Perceptron, Multilayer Perceptron)
# - Predict Test Data
# - Ratio Analysis
# ## Get Library
import pandas as pd
import numpy as np
import datetime
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from sklearn.metrics import r2_score
import warnings
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Get Data
gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test = pd.read_csv("../input/titanic/test.csv")
train = pd.read_csv("../input/titanic/train.csv")
print("Training set = ", train.shape)
print("Testing set = ", test.shape)
print(
"Sum of Missing Values (Train/Test)= ",
train.isna().sum().sum(),
"(",
test.isna().sum().sum(),
")",
)
print(
"Survival Rate (in Training Data) =",
round(train.Survived.sum() / train.shape[0] * 100, 2),
"%",
)
# ## Initial Assessment of Data
# - Descriptive Statistics
# - Missing Values
train.describe(include="all")
print(
"Missing Values in Training Dataset:\n",
round(train.isna().sum() / train.shape[0] * 100, 2),
)
print(
"Missing Values in Testing Dataset:\n",
round(test.isna().sum() / test.shape[0] * 100, 2),
)
# The features Age, Cabin, Embarked and Fare have missing values, which are handled as follows:
# - Age: missing values can be estimated from other features such as SibSp, Parch and Fare, via a fixed value, if-else rules, or linear regression (used here).
# - Cabin: missing values cannot be recovered from other features. They can therefore be
#  - removed (given the high percentage of missing values), or
#  - imputed as a separate category (done here).
# - Embarked: missing values can be estimated from features such as Pclass, Fare and Parch through linear regression, similar to the Age imputation.
# - Fare: missing values can be filled from features such as Pclass and Parch, using the mean fare of the matching group.
# ## Feature Engineering - I
# #### Regression - To impute missing value in Age
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
def get_accuracy_score(data, col):
X_train, X_test, y_train, y_test = train_test_split(
data[col], data["Age"], test_size=0.3, random_state=7
)
lr1 = LinearRegression()
lr1.fit(X_train, y_train)
y_pred = lr1.predict(X_test)
return mean_squared_error(y_test, y_pred)
# #### Impute Missing Value - Age
def get_missing_age(data):
index = np.where(data.Age.isnull())
# Find Prediction based on Present Elements
data1 = data[~data.Age.isnull()]
acc_score0 = get_accuracy_score(data1, ["SibSp", "Parch", "Fare"])
acc_score1 = get_accuracy_score(data1, ["SibSp", "Parch"])
acc_score2 = get_accuracy_score(data1, ["SibSp", "Fare"])
acc_score3 = get_accuracy_score(data1, ["Parch", "Fare"])
min_score = min(
acc_score0, acc_score1, acc_score2, acc_score3
) # Error to be minimized
# Fit Model by Best Feature Selection
data2 = data[data.Age.isnull()]
if min_score == acc_score0:
col = ["SibSp", "Parch", "Fare"]
else:
if min_score == acc_score1:
col = ["SibSp", "Parch"]
else:
if min_score == acc_score2:
col = ["SibSp", "Fare"]
else:
col = ["Parch", "Fare"]
X_train, y_train, X_test = data1[col], data1["Age"], data2[col]
# Do Prediction on Absent Elements
lr = LinearRegression()
lr.fit(X_train, y_train)
data2 = data2.drop(columns=["Age"], axis=1)
data2["Age"] = [max(0, min(100, i)) for i in lr.predict(X_test)]
temp = data1["Age"]
data1 = data1.drop(columns=["Age"], axis=1)
data1["Age"] = temp
return data1.append(data2)
# #### Impute Missing Value - Fare
def impute_fare(dataset):
if dataset.Fare.isnull().sum() <= 0:
return dataset
else:
ok_set = dataset[~dataset.Fare.isnull()]
to_set = dataset[dataset.Fare.isnull()]
l = list()
for i in range(to_set.shape[0]):
l.append(
ok_set.Fare[
(ok_set.Pclass == to_set.iloc[i]["Pclass"])
& (ok_set.Parch == to_set.iloc[i]["Parch"])
].mean()
)
# Structural Re-format (Faster compared to For Loop)
temp = ok_set["Fare"]
ok_set = ok_set.drop(columns="Fare", axis=1)
ok_set["Fare"] = temp
to_set = to_set.drop(columns="Fare", axis=1)
to_set["Fare"] = l
return ok_set.append(to_set)
# ### Impute Missing Value - Cabin
def impute_cabin(data):
if data.Cabin.isnull().sum() <= 0:
return data
else:
ok_set = data[~data.Cabin.isnull()]
to_set = data[data.Cabin.isnull()]
# l=list()
# for i in range(ok_set.shape[0]):
# l.append(ok_set.iloc[i]['Cabin'][0])
l = [
ok_set.iloc[i]["Cabin"][0] for i in range(ok_set.shape[0])
] # Cannot put in next assignment, drop used
ok_set = ok_set.drop(columns=["Cabin"], axis=1)
ok_set["Cabin"] = l
to_set = to_set.drop(columns="Cabin", axis=1)
to_set["Cabin"] = "U"
return ok_set.append(to_set)
# ### Impute Missing Value - Embarked
def impute_embarked(data):
data.Embarked.replace("S", 1, inplace=True)
data.Embarked.replace("C", 2, inplace=True)
data.Embarked.replace("Q", 3, inplace=True)
if data.Embarked.isnull().sum() <= 0:
return data
else:
ok_data = data[~data.Embarked.isnull()]
to_data = data[data.Embarked.isnull()]
        lr = LinearRegression()  # Linear regression approximates the (encoded) Embarked value from related features
lr.fit(ok_data[["Pclass", "Fare", "Parch"]], ok_data["Embarked"])
temp = ok_data["Embarked"]
ok_data = ok_data.drop(columns="Embarked", axis=1)
ok_data["Embarked"] = temp
to_data = to_data.drop(columns="Embarked", axis=1)
temp = [int(i) for i in lr.predict(to_data[["Pclass", "Fare", "Parch"]])]
to_data["Embarked"] = temp
return ok_data.append(to_data)
# Impute Missing Value
# ~ Fare
test = impute_fare(test)
# ~ Age
train = get_missing_age(train)
test = get_missing_age(test)
# ~ Cabin
train = impute_cabin(train)
test = impute_cabin(test)
# ~ Embarked
train = impute_embarked(train)
test = impute_embarked(test)
# ## Feature Engineering - II
# ### Age Grouping
# Grouped into 3 groups (1=Young, 2=Mid Age, 3=Old).
# Age Grouping
age_group = 3
max_age = max(train.Age)
min_age = min(train.Age)
train["Age_Group"] = [
int(i)
for i in round((train.Age - min_age) / ((max_age - min_age) / age_group), 0) + 1
]
test["Age_Group"] = [
int(i)
for i in round((test.Age - min_age) / ((max_age - min_age) / age_group), 0) + 1
]
train = train.drop(columns="Age")
test = test.drop(columns="Age")
# ### Overall Fare
# Fare is not grouped, as it depends on the Pclass and Parch features (see the pivot table below).
# Fare: Based on Pclass and Parch
# Fare varies with passenger class and the number of accompanying family members.
# Thus, Fare behaves more like a feature derived from the others than an independent one.
pd.pivot_table(
train, values="Fare", index=["Pclass"], columns=["Parch"], aggfunc=np.mean
)
# Fare is largely a function of passenger class and the number of family members. However, some differences are noticeable, which arise from the cost of space: for example, a cabin with capacity for 4 people is priced most efficiently when exactly 4 people are allocated to it. A rough per-person fare is sketched below.
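# A hedged illustration of the point above (not in the original notebook): dividing Fare by the
# travelling-party size gives a rough per-person fare that can be compared across Pclass and Parch.
fare_per_person = train["Fare"] / (train["SibSp"] + train["Parch"] + 1)
pd.pivot_table(
    train.assign(Fare_per_person=fare_per_person),
    values="Fare_per_person",
    index=["Pclass"],
    columns=["Parch"],
    aggfunc=np.mean,
)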
# ## Visualize Data
# #### Univariate Analysis
def get_feature_count(data, col_names):
df_all = pd.DataFrame()
for i in col_names:
u = data[i].unique()
temp = pd.DataFrame()
for j in u:
m = (data[i] == j).sum()
temp = temp.append([[j, m]])
temp["col_name"] = i
df_all = df_all.append(temp)
df_all.columns = ["X", "Y", "Feature"]
return df_all
df = get_feature_count(
train, ["Pclass", "Sex", "Cabin", "Embarked", "Age_Group", "SibSp", "Parch"]
)
fig = px.bar(
data_frame=df,
x="X",
y="Y",
color="Y",
facet_col="Feature",
facet_col_wrap=7,
width=1000,
height=350,
)
fig.update_xaxes(matches=None)
fig.update_yaxes(matches=None)
fig.show()
# **Observation**:
# - Pclass: most passengers are in class 3, more than classes 1 and 2 combined.
# - More male passengers boarded the Titanic than female passengers.
# - Cabin information is available for only a small fraction of passengers.
# - Embarked: most passengers boarded at port S, more than ports C and Q combined.
# - Age Group: most passengers are middle-aged, outnumbering the young and old groups combined.
# - SibSp and Parch: most passengers travelled without any siblings, spouses, parents or children on board.
# How fares were distributed, by Pclass
fig = px.histogram(train, x="Fare", color="Pclass", height=300)
fig.show()
# #### Bivariate Analysis - Using Pivot Table
# Who survived Titanic?
# Who survived and who didn't?
temp_table = train
temp_table["Survive_Copy"] = 1
cols = ["Pclass", "Sex", "SibSp", "Parch", "Cabin", "Embarked", "Age_Group"]
for i in cols:
print(
"\nSurvival per",
i,
"\n",
"_" * 70,
"\n",
pd.pivot_table(
temp_table,
values="Survive_Copy",
index=["Survived"],
columns=[i],
aggfunc=np.sum,
),
)
# - Most passengers from Pclass = 1 were saved, whereas most passengers from Pclass = 3 were not.
# - More women were saved than men.
# - Although absolute head counts say otherwise, the survival ratio suggests that passengers travelling with siblings or children were given priority, alongside women.
# *Note: families with more than one child also show up in the SibSp feature column.*
# - The Cabin findings are inconclusive, so no comment is made.
# - Passengers who embarked at C have a higher survival rate than those who embarked at S or Q.
# - Younger people seem to have a higher survival rate than older people.
# For a more granular look at the data, multivariate analysis is performed on three variables at a time.
# #### Tri-Variate Analysis - Using Pivot Table
# Survivor Selection
cols = ["Pclass", "Sex", "SibSp", "Parch", "Cabin", "Embarked", "Age_Group"]
for i in cols:
for j in cols:
if i == j:
            continue  # nothing to compare when the two features are identical
else:
print(
"\n",
i,
"vs",
j,
"\n",
"_" * 70,
"\n",
pd.pivot_table(
train, values="Survived", index=[i], columns=[j], aggfunc=np.sum
),
)
# Findings that refine or contradict the earlier results:
# - Irrespective of the Cabin and Embarked features, first-class passengers were given higher priority to be saved.
# - Though women with children were given higher priority to be saved,
#  it is observed that passengers with fewer children had a higher chance of survival.
# - Most first-class survivors were from Cabins A-E, which could not be identified previously.
# *Please note:* NaN in the tables above means the combination does not occur.
# ## Model Building
result = pd.DataFrame()
# Rearrange Columns (Before Modeling)
X_train_ID = train[["PassengerId"]]
X_train = train[
["Pclass", "Sex", "SibSp", "Parch", "Cabin", "Embarked", "Fare", "Age_Group"]
]
y_train = train[["Survived"]]
X_test_ID = test[["PassengerId"]]
X_test = test[
["Pclass", "Sex", "SibSp", "Parch", "Cabin", "Embarked", "Fare", "Age_Group"]
]
print("Original Dimension\n", X_train.shape, y_train.shape, X_test.shape)
result["PassengerID"] = X_test_ID.PassengerId
# One hot Encoding for Sex, Cabin, Embarked
X_train = pd.get_dummies(X_train, columns=["Sex", "Cabin", "Embarked"])
X_test = pd.get_dummies(X_test, columns=["Sex", "Cabin", "Embarked"])
print("Post OHE Dimension\n", X_train.shape, X_test.shape)
# Post OHE: Remove irrelevant column (remove multi-collinearity) & rename column (for consistency)
X_train = X_train.drop(
columns=["Sex_female", "Cabin_U", "Cabin_T", "Embarked_1.0"], axis=1
)
X_test = X_test.drop(columns=["Sex_female", "Cabin_U", "Embarked_1"], axis=1)
X_train.columns = X_test.columns
print("Post Dimension Process\n", X_train.shape, X_test.shape)
print("List of columns:\n", *X_train.columns)
# ##### Train-Test split of Training Set
# - To check accuracy score
from sklearn.model_selection import train_test_split
temp_train = X_train
cols = temp_train.columns
temp_train["target"] = y_train
a_train, a_test, b_train, b_test = train_test_split(
temp_train[cols], temp_train["target"], test_size=0.3, random_state=7
)
print(
"Re-Split dimenstions\n", a_train.shape, a_test.shape, b_train.shape, b_test.shape
)
# #### Logistic Regression
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver="lbfgs", max_iter=1000, tol=0.001, random_state=7)
lr.fit(a_train, b_train)
print(
"Accuracy on Training Set=",
round(accuracy_score(b_test, lr.predict(a_test)) * 100, 2),
)
result["Log_Reg"] = lr.predict(X_test)
# #### Perceptron
from sklearn.linear_model import Perceptron
p = Perceptron(max_iter=50, shuffle=True, tol=0.001, random_state=7)
p.fit(a_train, b_train)
print(
"Accuracy on Training Set=",
round(accuracy_score(b_test, p.predict(a_test)) * 100, 2),
)
result["Perceptron"] = p.predict(X_test)
# #### Multi-Layer Perceptron
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(
solver="lbfgs",
hidden_layer_sizes=(10, 5),
max_iter=1000,
n_iter_no_change=5,
learning_rate="constant",
shuffle=True,
validation_fraction=0.1,
tol=0.001,
random_state=7,
)
mlp.fit(a_train, b_train)
print(
"Accuracy on Training Set=",
round(accuracy_score(b_test, mlp.predict(a_test)) * 100, 2),
)
result["MLP"] = mlp.predict(X_test)
# ## Ratio Analysis
#
print(
"Survival Rate from Logistic Regression =",
round(result.Log_Reg.sum() / result.shape[0] * 100, 2),
"%",
)
print(
"Survival Rate from Perceptron =",
round(result.Perceptron.sum() / result.shape[0] * 100, 2),
"%",
)
print(
"Survival Rate from Multi-layer Perceptron =",
round(result.MLP.sum() / result.shape[0] * 100, 2),
"%",
)
# Assuming the training and testing samples adequately represent the overall population, the quality of the predictions can be assessed.
# The survival rate in the training data is about 38.38%, and a similar percentage can be expected for the test data.
# - The logistic regression model, with 80.6% validation accuracy, predicts a survival rate of 38.76% (very close to 38.38%).
# - The perceptron model, with 70.52% validation accuracy, predicts a survival rate of 47.85% (far from 38.38%).
# - The multi-layer perceptron (MLP), with 82.09% validation accuracy, predicts a survival rate of 36.36% (close to 38.38%). Thus, the MLP is an improvement over the perceptron.
# Looking at the results more closely, logistic regression and the multi-layer perceptron capture non-survival adequately, whereas the perceptron model does not.
# *Please note*: aggregating more (diverse) models could improve results further, at additional computational cost; a minimal majority-vote sketch follows below.
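# A minimal majority-vote sketch over the three fitted models above (an illustration, not part of
# the original submission): a passenger is predicted to survive if at least two models agree.
votes = result[["Log_Reg", "Perceptron", "MLP"]].sum(axis=1)
result["Majority_Vote"] = (votes >= 2).astype(int)
print(
    "Survival Rate from Majority Vote =",
    round(result.Majority_Vote.sum() / result.shape[0] * 100, 2),
    "%",
)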
submission = result[["PassengerID", "MLP"]]
submission.to_csv("submission.csv", index=False)
|
### Libraries to be imported
import pandas as pd
import numpy as np
import nltk, re, string
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from sklearn.svm import SVC
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.tokenize import word_tokenize, sent_tokenize, RegexpTokenizer
from wordcloud import WordCloud
from sklearn.linear_model import LogisticRegression
data_frame = pd.read_csv("../input/nlp-getting-started/train.csv")
data_test = pd.read_csv("../input/nlp-getting-started/test.csv")
data_test.head(20)
data_frame.head(20)
# # Data Analysis
# ### General analysis of the data present:
# ### 1. sample count
# ### 2. unique keywords present
# ### 3. positive-class tweets
# ### 4. negative-class tweets
# ### Total length calculations for:
# number of characters per tweet
# keyword analysis
# location analysis
# word count
# unique word count
# stop word count
# url count
# character count
# mention count
# hashtag count
# punctuation count
# mean word length
data_test.isnull().sum()
data_frame.isnull().sum()
def show_word_distrib(target=1, field="text", top_N=10):
    # NOTE: relies on the `stop` word list defined further below
txt = (
data_frame[data_frame["target"] == target][field]
.str.lower()
        .str.replace(r"\|", " ", regex=True)
.str.cat(sep=" ")
)
words = nltk.tokenize.word_tokenize(txt)
words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stop)
rslt = pd.DataFrame(
words_except_stop_dist.most_common(top_N), columns=["Word", "Frequency"]
).set_index("Word")
print(rslt)
# score_df = pd.DataFrame(columns={'Model Description','Score'})
data_frame["keyword"] = data_frame["keyword"].map(
lambda s: s.replace("%20", " ") if isinstance(s, str) else s
)
un_KW = {kw for kw in data_frame["keyword"].values if isinstance(kw, str)}
tot_KW = len(data_frame) - len(data_frame[data_frame["keyword"].isna()])
print(len(un_KW))
print("total", tot_KW)
print("Samples with no KW", len(data_frame[data_frame["keyword"].isna()]))
data_test["keyword"] = data_test["keyword"].map(
lambda s: s.replace("%20", " ") if isinstance(s, str) else s
)
un_KW = {kw for kw in data_test["keyword"].values if isinstance(kw, str)}
tot_KW = len(data_test) - len(data_test[data_test["keyword"].isna()])
print(len(un_KW))
print("total", tot_KW)
print("Samples with no KW", len(data_test[data_test["keyword"].isna()]))
un_LOC = {kw for kw in data_frame["location"].values if isinstance(kw, str)}
tot_LOC = len(data_frame) - len(data_frame[data_frame["location"].isna()])
print(len(un_LOC))
print("total", tot_LOC)
print("Samples with no location", len(data_frame[data_frame["location"].isna()]))
# KW and location analysis
# remove space between keywords
data_frame["keyword"] = data_frame["keyword"].map(
lambda s: s.replace("%20", " ") if isinstance(s, str) else s
)
total_keyword = {kw for kw in data_frame["keyword"].values if isinstance(kw, str)}
print(total_keyword)
# for location
locations = {loc for loc in data_frame["location"].values if isinstance(loc, str)}
# print(locations)
# KW and location analysis
# remove space between keywords
data_test["keyword"] = data_test["keyword"].map(
lambda s: s.replace("%20", " ") if isinstance(s, str) else s
)
total_keyword = {kw for kw in data_test["keyword"].values if isinstance(kw, str)}
# print(total_keyword)
# for location
locations = {loc for loc in data_test["location"].values if isinstance(loc, str)}
print(locations)
# disaster keywords and regular keywords
# disaster
disaster_keyword = [kw for kw in data_frame.loc[data_frame.target == 1].keyword]
disaster_keywords_counts = dict(
pd.DataFrame(data={"x": disaster_keyword}).x.value_counts()
)
# print(disaster_keywords_counts)
# regualar
regular_kw = [kw for kw in data_frame.loc[data_frame.target == 0].keyword]
regular_keywords_counts = dict(pd.DataFrame(data={"x": regular_kw}).x.value_counts())
print(regular_keywords_counts)
# exploring regular tweets
regular_tweets = data_frame[data_frame["target"] == 0]["text"]
print(regular_tweets.values[11])
# exploring disaster tweets
disaster_tweets = data_frame[data_frame["target"] == 1]["text"]
print(disaster_tweets.values[69])
# word count
data_frame["word_count"] = data_frame["text"].apply(lambda x: len(str(x).split()))
print(data_frame["word_count"])
# unique_word_count
data_frame["unique_word_count"] = data_frame["text"].apply(
lambda x: len(set(str(x).split()))
)
print(data_frame["unique_word_count"])
# url count
data_frame["url_count"] = data_frame["text"].apply(
lambda x: len([w for w in str(x).lower().split() if "http" in w or "https" in w])
)
print(data_frame["url_count"])
# character count
data_frame["char_count"] = data_frame["text"].apply(lambda x: len(str(x)))
print(data_frame["char_count"])
# mention count
data_frame["mention_count"] = data_frame["text"].apply(
lambda x: len([c for c in str(x) if c == "@"])
)
print(data_frame["mention_count"])
# hashtag count
data_frame["hashtag_count"] = data_frame["text"].apply(
lambda x: len([c for c in str(x) if c == "#"])
)
# print(data_frame['text'])
# print(data_frame['hashtag_count'])
hashtag_exists = data_frame[(data_frame.hashtag_count != 0)]
print(hashtag_exists["text"])
# mean word length
data_frame["mean_word_length"] = data_frame["text"].apply(
lambda x: np.mean([len(w) for w in str(x).split()])
)
print(data_frame["mean_word_length"])
# word count
data_test["word_count"] = data_test["text"].apply(lambda x: len(str(x).split()))
print(data_test["word_count"])
# unique_word_count
data_test["unique_word_count"] = data_test["text"].apply(
lambda x: len(set(str(x).split()))
)
print(data_test["unique_word_count"])
# url count
data_test["url_count"] = data_test["text"].apply(
lambda x: len([w for w in str(x).lower().split() if "http" in w or "https" in w])
)
print(data_test["url_count"])
# character count
data_test["char_count"] = data_test["text"].apply(lambda x: len(str(x)))
print(data_test["char_count"])
# mention count
data_test["mention_count"] = data_test["text"].apply(
lambda x: len([c for c in str(x) if c == "@"])
)
print(data_test["mention_count"])
# hashtag count
data_test["hashtag_count"] = data_test["text"].apply(
lambda x: len([c for c in str(x) if c == "#"])
)
# print(data_frame['text'])
# print(data_frame['hashtag_count'])
hashtag_exists = data_test[(data_test.hashtag_count != 0)]
print(hashtag_exists["text"])
# mean word length
data_test["mean_word_length"] = data_test["text"].apply(
lambda x: np.mean([len(w) for w in str(x).split()])
)
print(data_test["mean_word_length"])
# # Data Cleaning
# ##### Remove stopwords, URLs, HTML, emojis, punctuation and numerical values
# Removing noise: URLs, HTML tags, punctuation and numerical values
def text_clean(text):
    # to lower case
    text = text.lower()
    # remove text in square brackets
    text = re.sub(r"\[.*?\]", "", text)
    # remove urls
    text = re.sub(r"https?://\S+|www\.\S+", "", text)
    # remove html tags
    text = re.sub(r"<.*?>+", "", text)
    # remove punctuation
    text = re.sub("[%s]" % re.escape(string.punctuation), "", text)
    # remove newlines and words containing digits
    text = re.sub(r"\n", "", text)
    text = re.sub(r"\w*\d\w*", "", text)
    return text
data_frame["text"] = data_frame["text"].apply(lambda x: text_clean(x))
print(data_frame["text"])
print("test data")
data_test["text"] = data_test["text"].apply(lambda x: text_clean(x))
print(data_test["text"])
# tokenization
tokenizer = nltk.tokenize.RegexpTokenizer(r"\w+")
# for x in data_frame['text']:
# print(x)
data_frame["text"] = data_frame["text"].apply(lambda x: tokenizer.tokenize(x))
print(data_frame["text"])
data_test["text"] = data_test["text"].apply(lambda x: tokenizer.tokenize(x))
print(data_test["text"])
# remove stopwords
stop = stopwords.words("english")
def remove_stopwords(text):
words = [w for w in text if w not in stop]
return words
data_frame["text"] = data_frame["text"].apply(lambda x: remove_stopwords(x))
print(data_frame["text"])
data_test["text"] = data_test["text"].apply(lambda x: remove_stopwords(x))
print(data_test["text"])
# Note: stemming is skipped, since it can distort words that matter for tweet analysis. We can therefore simply join the tokens back into plain text, now free of noise and stopwords.
# return data to original format
def combine_text(list_of_text):
combined_text = " ".join(list_of_text)
return combined_text
data_frame["text"] = data_frame["text"].apply(lambda x: combine_text(x))
data_frame.head()
data_test["text"] = data_test["text"].apply(lambda x: combine_text(x))
data_test.head()
# ## BAG OF WORDS
# ##### CountVectorizer converts the collection of text documents into token counts.
count_vectorizer = CountVectorizer()
data_vectors = count_vectorizer.fit_transform(data_frame["text"])
# Use transform (not fit_transform) so the test set shares the training vocabulary
data_test_vectors = count_vectorizer.transform(data_test["text"])
print(data_vectors[0].todense())
print(data_test_vectors[0].todense())
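# A quick peek at the fitted bag-of-words vocabulary (sketch; count_vectorizer was fitted above).
print("Vocabulary size:", len(count_vectorizer.vocabulary_))
print("Sample tokens:", sorted(count_vectorizer.vocabulary_)[:10])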
# ### TF_IDF
tfidf = TfidfVectorizer(min_df=4, max_df=0.5, ngram_range=(1, 2))
train_tfidf = tfidf.fit_transform(data_frame["text"])
# ### Logistic Regression Classifier
# Fitting a simple Logistic Regression on Counts
classifier = LogisticRegression(C=1.0)
scores = model_selection.cross_val_score(
classifier, data_vectors, data_frame["target"], cv=5, scoring="f1"
)
scores
classifier.fit(data_vectors, data_frame["target"])
# Fitting a simple Logistic Regression on TFIDF
clf_tfidf = LogisticRegression(C=1.0)
scores = model_selection.cross_val_score(
clf_tfidf, train_tfidf, data_frame["target"], cv=5, scoring="f1"
)
scores
# ## Apply NB Gaussain
# X_train, X_test, y_train, y_test = \
# train_test_split(data_frame['text'], data_frame['target'], random_state=20)
# ## Apply Tfidf tranformation
# vector = TfidfVectorizer().fit(X_train)
# X_train_vector = vector.transform(X_train)
# X_test_vector = vector.transform(X_test)
# df_test_vector = vector.transform(data_test['text'])
# gb_model= GaussianNB().fit(X_train_vector.todense(),y_train)
# predict = gb_model.predict(X_test_vector.todense())
# print('Roc AUC score - %3f'%(roc_auc_score(y_test,predict)))
# score_df = score_df.append({'Model Description':'Naive Bayes',
# 'Score':roc_auc_score(y_test,predict)}
# ,ignore_index=True)
# # SVM Classification
vector = TfidfVectorizer().fit(data_frame["text"])
df_train_vector = vector.transform(data_frame["text"])
df_test_vector = vector.transform(data_test["text"])
svc_model = SVC()
grid_values = {"kernel": ["linear", "poly", "rbf"], "C": [0.001, 0.01, 1, 10]}
grid_search_model = GridSearchCV(svc_model, param_grid=grid_values, cv=3)
grid_search_model.fit(df_train_vector, data_frame["target"])
print(grid_search_model.best_estimator_)
print(grid_search_model.best_score_)
print(grid_search_model.best_params_)
# score_df = score_df.append({'Model Description':'SVC - with Grid Search',
# 'Score':grid_search_model.best_score_}
# ,ignore_index=True)
predict = grid_search_model.predict(df_test_vector)
predict_df = pd.DataFrame()
predict_df["id"] = data_test["id"]
predict_df["target"] = predict
predict_df.to_csv("sample_submission_1.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import time
# from user_functions import *
from datetime import datetime
import pickle
import warnings
warnings.filterwarnings("ignore")
CB91_Blue = "#2CBDFE"
CB91_Green = "#47DBCD"
CB91_Pink = "#F3A0F2"
CB91_Purple = "#9D2EC5"
CB91_Violet = "#661D98"
CB91_Amber = "#F5B14C"
color_list = [CB91_Blue, CB91_Pink, CB91_Green, CB91_Amber, CB91_Purple, CB91_Violet]
plt.rcParams["axes.prop_cycle"] = plt.cycler(color=color_list)
sns.set_context(
"notebook", rc={"font.size": 16, "axes.titlesize": 20, "axes.labelsize": 18}
)
sns.set(
font="Franklin Gothic Book",
rc={
"axes.axisbelow": False,
"axes.edgecolor": "lightgrey",
# 'axes.edgecolor': 'white',
"axes.facecolor": "None",
"axes.grid": False,
"axes.labelcolor": "dimgrey",
# 'axes.labelcolor': 'white',
"axes.spines.right": False,
"axes.spines.top": False,
"axes.prop_cycle": plt.cycler(color=color_list),
"figure.facecolor": "white",
"lines.solid_capstyle": "round",
"patch.edgecolor": "w",
"patch.force_edgecolor": True,
"text.color": "dimgrey",
# 'text.color': 'white',
"xtick.bottom": False,
"xtick.color": "dimgrey",
# 'xtick.color': 'white',
"xtick.direction": "out",
"xtick.top": False,
"ytick.color": "dimgrey",
# 'ytick.color': 'white',
"ytick.direction": "out",
"ytick.left": False,
"ytick.right": False,
},
)
# READING DATASETS
aisles = pd.read_csv("/kaggle/input/instacart-market-basket-analysis/aisles.csv")
departments = pd.read_csv(
"/kaggle/input/instacart-market-basket-analysis/departments.csv"
)
order_products_prior = pd.read_csv(
"/kaggle/input/instacart-market-basket-analysis/order_products__prior.csv"
)
order_products_train = pd.read_csv(
"/kaggle/input/instacart-market-basket-analysis/order_products__train.csv"
)
orders = pd.read_csv("/kaggle/input/instacart-market-basket-analysis/orders.csv")
products = pd.read_csv("/kaggle/input/instacart-market-basket-analysis/products.csv")
# Merging products, aisles, and departments
#
products_aisles = pd.merge(products, aisles, on="aisle_id", how="left")
products_desc = pd.merge(products_aisles, departments, on="department_id", how="left")
products_desc.head()
# Analysing products_desc
plt.figure(figsize=(14, 7))
sns.countplot(
x="department",
data=products_desc,
order=products_desc.department.value_counts().index,
)
plt.title("Number of Products per Department")
plt.xticks(rotation=70)
plt.ylabel("Count")
plt.xlabel("Department")
plt.figure(figsize=(14, 7))
sns.countplot(
x="aisle", data=products_desc, order=products_desc.aisle.value_counts().index
)
plt.title("Number of Products per Aisle")
plt.xticks(rotation=90, fontsize=8)
plt.ylabel("Count")
plt.xlabel("Aisle")
# Uh oh, 'missing' is our most popular aisle
# And I see there is a 'missing' value for department above as well
products_desc[products_desc["aisle"] == "missing"]
# It appears that 1258 products have 'missing' aisle 100 and department 21
# Analysing orders
orders.head(15)
plt.figure(figsize=(14, 7))
sns.countplot(x="order_hour_of_day", data=orders)
plt.title("Number of Orders Taken by Hour of the Day.")
plt.ylabel("Count")
plt.xlabel("Hour")
plt.figure(figsize=(14, 7))
sns.countplot(x="order_dow", data=orders)
plt.title("Number of Orders Taken by Day of the Week.")
plt.ylabel("Count")
plt.xlabel("Day")
plt.figure(figsize=(14, 7))
sns.countplot(x="days_since_prior_order", data=orders)
plt.title("Days Since Prior Order")
plt.ylabel("Count")
plt.xlabel("Days")
# Creating test dataset
orders_test = orders[orders["eval_set"] == "test"] # 75000 orders in our test set
# For these test orders the answers are not available, i.e. the products ordered are unknown.
# Market basket analysis would be used to predict them.
orders_test
# Analysing order_products_train and order_products_prior
order_products_train.head()
order_products_prior.head()
# Data Preprocessing
# Merge order_products together
print(len(order_products_train))
print(len(order_products_prior))
len(order_products_train) + len(order_products_prior)
merged_order_products = pd.merge(
order_products_train, order_products_prior, how="outer"
)
merged_order_products.head(10)
products_per_order = merged_order_products.groupby("order_id").count()
# Gives the number of rows (i.e. products) in the table above, per order_id
products_per_order.head()
products_per_order[products_per_order["product_id"] == 1]
# 5 is the most common number of products per order
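# Quick check of the claim above (a small sketch): the modal number of products per order.
print(
    "Most common basket size:",
    products_per_order["product_id"].value_counts().idxmax(),
)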
plt.figure(figsize=(14, 7))
sns.countplot(x="product_id", data=products_per_order)
plt.title("Number of Products per Order")
plt.xticks(rotation=90)
plt.ylabel("Number of Orders")
plt.xlabel("Products")
# Merging merged_order_products and products_desc
order_products_desc = pd.merge(merged_order_products, products_desc, on="product_id")
order_products_desc.head()
# Analysing order_products_desc
plt.figure(figsize=(14, 7))
sns.countplot(
x="product_name",
data=order_products_desc,
order=order_products_desc.product_name.value_counts().index[:20],
)
plt.title("Most Ordered Products")
plt.xticks(rotation=70)
plt.ylabel("Count")
plt.xlabel("Product Name")
plt.figure(figsize=(14, 7))
sns.countplot(
x="product_name",
data=order_products_desc,
order=order_products_desc.product_name.value_counts().index[:200],
)
plt.title("Most Ordered Products")
plt.xticks([])
plt.ylabel("Count")
plt.xlabel("Product Name")
plt.figure(figsize=(14, 7))
sns.countplot(
x="department",
data=order_products_desc,
order=order_products_desc.department.value_counts().index,
)
plt.title("Most Ordered Departments")
plt.xticks(rotation=70)
plt.yticks(
[2000000, 4000000, 6000000, 8000000, 10000000],
["2 mil", "4 mil", "6 mil", "8 mil", "10 mil"],
)
plt.ylabel("Count")
plt.xlabel("Department")
plt.figure(figsize=(14, 7))
sns.countplot(
x="aisle",
data=order_products_desc,
order=order_products_desc.aisle.value_counts().index[:20],
)
plt.title("20 Most Ordered Aisles")
plt.xticks(rotation=70)
plt.yticks(
[1000000, 2000000, 3000000, 4000000],
["1,000,000", "2,000,000", "3,000,000", "4,000,000"],
)
plt.ylabel("Count")
plt.xlabel("Aisle")
# Merge user orders
# Output: merged_orders
merged_orders = pd.merge(orders, order_products_desc, on="order_id")
len(merged_orders)
merged_orders = merged_orders[merged_orders["user_id"] <= 100]
len(merged_orders)
merged_orders[merged_orders["user_id"] == 43]["product_name"].value_counts()
user_items = merged_orders[["user_id", "product_id", "product_name"]]
# Removing duplicate rows, since one user may have purchased a product more than once
ratings_details = user_items.drop_duplicates()
ratings_details.shape
ratings_details
# Creating a random rating list
import random
ratings = []
for _ in range(len(ratings_details)):  # one random rating per (user, product) pair
    ratings.append(round(random.uniform(1, 5), 1))
# Adding the ratings to ratings_details
ratings_details["ratings"] = ratings
merged_orders.to_csv("merged_orders.csv", index=False)
ratings_details.to_csv("ratings_details.csv", index=False)
|
import os
import gc
import cv2
import json
import time
import numpy as np
import pandas as pd
from pathlib import Path
from keras.utils import to_categorical
import seaborn as sns
from matplotlib import colors
import matplotlib.pyplot as plt
import torch
T = torch.Tensor
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
os.listdir("../input/abstraction-and-reasoning-challenge")
SIZE = 1000
EPOCHS = 50
CONV_OUT_1 = 50
CONV_OUT_2 = 100
BATCH_SIZE = 128
TEST_PATH = Path("../input/abstraction-and-reasoning-challenge/") / "test"
SUBMISSION_PATH = (
Path("../input/abstraction-and-reasoning-challenge/") / "sample_submission.csv"
)
test_task_files = sorted(os.listdir(TEST_PATH))
test_tasks = []
for task_file in test_task_files:
with open(str(TEST_PATH / task_file), "r") as f:
task = json.load(f)
test_tasks.append(task)
Xs_test, Xs_train, ys_train = [], [], []
for task in test_tasks:
X_test, X_train, y_train = [], [], []
for pair in task["test"]:
X_test.append(pair["input"])
for pair in task["train"]:
X_train.append(pair["input"])
y_train.append(pair["output"])
Xs_test.append(X_test)
Xs_train.append(X_train)
ys_train.append(y_train)
def replace_values(a, d):
return np.array([d.get(i, -1) for i in range(a.min(), a.max() + 1)])[a - a.min()]
def repeat_matrix(a):
return np.concatenate([a] * ((SIZE // len(a)) + 1))[:SIZE]
def get_new_matrix(X):
if len(set([np.array(x).shape for x in X])) > 1:
X = np.array([X[0]])
return X
def get_outp(outp, dictionary=None, replace=True):
if replace:
outp = replace_values(outp, dictionary)
outp_matrix_dims = outp.shape
outp_probs_len = outp.shape[0] * outp.shape[1] * 10
outp = to_categorical(outp.flatten(), num_classes=10).flatten()
return outp, outp_probs_len, outp_matrix_dims
class ARCDataset(Dataset):
def __init__(self, X, y, stage="train"):
self.X = get_new_matrix(X)
self.X = repeat_matrix(self.X)
self.stage = stage
if self.stage == "train":
self.y = get_new_matrix(y)
self.y = repeat_matrix(self.y)
def __len__(self):
return SIZE
def __getitem__(self, idx):
inp = self.X[idx]
if self.stage == "train":
outp = self.y[idx]
if idx != 0:
rep = np.arange(10)
orig = np.arange(10)
np.random.shuffle(rep)
dictionary = dict(zip(orig, rep))
inp = replace_values(inp, dictionary)
if self.stage == "train":
outp, outp_probs_len, outp_matrix_dims = get_outp(outp, dictionary)
if idx == 0:
if self.stage == "train":
outp, outp_probs_len, outp_matrix_dims = get_outp(outp, None, False)
return inp, outp, outp_probs_len, outp_matrix_dims
class BasicCNNModel(nn.Module):
def __init__(self, inp_dim=(10, 10), outp_dim=(10, 10)):
super(BasicCNNModel, self).__init__()
CONV_IN = 3
KERNEL_SIZE = 3
DENSE_IN = CONV_OUT_2
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
self.dense_1 = nn.Linear(DENSE_IN, outp_dim[0] * outp_dim[1] * 10)
if inp_dim[0] - KERNEL_SIZE + 1 <= 0 or inp_dim[1] - KERNEL_SIZE + 1 <= 0:
KERNEL_SIZE = min([inp_dim[0], inp_dim[1]])
self.conv2d_1 = nn.Conv2d(CONV_IN, CONV_OUT_1, kernel_size=KERNEL_SIZE)
self.conv2d_2 = nn.Conv2d(CONV_OUT_1, CONV_OUT_2, kernel_size=KERNEL_SIZE)
def forward(self, x, outp_dim):
x = torch.cat([x.unsqueeze(0)] * 3)
x = x.permute((1, 0, 2, 3)).float()
self.conv2d_1.in_features = x.shape[1]
conv_1_out = self.relu(self.conv2d_1(x))
self.conv2d_2.in_features = conv_1_out.shape[1]
conv_2_out = self.relu(self.conv2d_2(conv_1_out))
self.dense_1.out_features = outp_dim
feature_vector, _ = torch.max(conv_2_out, 2)
feature_vector, _ = torch.max(feature_vector, 2)
logit_outputs = self.dense_1(feature_vector)
out = []
for idx in range(logit_outputs.shape[1] // 10):
out.append(self.softmax(logit_outputs[:, idx * 10 : (idx + 1) * 10]))
return torch.cat(out, axis=1)
def transform_dim(inp_dim, outp_dim, test_dim):
return (
test_dim[0] * outp_dim[0] / inp_dim[0],
test_dim[1] * outp_dim[1] / inp_dim[1],
)
def resize(x, test_dim, inp_dim):
if inp_dim == test_dim:
return x
else:
return cv2.resize(flt(x), inp_dim, interpolation=cv2.INTER_AREA)
def flt(x):
return np.float32(x)
def npy(x):
return x.detach().numpy()
def itg(x):
return np.int32(np.round(x))
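# The loop below trains one small CNN per task: each task's train pairs are wrapped in an
# ARCDataset (which repeats the matrices and applies random colour-permutation augmentation),
# the network is optimised with Adam on an MSE loss for EPOCHS epochs, and the trained model
# is then applied to that task's test inputs, with predictions resized back to the expected
# output dimensions.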
idx = 0
start = time.time()
test_predictions = []
for X_train, y_train in zip(Xs_train, ys_train):
print("TASK " + str(idx + 1))
train_set = ARCDataset(X_train, y_train, stage="train")
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
inp_dim = np.array(X_train[0]).shape
outp_dim = np.array(y_train[0]).shape
network = BasicCNNModel(inp_dim, outp_dim)
optimizer = Adam(network.parameters(), lr=0.01)
for epoch in range(EPOCHS):
for train_batch in train_loader:
train_X, train_y, out_d, _ = train_batch
train_preds = network.forward(train_X, out_d)
train_loss = nn.MSELoss()(train_preds, train_y)
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
end = time.time()
print(
"Train loss: "
+ str(np.round(train_loss.item(), 3))
+ " "
+ "Total time: "
+ str(np.round(end - start, 1))
+ " s"
+ "\n"
)
    # Use the current task's test inputs (idx is only incremented at the end of the loop)
    X_test = np.array([resize(flt(X), np.shape(X), inp_dim) for X in Xs_test[idx]])
for X in X_test:
test_dim = np.array(T(X)).shape
test_preds = npy(network.forward(T(X).unsqueeze(0), out_d))
test_preds = np.argmax(test_preds.reshape((10, *outp_dim)), axis=0)
test_predictions.append(
itg(
resize(
test_preds,
np.shape(test_preds),
tuple(itg(transform_dim(inp_dim, outp_dim, test_dim))),
)
)
)
idx += 1
torch.cat([T(X).unsqueeze(0).unsqueeze(0)] * 3).permute((1, 0, 2, 3)).shape
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(", ", "")
str_pred = str_pred.replace("[[", "|")
str_pred = str_pred.replace("][", "|")
str_pred = str_pred.replace("]]", "|")
return str_pred
for idx, pred in enumerate(test_predictions):
test_predictions[idx] = flattener(pred)
submission = pd.read_csv(SUBMISSION_PATH)
submission["output"] = test_predictions
submission.to_csv("submission.csv", index=False)
|
# **Genetic algorithm** for grouping the correlated signals using a wrapper approach
"""Python genetic algorithm for grouping the correlated signals"""
# We start by loading all the modules we need for the wrapping
import pandas as pd
import numpy as np
import random
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
from IPython import display
# Nasa turbofan engine dataset
# Note: this data has already been preprocessed, removing everything that wasn't associated with a sensor;
# in addition, only flights far from failure were considered for training and validation
df_val = pd.read_csv("/kaggle/input/enginecleanednormalized/Nasa_val.txt", header=None)
df_train = pd.read_csv(
"/kaggle/input/enginecleanednormalized/Nasa_train.txt", header=None
)
df_train.head()
# As shown above, the imported Dataframes are not normalized:
scaler = preprocessing.StandardScaler()
Train = np.array(df_train)
Train = scaler.fit_transform(Train)
df_train = pd.DataFrame(Train) # The normalized train:
# We save the mean and std of the training set in a new dataframe, useful later for the PCA reconstruction
normalizing = pd.DataFrame({"Mean": scaler.mean_, "Std": scaler.var_**0.5})
normalizing.head()
# We now define some basic functions for this *GA*; the one below allows us to generate an initial pool of genes to start from
def generate_parent(length, groups, min_size=4):
# This function is used to generate an array of 0 and n, used as the genes for our genetic algorithm,
randBinList = lambda n: [random.randint(0, groups - 1) for b in range(1, n + 1)]
genes = np.array(randBinList(length))
# Let's generate bounded parents:
for group_id in range(groups):
Genes_vect = group_id == genes
# Recursive to respect the limit of at least 4 sensor each group
if sum(Genes_vect) < min_size:
genes = generate_parent(length, groups)
return genes
# We can now create the **fitness function** of our GA, which will be used to decide which chromosomes survive into the next generation
def get_fitness_wrapper(genes, group_id, val, train, normalizing, n_comp=1):
# We have to isolate the grouped signals:
Genes_vect = group_id == genes
val_fun = val.iloc[:, Genes_vect]
train_fun = train.iloc[:, Genes_vect]
norm_fun = normalizing.iloc[Genes_vect, :]
# We transform them into numpy arrays
val_group = np.array(val_fun)
train_group = np.array(train_fun)
norm_group = np.array(norm_fun)
val_n = np.zeros(val_fun.shape)
for isig in range(norm_fun.shape[0]):
val_n[:, isig] = (val_group[:, isig] - norm_group[isig, 0]) / norm_group[
isig, 1
]
# PCA Reconstruction
pca = PCA()
pca.fit(train_group)
eigen = pca.components_
eigen = np.transpose(eigen[:n_comp])
inveigen = np.transpose(eigen)
Xhat_n = val_n @ eigen @ inveigen
Xhat = np.zeros(val_fun.shape)
for isig in range(norm_fun.shape[0]):
Xhat[:, isig] = Xhat_n[:, isig] * norm_group[isig, 1] + norm_group[isig, 0]
MSE = sum(sum((val_group - Xhat) ** 2) / len(Xhat)) / val.shape[1]
return MSE
# We calculate the MSE for a single-group approach, to compare it later with the genetic algorithm:
genes = generate_parent(df_val.shape[1], 1)
MSE = get_fitness_wrapper(genes, 0, df_val, df_train, normalizing)
MSE
# We can now define the function related to the **evolution** of our chromosomes, starting from the mutation:
def mutate(child, groups, min_size=4):
# This function is used to mutate a random gene in the chromosome, as it is important to explore all the space
mutated_child = child
genes = np.array(mutated_child)
    # We swap a single value when a mutation occurs
index = random.randrange(0, len(child))
swap = random.randint(0, groups - 1)
genes[index] = swap
child = genes
for group_id in range(groups):
Genes_vect = group_id == child
# Recursive to respect the limit of at least 4 sensor each group
if sum(Genes_vect) < min_size:
child = mutate(child, groups, min_size)
return child
# The breeding function:
def breeder_scattered(parent1, parent2, groups, min_size=4):
    # Here we mix random elements from the two parents to produce a child
child = parent1 # We start from the first parent
selectionscattered = generate_parent(
len(parent1), 2
) # Vector of 0 and 1, used as a mask
mask = 1 == selectionscattered # Boolean mask for our substitution
child_d = np.array(child)
child_d[mask] = parent2[mask]
child = child_d
for group_id in range(groups):
Genes_vect = group_id == child
# Recursive to respect the limit of at least {min_size} sensors each group
if sum(Genes_vect) < min_size:
child = breeder_scattered(parent1, parent2, groups, min_size)
return child
# We now have all the ingredients for our GA; we just need a function to tie them together
# As the name suggests, this is the core of the genetic algorithm:
def core(
pop,
gen,
groups,
df,
df_train,
normalizing,
crossover_fr=0.8,
elite=5,
mutation_prob=0.1,
min_size=4,
n_comp=1,
):
# First thing we have to generate the parents
parents = list()
for i in tqdm(range(pop), desc="Parents generation"):
parents.append(
generate_parent(df_val.shape[1], groups, min_size)
) # list containing the parents of our problem
# Initial state of the population
fit_array = np.zeros(pop)
for j in tqdm(range(len(parents))):
for i in range(groups):
fit_array[j] = fit_array[j] + get_fitness_wrapper(
parents[j], i, df, df_train, normalizing, n_comp
)
# Setup of variables for later
fit_mean_array = list()
fit_min_array = list()
gen_array = list()
next_gen = parents
next_fit = fit_array
# Evolution
for j in range(gen):
fit_array = next_fit
parents = next_gen
next_fit = np.zeros(pop)
next_gen = list()
# ELITE
elite_array = np.copy(fit_array)
for i in range(elite):
if i != 0:
while (
next_fit[i - 1] == elite_array[elite_array.argmin()]
): # We assure different elite to be passed on
elite_array = np.delete(elite_array, elite_array.argmin(), 0)
next_fit[i] = elite_array[elite_array.argmin()]
next_gen.append(parents[elite_array.argmin()])
# CROSSOVER:
Cross_fr = 0
Cc = 1
while Cross_fr < crossover_fr and len(next_gen) < pop:
couples = random.sample(range(0, len(fit_array)), len(fit_array))
child = breeder_scattered(
parents[couples[Cc]], parents[couples[Cc + 1]], groups, min_size
)
fit_child = np.zeros(1)
for k in range(groups):
fit_child = fit_child + get_fitness_wrapper(
child, k, df, df_train, normalizing, n_comp
)
# Only the strong survive in this version:
if fit_child < fit_array[couples[Cc + 1]]:
next_fit[Cc + i] = fit_child
next_gen.append(child)
else:
next_fit[Cc + i] = fit_array[couples[Cc + 1]]
next_gen.append(parents[couples[Cc + 1]])
Cc += 1
Cross_fr = Cc / pop
# MUTATION:
while len(next_gen) < pop:
parent1 = random.randint(0, len(parents) - 1)
# We mutate the vector with the probability {mutation_prob}
if np.random.rand() < mutation_prob:
child = mutate(parents[parent1], groups, min_size)
fit_child = np.zeros(1)
for k in range(groups):
fit_child = fit_child + get_fitness_wrapper(
child, k, df, df_train, normalizing, n_comp
)
next_fit[len(next_gen)] = fit_child
next_gen.append(child)
else:
next_fit[len(next_gen)] = fit_array[parent1]
next_gen.append(parents[parent1])
        # This part is only for presentation purposes: it updates the graph while the GA is running
plt.clf()
display.clear_output(wait=True)
fit_mean_array.append(np.mean(fit_array))
fit_min_array.append(fit_array[fit_array.argmin()])
gen_array.append(j)
label1 = "mean fitness " + str(round(fit_mean_array[-1], 3))
label2 = "best fitness " + str(round(fit_min_array[-1], 3))
plt.xlim((0, gen))
plt.plot(gen_array, fit_mean_array, "r.", label=label1)
plt.plot(gen_array, fit_min_array, "k.", label=label2, markersize=5)
plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0)
plt.legend()
plt.ylabel("MSE")
plt.xlabel("Gen")
plt.show()
if round(fit_mean_array[-1], 1) == round(fit_min_array[-1], 1):
print("Convergence reached, the GA is stopping")
return parents, fit_array
return parents, fit_array
# We can now call the core function and set the pop & generations to the parameters we want
"""Avoid bugs in tqdm module"""
try:
tqdm._instances.clear()
except:
pass
# We can finally run the GA; first we select the number of groups we require for our system:
groups = 3
# This returns the evolved population and their fitness values; we can then select one of its elements to group the signals:
pop, fit_vect = core(150, 60, groups, df_val, df_train, normalizing)
# If the GA was successful, it should be better than the one generated at the beginning:
fit_vect[fit_vect.argmin()]
# The best solution found is then:
pop[fit_vect.argmin()]
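# A hedged follow-up sketch (not part of the original notebook): assuming each chromosome is a
# vector with one group label per column of df_val, the best individual can be turned into an
# explicit column-to-group mapping like this.
best = np.asarray(pop[fit_vect.argmin()]).ravel()
signal_groups = {g: [i for i, lab in enumerate(best) if lab == g] for g in range(groups)}
print(signal_groups)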
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from collections import Counter
from IPython.core.display import display, HTML
sns.set_style("darkgrid")
df = pd.read_csv("/kaggle/input/glass/glass.csv")
df.head()
corr = df.corr()
# Plot figsize
fig, ax = plt.subplots(figsize=(10, 8))
# Generate Heat Map, allow annotations and place floats in map
sns.heatmap(corr, cmap="coolwarm", annot=True, fmt=".2f")
# Apply xticks (offset by 0.5 so the labels line up with the heatmap cells)
plt.xticks(np.arange(len(corr.columns)) + 0.5, corr.columns)
# Apply yticks
plt.yticks(np.arange(len(corr.columns)) + 0.5, corr.columns)
# show plot
plt.show()
print(corr)
features = ["RI", "Na", "Mg", "Al", "Si", "K", "Ca", "Ba", "Fe", "Type"]
data = df[features]
# select target
target = data["Type"]
data = data.drop("Type", axis=1)
X_train, X_test, y_train, y_test = train_test_split(
data, target, test_size=0.4, random_state=0
)
from sklearn.ensemble import GradientBoostingClassifier
# clf = GradientBoostingClassifier(loss='deviance', n_estimators=100, learning_rate=1.0,max_depth=2, random_state=0)
# Fit classifier with out-of-bag estimates
params = {
"n_estimators": 1500,
"max_depth": 5,
"subsample": 0.5,
"learning_rate": 0.01,
"min_samples_leaf": 1,
"random_state": 3,
}
clf = GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
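# Hedged evaluation sketch (not in the original notebook): quick accuracy and per-class report
# for the gradient boosting predictions on the held-out split.
from sklearn.metrics import accuracy_score, classification_report
print("Test accuracy: {:.3f}".format(accuracy_score(y_test, predictions)))
print(classification_report(y_test, predictions))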
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error as mae
from sklearn.neural_network import MLPRegressor
pd.options.display.max_rows = 999
PATH_TO_FILES = "/kaggle/input/house-prices-advanced-regression-techniques/"
SEED = 42
data = pd.read_csv(os.path.join(PATH_TO_FILES, "train.csv"))
predict = pd.read_csv(os.path.join(PATH_TO_FILES, "test.csv"))
sample = pd.read_csv(os.path.join(PATH_TO_FILES, "sample_submission.csv"))
with open(os.path.join(PATH_TO_FILES, "data_description.txt"), "r") as f:
for l in f.readlines():
        print(l, end="")  # the lines already end with a newline, avoid double spacing
data.dtypes
plt.hist(data.SalePrice)
plt.show()
le = LabelEncoder()
le.fit(data.Street)
data.Street = le.transform(data.Street)
predict.Street = le.transform(predict.Street)
plt.hist(data.Street)
plt.show()
cols_to_drop = ["Id"]
for col, t in zip(list(data), data.dtypes):
if t == "object":
cols_to_drop.append(col)
print(cols_to_drop)
data = data.drop(cols_to_drop, axis=1)
predict = predict.drop(cols_to_drop, axis=1)
data = data.fillna(0)
predict = predict.fillna(0)
y = data.pop("SalePrice")
x = data.values
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=SEED
)
print("Train size: {} Test size: {}".format(len(x_train), len(x_test)))
model = DecisionTreeRegressor(random_state=SEED)
model.fit(x_train, y_train)
preds_train = model.predict(x_train)
preds_test = model.predict(x_test)
print(
"Train score {} Test score {}".format(
mae(y_train, preds_train), mae(y_test, preds_test)
)
)
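# Hedged comparison sketch (not part of the original pipeline): MLPRegressor was imported above
# but never used, so here is one way it could be tried on the same split; scaling matters for MLPs.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
mlp = make_pipeline(
    StandardScaler(),
    MLPRegressor(hidden_layer_sizes=(64, 32), max_iter=500, random_state=SEED),
)
mlp.fit(x_train, y_train)
print("MLP test MAE: {:.0f}".format(mae(y_test, mlp.predict(x_test))))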
sample.SalePrice = model.predict(predict)
sample.to_csv("submission.csv", index=None)
|
#
#
# Dataviz - Data Science Specialization Program - FACENS
# # Extra Exercise
# * **Due date:** 08/02/2020
# * **Professor:** Matheus Mota
# * **Student:** Rodrigo Prenstteter
# * **RA:** 191352
# ## Single question
# Develop and document throughout this notebook **your** exploratory analysis of the `BlackFriday.csv` dataset.
# Suggested outline:
# 1. Classification of the variables (do not forget to present the absolute and relative frequencies of the nominal qualitative variables)
# 2. Summary, assessment and correction of any problems in the variable values
# 3. Graphical examination of the data
# 3.1. Summary/behaviour of the values (line(s), pie, bar(s), scatter plot)
#
# 3.2. Distribution of the values (histogram, box plot)
#
# 3.3. Relationships between the variables (heat map, scatter plot)
#
# 4. At least three analyses of the distribution by groups/slices (example: what do the age histograms look like per product category?)
#
# 5. Conclusion: hypotheses and observations
# ## Solution
# Libraries used
import pandas as pd
from operator import itemgetter
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# Read the Black Friday dataset
df_blackfriday = pd.read_csv("../input/dataviz-facens-20182-ex3/BlackFriday.csv")
print(df_blackfriday.info())
print(df_blackfriday.describe())
## General routines
# Compute the frequency of values and identify unfilled values
def frequencia_coluna(df, coluna, qtd_linhas):
list_value = []
list_freq_absoluta = df[coluna].value_counts()
qtd_tot_valores = 0
for item in list_freq_absoluta.iteritems():
list_value.append([str(item[0]), item[1]])
qtd_tot_valores = qtd_tot_valores + item[1]
qtd_nao_inf = qtd_linhas - qtd_tot_valores
list_value.append(["Não disponível", qtd_nao_inf])
list_value = sorted(list_value, key=itemgetter(0))
df = pd.DataFrame(list_value)
df = df.rename(columns={0: "valor", 1: "freq"})
return df
# Identify unfilled values
def valores_zerados(df, coluna):
list_value = []
list_freq_absoluta = df_blackfriday[coluna].value_counts()
qtd_tot_valores = 0
for item in list_freq_absoluta.iteritems():
qtd_tot_valores = qtd_tot_valores + item[1]
list_value.append(["Disponível", qtd_tot_valores])
qtd_nao_inf = qtd_linhas - qtd_tot_valores
list_value.append(["Não disponível", qtd_nao_inf])
list_value = sorted(list_value, key=itemgetter(0))
df = pd.DataFrame(list_value)
df = df.rename(columns={0: "valor", 1: "freq"})
return df
# Display a bar chart
def mostra_bargraph(df, coluna_x, coluna_y, titulo, descricao_x, descricao_y):
df = df.sort_values(by=[coluna_x])
plt.figure(figsize=(15, 6.5))
graph = sns.barplot(x=coluna_x, y=coluna_y, data=df)
for p in graph.patches:
graph.annotate(
format(p.get_height(), ".0f"),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="center",
xytext=(0, 10),
textcoords="offset points",
)
plt.title(titulo, fontsize=16)
plt.xlabel(descricao_x, fontsize=14)
plt.ylabel(descricao_y, fontsize=14)
plt.show()
# Display a box plot
def mostra_boxplot(df, coluna_x, coluna_y, titulo, descricao_x, descricao_y):
df = df.sort_values(by=[coluna_x])
plt.figure(figsize=(15, 6.5))
sns.set_style("whitegrid")
sns.boxplot(x=coluna_x, y=coluna_y, data=df) # , order=list(sorted_nb.index))
plt.title(titulo, fontsize=16)
plt.xlabel(descricao_x, fontsize=14)
plt.ylabel(descricao_y, fontsize=14)
plt.show()
def mostra_heatmap(df, titulo):
plt.figure(figsize=(25, 7))
sns.heatmap(df, annot=True, annot_kws={"size": 12})
plt.title(titulo, fontsize=16)
plt.show()
def mostra_scatter(df, coluna_x, coluna_y, coluna_s, titulo, descricao_x, descricao_y):
plt.figure(figsize=(25, 7))
df2 = df.groupby([coluna_x, coluna_y]).count()
for column in df2:
if column != coluna_x and column != coluna_y and column != coluna_s:
df2.drop(column, axis=1, inplace=True)
df2 = df2.reset_index()
plt.scatter(x=df2[coluna_x], y=df2[coluna_y], s=df2[coluna_s] / 25)
plt.title(titulo, fontsize=16)
plt.xlabel(descricao_x, fontsize=14)
plt.ylabel(descricao_y, fontsize=14)
plt.show()
# Column classification
def classificacao(codigo):
if codigo == "QD":
return "Quantitativa Descritiva"
elif codigo == "QC":
return "Quantitativa Contínua"
elif codigo == "QN":
return "Qualitativa Nominal"
elif codigo == "QO":
return "Qualitativa Ordinal"
# Compute the absolute and relative frequencies
def classificacao_coluna(coluna, classif, qtd_linhas):
classif_ret = classificacao(classif)
list_value = []
    if qtd_linhas != 0:  # compute absolute and relative frequencies
qtd_tot_valores = 0
list_freq_absoluta = df_blackfriday[coluna].value_counts()
for item in list_freq_absoluta.iteritems():
list_value.append(
[item[0], item[1], "{:.3%}".format(float(item[1] / qtd_linhas))]
)
qtd_tot_valores = qtd_tot_valores + item[1]
        if qtd_tot_valores != qtd_linhas:  # if they differ, there are unreported values
qtd_nao_inf = qtd_linhas - qtd_tot_valores
list_value.append(
["N/A", qtd_nao_inf, "{:.3%}".format(qtd_nao_inf / qtd_linhas)]
)
    else:  # do not compute frequencies
list_value.append([0, 0, ""])
return [coluna, classif_ret, list_value]
# ## **Item 1 - Classification of the variables**
# Dataset description
# output structure: list(column name, classification, list(value, absolute frequency, relative frequency))
qtd_linhas = len(df_blackfriday)
colunas = [
classificacao_coluna(
"User_ID", "QN", qtd_linhas
    ),  # QN - Qualitative Nominal: a non-numeric characteristic with no order among its values
classificacao_coluna(
"Product_ID", "QN", qtd_linhas
    ),  # QO - Qualitative Ordinal: a non-numeric characteristic with an order among its values
classificacao_coluna(
"Gender", "QN", qtd_linhas
    ),  # QD - Quantitative Discrete: a finite or countable set of numbers,
    classificacao_coluna("Age", "QO", 0),  # resulting from a count
classificacao_coluna(
"Occupation", "QN", qtd_linhas
    ),  # QC - Quantitative Continuous: a value within an interval of real numbers
classificacao_coluna("City_Category", "QN", qtd_linhas),
classificacao_coluna("Stay_In_Current_City_Years", "QO", 0),
classificacao_coluna("Marital_Status", "QN", qtd_linhas),
classificacao_coluna("Product_Category_1", "QN", qtd_linhas),
classificacao_coluna("Product_Category_2", "QN", qtd_linhas),
classificacao_coluna("Product_Category_3", "QN", qtd_linhas),
classificacao_coluna("Purchase", "QC", 0),
]
colunas
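# Optional hedged sketch (not in the original notebook): flattening the nested list above into a
# DataFrame makes the classification and frequency table easier to read.
df_classificacao = pd.DataFrame(
    [
        (nome, classif, valor[0], valor[1], valor[2])
        for nome, classif, valores in colunas
        for valor in valores
    ],
    columns=["column", "classification", "value", "abs_freq", "rel_freq"],
)
df_classificacao.head(20)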
# ## **Item 2 - Summary, assessment and correction of any problems in the variable values**
# Columns considered for the analysis:
# Gender / Age / Stay_In_Current_City_Years / Marital_Status / Purchase
# Compute the frequencies of each column, checking whether any value is missing
qtd_linhas = len(df_blackfriday)
df_Gender = frequencia_coluna(df_blackfriday, "Gender", qtd_linhas)
df_Age = frequencia_coluna(df_blackfriday, "Age", qtd_linhas)
df_Stay_In_Current_City_Yearsprint = frequencia_coluna(
df_blackfriday, "Stay_In_Current_City_Years", qtd_linhas
)
df_Marital_Status = frequencia_coluna(df_blackfriday, "Marital_Status", qtd_linhas)
# Check that all purchase values are filled in
df_Purchase = valores_zerados(df_blackfriday, "Purchase")
# Display bar charts with the data found
print("Número total de linhas do dataset origem:", qtd_linhas)
mostra_bargraph(
df_Gender, "valor", "freq", "Qtd. Vendas por Gênero", "Gênero", "Número Vendas"
)
mostra_bargraph(
df_Age,
"valor",
"freq",
"Qtd. Vendas por Faixa de Idade",
"Faixa Idade",
"Número Vendas",
)
mostra_bargraph(
df_Stay_In_Current_City_Yearsprint,
"valor",
"freq",
"Qtd. Vendas por Tempo permanência na cidade",
"Tempo permanência",
"Número Vendas",
)
mostra_bargraph(
df_Marital_Status,
"valor",
"freq",
"Qtd. Vendas por Estado Civil",
"Estado Civil",
"Número Vendas",
)
mostra_bargraph(
df_Purchase,
"valor",
"freq",
"Valores gastos em cada compra",
"Informação",
"Número Vendas",
)
# ## **Analysis:**
# All five columns selected for the analysis are properly filled in, so no data-cleaning procedure is needed
# ## **Item 3 - Graphical examination of the data**
# ## 3.1 - Summary/behaviour of the values (line(s), pie, bar(s), scatter plot)
# Compute the average ticket value
df_ticket_medio_idade = pd.DataFrame(
df_blackfriday.groupby("Age")["Purchase"].sum()
/ df_blackfriday.groupby("Age")["Purchase"].count()
).reset_index()
df_ticket_medio_genero = pd.DataFrame(
df_blackfriday.groupby("Gender")["Purchase"].sum()
/ df_blackfriday.groupby("Gender")["Purchase"].count()
).reset_index()
df_ticket_medio_tempo = pd.DataFrame(
df_blackfriday.groupby("Stay_In_Current_City_Years")["Purchase"].sum()
/ df_blackfriday.groupby("Stay_In_Current_City_Years")["Purchase"].count()
).reset_index()
df_ticket_medio_estcivil = pd.DataFrame(
df_blackfriday.groupby("Marital_Status")["Purchase"].sum()
/ df_blackfriday.groupby("Marital_Status")["Purchase"].count()
).reset_index()
# Display bar charts with the data found
mostra_bargraph(
df_ticket_medio_idade,
"Age",
"Purchase",
"Vr.Ticket Médio / Faixa Etária",
"Faixa Etária",
"$ Ticket Médio",
)
mostra_bargraph(
df_ticket_medio_genero,
"Gender",
"Purchase",
"Vr.Ticket Médio / Gênero",
"Gênero",
"$ Ticket Médio",
)
mostra_bargraph(
df_ticket_medio_tempo,
"Stay_In_Current_City_Years",
"Purchase",
"Vr.Ticket Médio / Tempo na cidade",
"Tempo na cidade",
"$ Ticket Médio",
)
mostra_bargraph(
df_ticket_medio_estcivil,
"Marital_Status",
"Purchase",
"Vr.Ticket Médio / Estado Civil",
"Estado Cibil",
"$ Ticket Médio",
)
# ## 3.2 - Distribution of the values (histogram, box plot)
# Box plot
mostra_boxplot(
df_blackfriday,
"Gender",
"Purchase",
"Distribuição de valores gastos x gênero",
"Gênero",
"$ Valor Gasto",
)
mostra_boxplot(
df_blackfriday,
"Age",
"Purchase",
"Distribuição de valores gastos x faixa etária",
"Faixa Etária",
"$ Valor Gasto",
)
mostra_boxplot(
df_blackfriday,
"Stay_In_Current_City_Years",
"Purchase",
"Distribuição de valores gastos x tempo na cidade",
"Tempo na Cidade",
"$ Valor Gasto",
)
mostra_boxplot(
df_blackfriday,
"Marital_Status",
"Purchase",
"Distribuição de valores gastos x estado civil",
"Estado Cibil",
"$ Valor Gasto",
)
# ## 3.3 - Relationships between the variables (heat map, scatter plot)
# Heat map
mostra_heatmap(
df_blackfriday.corr(), "Cálculo de correlação entre as colunas do dataset"
)
# ## **Item 4 - At least three analyses of the distribution by groups/slices (example: what do the age histograms look like per product category?)**
# Bubble plot
mostra_scatter(
df_blackfriday,
"Product_Category_1",
"Age",
"Purchase",
"Volume de Vendas por Idade x Categoria de Produtos",
"Categoria de Produtos",
"Faixa Etária",
)
mostra_scatter(
df_blackfriday,
"Gender",
"Age",
"Purchase",
"Volume de Vendas por Idade x Gênero",
"Gênero",
"Faixa Etária",
)
mostra_scatter(
df_blackfriday,
"Gender",
"Marital_Status",
"Purchase",
"Volume de Vendas por Estado Civil x Gênero",
"Gênero",
"Estado Civil",
)
mostra_scatter(
df_blackfriday,
"Stay_In_Current_City_Years",
"Marital_Status",
"Purchase",
"Volume de Vendas por Estado Civil x Tempo Cidade",
"Tempo na Cidade",
"Estado Civil",
)
|
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/covid19-deaths-dataset/all_weekly_excess_deaths.csv")
df.head()
print(df["country"].unique())
# Create df for Canada
df1 = df.loc[(df["country"] == "Canada") & (df["year"] == 2020)]
df1.head()
# Line Chart
plt.plot(df1["week"], df1["total_deaths"])
plt.xlabel("week")
plt.ylabel("total_deaths")
plt.title("line chart")
plt.show()
# scatter chart (the third argument sets the marker size, here scaled by covid_deaths)
plt.scatter(df1["week"], df1["total_deaths"], s=df1["covid_deaths"])
plt.xlabel("week")
plt.ylabel("total_deaths")
plt.title("scatter chart")
plt.show()
# histogram
plt.hist(df["total_deaths"], bins=20)
plt.xlabel("total_deaths")
plt.title("histogram")
plt.xticks([0, 20000, 40000, 60000, 80000], ["0", "20k", "40k", "60k", "80k"])
plt.show()
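# Hedged extra sketch (not in the original notebook): overlaying covid_deaths on the weekly
# total_deaths line makes the two series for Canada in 2020 easier to compare.
plt.plot(df1["week"], df1["total_deaths"], label="total_deaths")
plt.plot(df1["week"], df1["covid_deaths"], label="covid_deaths")
plt.xlabel("week")
plt.ylabel("deaths")
plt.title("total vs covid deaths")
plt.legend()
plt.show()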
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
df_train = pd.read_csv(
"/kaggle/input/human-activity-recognition-with-smartphones/train.csv"
)
df_train.head()
df_test = pd.read_csv(
"/kaggle/input/human-activity-recognition-with-smartphones/test.csv"
)
df_test.head()
len(df_train)
data = df_train
total_null_values = df_train.isnull().sum().sort_values(ascending=False)
percentage = ((df_train.isnull().sum() / df_train.isnull().count()) * 100).sort_values(
ascending=False
)
print("Total values present are", df_train.shape[0])
total_missing_data = pd.concat(
[total_null_values, percentage.round(2)],
axis=1,
keys=["Total Missing", "In precentage"],
)
total_missing_data
total_null_values_in_test = df_test.isnull().sum().sort_values(ascending=False)
percentage_in_test = (
(df_test.isnull().sum() / df_test.isnull().count()) * 100
).sort_values(ascending=False)
print("Total values present are", df_test.shape[0])
total_missing_data_in_test = pd.concat(
    [total_null_values_in_test, percentage_in_test.round(2)],
    axis=1,
    keys=["Total Missing", "In percentage"],
)
total_missing_data_in_test
df_train.isna().sum()
df_train.shape
df_test.shape
df_train.select_dtypes(include=["object"]).columns.tolist()
df_test.select_dtypes(include=["object"]).columns.tolist()
columns = df_train.columns
columns = columns.str.replace("[()]", "")
columns = columns.str.replace("[-]", "")
columns = columns.str.replace("[,]", "")
df_train.columns = columns
df_test.columns = columns
print(df_test.columns)
df_train.columns
from sklearn import preprocessing
sns.set_style("whitegrid")
label_encoder = preprocessing.LabelEncoder()
df_train["Activity"] = label_encoder.fit_transform(df_train["Activity"])
df_train.Activity.unique()
df_test["labels"] = label_encoder.fit_transform(df_test["Activity"].values)
dat = pd.unique(df_test[["Activity", "labels"]].values.ravel())
d = dat.tolist()
print(d)
df_test["Activity"] = label_encoder.fit_transform(df_test["Activity"])
df_test.Activity.unique()
plt.figure(figsize=(16, 8))
plt.title("Data provided by each user", fontsize=20)
sns.countplot(x="subject", hue="Activity", data=df_train)
plt.show()
df_train.describe()
df_test.describe()
plt.title("Data points per activity", fontsize=15)
sns.histplot(df_train.Activity, element="poly", discrete=True, fill=False)
plt.xticks(rotation=0)
plt.show()
plt.figure(figsize=(7, 7))
sns.boxplot(
x="Activity", y="tBodyAccMagmean", data=df_train, showfliers=False, saturation=1
)
plt.ylabel("Acceleration Magnitude mean")
plt.axhline(y=-0.9, xmin=0.1, xmax=0.9, dashes=(5, 5), c="g")
plt.axhline(y=0.02, xmin=0.4, dashes=(5, 5), c="m")
plt.show()
sns.boxplot(x="Activity", y="angleXgravityMean", data=df_train)
plt.axhline(y=0.08, xmin=0.1, xmax=0.9, c="m", dashes=(5, 3))
plt.title("Angle between X-axis and Gravity-mean", fontsize=15)
plt.show()
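# Hedged modelling sketch (the original notebook stops at the EDA): a simple logistic regression
# baseline on the cleaned feature columns, scored on the provided test split.
from sklearn.linear_model import LogisticRegression
X_tr = df_train.drop(columns=["Activity", "subject"])
y_tr = df_train["Activity"]
X_te = df_test.drop(columns=["Activity", "subject", "labels"])
y_te = df_test["Activity"]
baseline_clf = LogisticRegression(max_iter=1000)
baseline_clf.fit(X_tr, y_tr)
print("Baseline test accuracy: {:.3f}".format(baseline_clf.score(X_te, y_te)))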
|
# 
# ## What's in the notebook?
# - Full Exploratory Data Analysis (EDA)
# - Data Cleaning
# - Evaluation
# - BL Models (majority model + tfidf & logreg)
# - Gradient Boosting
# - Simple RNN
# - Glove Bi-LSTM
# - BERT + sigmoid
# - Ensemble (BERT + 10 shallow classifiers)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use("ggplot")
stop = set(stopwords.words("english"))
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import nltk
from sklearn.preprocessing import LabelEncoder
## ADD STOPWORDS
stop = set(list(stop) + ["http", "https", "s", "nt", "m"])
def load_training(training_path="/kaggle/input/nlp-getting-started/train.csv"):
df = pd.read_csv(training_path)
print(df.head(10))
return df
df = load_training()
# # Data Analysis
# In the following we're going to look at some data analysis on the corpus.
# Specifically:
# - General dataset info
# - Number of samples
# - Data Columns
# - Class Label Distribution
# - Text analysis
# - Number of characters in tweets
# - Number of words in a tweet
# - Average word length in a tweet
# - Word distribution
# - Hashtag Analysis
# - KW and Location Analysis
# ## General dataset information
# Here we show the number of samples, the input data columns and the class label distribution
print("-Number of samples: {}".format(len(df)))
print("-Input data columns: {}".format(df.columns))
print("-Class label distribution")
print("--Number of positive samples: {}".format(len(df.loc[df["target"] == 1])))
print("--Number of negative samples: {}".format(len(df.loc[df["target"] == 0])))
print("--Plot of Y distributions")
x = df.target.value_counts()
sns.barplot(x.index, x)
plt.gca().set_ylabel("samples")
def plot_hist_classes(to_plot, _header):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
df_len = to_plot(1)
ax1.hist(df_len, color="red")
ax1.set_title("Negative Tweets [disasters]")
df_len = to_plot(0)
ax2.hist(df_len, color="green")
ax2.set_title("Positive Tweets [good posts]")
fig.suptitle(_header)
plt.show()
plt.close()
# ## Text analysis
# Insights on the number of characters and words in tweets, word length distribution, and word distribution.
def show_word_distrib(target=1, field="text"):
txt = (
df[df["target"] == target][field]
.str.lower()
.str.replace(r"\|", " ")
.str.cat(sep=" ")
)
words = nltk.tokenize.word_tokenize(txt)
words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stop)
rslt = pd.DataFrame(
words_except_stop_dist.most_common(top_N), columns=["Word", "Frequency"]
).set_index("Word")
print(rslt)
matplotlib.style.use("ggplot")
rslt.plot.bar(rot=0)
print("-Number of characters in tweets")
def to_plot(_target):
return df[df["target"] == _target]["text"].str.len()
plot_hist_classes(to_plot, _header="Characters Distribution in Tweets")
print("-Number of words in a tweet")
def to_plot(_target):
return df[df["target"] == _target]["text"].str.split().map(lambda x: len(x))
def how_to_plot(**kwargs):
ax1.hist(df_len, **kwargs)
plot_hist_classes(to_plot, _header="Word Distribution in Tweet")
print("-Average word lenght in a tweet")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
word = df[df["target"] == 1]["text"].str.split().apply(lambda x: [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax1, color="red")
ax1.set_title("Negative Tweets [disasters]")
word = df[df["target"] == 0]["text"].str.split().apply(lambda x: [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax2, color="green")
ax2.set_title("Positive Tweets [good posts]")
fig.suptitle("Average word length in each tweet")
plt.show()
print("-Word distribution")
top_N = 10
print("-- Positive Class")
show_word_distrib(target=1, field="text")
print("-- Negative Class")
show_word_distrib(target=0, field="text")
# ## Hashtag analysis
# A small analysis of the hashtags, to check their possible discriminating power for this task.
print("-Hashtag Analysis ")
def find_hashtags(tweet):
return (
", ".join([match.group(0)[1:] for match in re.finditer(r"#\w+", tweet)]) or None
)
def add_hashtags(df):
from sklearn.feature_extraction.text import CountVectorizer
df["hashtag"] = df["text"].apply(lambda x: find_hashtags(x))
df["hashtag"].fillna(value="no", inplace=True)
return df
top_N = 20
df = add_hashtags(df)
_l = len([v for v in df.hashtag.values if isinstance(v, str)])
print("-Number of tweets with hashtags: {}".format(_l))
print("-- Hashtag distribution in positive samples ")
show_word_distrib(target=1, field="hashtag")
print("-- Hashtag distribution in negative samples ")
show_word_distrib(target=0, field="hashtag")
# There is too much overlap between hashtags in positive and negative samples, meaning that a
# hashtag-based approach will not work that well.
# # KW and Location analysis
#
# Remove the encoded space character for keywords, since appears a lot of times and is junk
df["keyword"] = df["keyword"].map(
lambda s: s.replace("%20", " ") if isinstance(s, str) else s
)
un_KW = {kw for kw in df["keyword"].values if isinstance(kw, str)}
tot_KW = len(df) - len(df[df["keyword"].isna()])
un_LOC = {lc for lc in df["location"].values if isinstance(lc, str)}
tot_LOC = len(df) - len(df[df["location"].isna()])
print("Unique KW: {}".format(len(un_KW)))
print("Out of: {}".format(tot_KW))
print("Samples with no KW: {}".format(len(df[df["keyword"].isna()])))
print("Unique LOC: {}".format(len(un_LOC)))
print("Out of: {}".format(tot_LOC))
print("Samples with no Loc: {}".format(len(df[df["location"].isna()])))
# LOCATION IS TOO SPARSE TO BE USED.
disaster_keywords = [kw for kw in df.loc[df.target == 1].keyword]
regular_keywords = [kw for kw in df.loc[df.target == 0].keyword]
disaster_keywords_counts = dict(
pd.DataFrame(data={"x": disaster_keywords}).x.value_counts()
)
regular_keywords_counts = dict(
pd.DataFrame(data={"x": regular_keywords}).x.value_counts()
)
all_keywords_counts = dict(pd.DataFrame(data={"x": df.keyword.values}).x.value_counts())
# we sort the keywords so the most frequent ones are on top and print them with their
# respective occurrences in both classes of tweets:
for keyword, _ in sorted(all_keywords_counts.items(), key=lambda x: x[1], reverse=True)[
:10
]:
print("> KW: {}".format(keyword))
print(
"-- # in negative tweets: {}".format(disaster_keywords_counts.get(keyword, 0))
)
print("-- # in positive tweets: {}".format(regular_keywords_counts.get(keyword, 0)))
print("--------")
# Many KWs in negative tweets are also present in positive ones, meaning that a KW-based approach is most likely not going to work.
# The hashtag analysis gave the same result.
# # Data Cleaning
# Here we clean the DataFrame.
# Specifically, we handle:
# - stopwords (kept, since removing them causes a drop in performance)
# - URLs
# - HTML
# - emojis
# - punctuation
def clean_df(df):
def remove_stopwords(text):
if text is not None:
tokens = [x for x in word_tokenize(text) if x not in stop]
return " ".join(tokens)
else:
return None
# TMP: TRY TO USE DEFAULT STRING FOR NONE. TODO: USE ROW["KEYWORDS"]
# df['hashtag'] =df['hashtag'].apply(lambda x : "NO" if x is None else x)
df["text"] = df["text"].apply(lambda x: x.lower())
# df["hashtag"] = df['hashtag'].apply(lambda x : x.lower())
# df['text'] =df['text'].apply(lambda x : remove_stopwords(x))
# df['hashtag'] =df['hashtag'].apply(lambda x : remove_stopwords(x))
def remove_URL(text):
url = re.compile(r"https?://\S+|www\.\S+")
return url.sub(r"", text)
df["text"] = df["text"].apply(lambda x: remove_URL(x))
def remove_html(text):
html = re.compile(r"<.*?>")
return html.sub(r"", text)
df["text"] = df["text"].apply(lambda x: remove_html(x))
# Reference : https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b
def remove_emoji(text):
emoji_pattern = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"]+",
flags=re.UNICODE,
)
return emoji_pattern.sub(r"", text)
df["text"] = df["text"].apply(lambda x: remove_emoji(x))
def remove_punct(text):
table = str.maketrans("", "", string.punctuation)
return text.translate(table)
df["text"] = df["text"].apply(lambda x: remove_punct(x))
df.text = df.text.replace("\s+", " ", regex=True)
return df
df = clean_df(df)
print("-- Word distrig Positive Class")
show_word_distrib(target=1, field="text")
print("-- Word distrib Negative Class")
show_word_distrib(target=0, field="text")
# # Utils for models
def read_test(test_path="/kaggle/input/nlp-getting-started/test.csv"):
my_df = pd.read_csv(test_path)
res_df = my_df[["id"]]
my_df = my_df[["text"]]
add_hashtags(my_df)
my_df = clean_df(my_df)
print("Test DF: {}".format(my_df.head(10)))
return my_df, res_df
def dump_preds(res_df, preds, out="default"):
res_df["target"] = None
for i, p in enumerate(preds):
res_df.ix[i, "target"] = p
res_df.to_csv(out, index=False)
def split_data(df, _t=True):
X = df.text
if _t:
Y = df.target
le = LabelEncoder()
Y = le.fit_transform(Y)
Y = Y.reshape(-1, 1)
return X, Y
else:
return X
# # Baseline Models
# We build a simple majority model and TFIDF + LogReg to check the problem hardness.
from sklearn import model_selection
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
"""
Build a baseline TFIDF + LOGREG based just on text
"""
def build_tfidf_logreg(df):
my_df = df[["text", "target"]]
x_features = my_df.columns[0]
x_data = my_df[x_features]
Y = my_df["target"]
x_train, x_validation, y_train, y_validation = model_selection.train_test_split(
x_data.values, Y.values, test_size=0.2, random_state=7
)
# configure TfidfVectorizer to accept tokenized data
# reference http://www.davidsbatista.net/blog/2018/02/28/TfidfVectorizer/
tfidf_vectorizer = TfidfVectorizer(
analyzer="word",
tokenizer=lambda x: x,
preprocessor=lambda x: x,
token_pattern=None,
)
lr = LogisticRegression()
tfidf_lr_pipe = Pipeline([("tfidf", tfidf_vectorizer), ("lr", lr)])
tfidf_lr_pipe.fit(x_train, y_train)
return tfidf_lr_pipe
def test_tfidf_logreg(model, test_path="/kaggle/input/nlp-getting-started/test.csv"):
my_df, res_df = read_test(test_path="/kaggle/input/nlp-getting-started/test.csv")
# x_features = my_df.columns[0]
x_data = my_df["text"].values
preds = model.predict(x_data)
# dump_preds(res_df, preds, out="res_tfidf_logreg4_0.csv")
return res_df
"""
Build a majority model
"""
def test_majority_model(test_path="/kaggle/input/nlp-getting-started/test.csv"):
my_df = pd.read_csv(test_path)
res = my_df[["id"]]
res["target"] = 1
res.to_csv("res_majority.csv", index=False)
return res
# test_majority_model(test_path="/kaggle/input/nlp-getting-started/test.csv")
# 0.42944
# tfidf_log_reg = build_tfidf_logreg(df)
# test_tfidf_logreg(tfidf_log_reg, test_path="/kaggle/input/nlp-getting-started/test.csv")
# 0.63164
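# Hedged sketch (not run in the original notebook): a quick 5-fold cross-validated F1 estimate of
# a plain TFIDF + LogReg baseline on the cleaned training text, to gauge the problem's hardness offline.
from sklearn.model_selection import cross_val_score
cv_pipe = Pipeline(
    [("tfidf", TfidfVectorizer()), ("lr", LogisticRegression(max_iter=1000))]
)
cv_scores = cross_val_score(cv_pipe, df["text"].values, df["target"].values, cv=5, scoring="f1")
print("CV F1: {:.3f} +/- {:.3f}".format(cv_scores.mean(), cv_scores.std()))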
# # Test Gradient Boosting
# Here we check a gradient boosting classifier, which is a slightly less shallow model than logistic regression. In fact we gain about 3% over LogReg.
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
X_train, y_train = split_data(df)
test_df, res_df = read_test(test_path="/kaggle/input/nlp-getting-started/test.csv")
X_test = split_data(test_df, _t=False)
text_clf = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", GradientBoostingClassifier(n_estimators=100)),
]
)
# text_clf.fit(X_train, y_train)
# predicted = text_clf.predict(X_test)
# dump_preds(res_df, predicted, out="submission.csv")
# 0.66462
# # Test RNN Model
# Here we test a simple LSTM model with Dropout. The experiment does not give better performance than gradient boosting.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Model
from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from keras.optimizers import RMSprop
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
# Value tuned based on data analysis
max_words = 750
max_len = 160
def process_data(X, tok=None):
if tok is None:
tok = Tokenizer(num_words=max_words)
tok.fit_on_texts(X)
sequences = tok.texts_to_sequences(X)
sequences_matrix = sequence.pad_sequences(sequences, maxlen=max_len)
return sequences_matrix, tok
def RNN():
inputs = Input(name="inputs", shape=[max_len])
layer = Embedding(max_words, 50, input_length=max_len)(inputs)
layer = LSTM(64)(layer)
layer = Dense(256, name="FC1")(layer)
layer = Activation("relu")(layer)
layer = Dropout(0.5)(layer)
layer = Dense(1, name="out_layer")(layer)
layer = Activation("sigmoid")(layer)
model = Model(inputs=inputs, outputs=layer)
return model
def test_model(
model, tok=None, test_path="/kaggle/input/nlp-getting-started/test.csv", cut=0.5
):
my_df, res = read_test(test_path="/kaggle/input/nlp-getting-started/test.csv")
X = split_data(my_df, _t=False)
sequences_matrix, tok = process_data(X, tok=tok)
preds = model.predict(sequences_matrix)
normalized_preds = []
for p in preds:
if p >= cut:
normalized_preds.append(1)
else:
normalized_preds.append(0)
# dump_preds(res, normalized_preds, out="res_rnn.csv")
return res
def tune_cutoff(model, tok):
x_tune = df["text"].values
x_target = df[["target"]]
X, Y = split_data(df)
sequences_matrix, tok = process_data(X, tok=tok)
preds = model.predict(sequences_matrix)
x_target["preds"] = preds
accumulator = 0
_0 = []
max_0 = 0
_1 = []
min_1 = 0
for i, row in x_target.iterrows():
if row["target"] == 0:
_0.append(row["preds"])
if row["preds"] > max_0:
max_0 = row["preds"]
else:
_1.append(row["preds"])
if row["preds"] < min_1:
min_1 = row["preds"]
mean_0 = sum(_0) / len(_0)
mean_1 = sum(_1) / len(_1)
return max(mean_0, mean_1) / min(mean_0, mean_1)
# model = RNN()
# model.summary()
# model.compile(loss='binary_crossentropy',optimizer=RMSprop(),metrics=['accuracy'])
# X, Y = split_data(df)
# sequences_matrix, tok = process_data(X, tok=None)
# model.fit(sequences_matrix,Y,batch_size=128,epochs=10,validation_split=0.2,callbacks=[EarlyStopping(monitor='val_loss',min_delta=0.001)])
# cut = tune_cutoff(model, tok)
# test_model(model, tok=tok, cut=cut)
# 0.57259
# autocut: 0.57055
# # Test Glove LSTM
# Here we check the usage of GloVe embeddings. We initialize a frozen Embedding layer with the GloVe word vectors and then use a Bi-LSTM to classify the resulting sequence representation.
# We reach a new best of 79.45, meaning that the embedding direction seems to be valuable. We will test some other, newer embeddings.
import numpy as np
import re # regular expression
from bs4 import BeautifulSoup
import pandas as pd
from sklearn import model_selection, preprocessing
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import (
Dense,
Input,
GlobalMaxPooling1D,
Conv1D,
MaxPooling1D,
Embedding,
Dropout,
Bidirectional,
)
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import Constant
from tensorflow.keras import metrics
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
vocab_size = 10000
embedding_dim = 100
max_length = 50
trunc_type = "post"
padding_type = "post"
oov_tok = "<OOV>"
validation_split = 0.10
def load_glove_vectors():
print("Indexing word vectors.")
# Many thanks to rtatman for hosting the GloVe word embeddings dataset on Kaggle
# https://www.kaggle.com/rtatman/glove-global-vectors-for-word-representation
GLOVE_DIR = "/kaggle/input/glove6b100dtxt/"
embeddings_index = {}
print(list(os.walk("/kaggle/input")))
with open(os.path.join(GLOVE_DIR, "glove.6B.100d.txt")) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
print("Found %s word vectors." % len(embeddings_index))
return embeddings_index
def tokenize(df):
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(df.text)
word_index = tokenizer.word_index
print("Found %s unique tokens." % len(word_index))
training_sequences = tokenizer.texts_to_sequences(df.text)
training_padded = pad_sequences(
training_sequences,
maxlen=max_length,
padding=padding_type,
truncating=trunc_type,
)
print("Shape of the data vector is", training_padded.shape, df.target.shape)
return training_sequences, training_padded, word_index, tokenizer
def prepare_embedding_layer(word_index, embeddings_index):
print("Preparing the embedding matrix")
num_words = min(vocab_size, len(word_index) + 1)
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, index in word_index.items():
if index >= vocab_size:
continue
embedding_vector = embeddings_index.get(
word, np.zeros(embedding_dim, dtype="float32")
)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
embedding_layer = Embedding(
num_words,
embedding_dim,
embeddings_initializer=Constant(embedding_matrix),
input_length=max_length,
trainable=False,
)
return embedding_layer
def model(embedding_layer):
METRICS = [
metrics.BinaryAccuracy(name="accuracy"),
metrics.Precision(name="precision"),
metrics.Recall(name="recall"),
metrics.AUC(name="auc"),
]
sequence_input = Input(shape=(max_length,))
embedded_sequences = embedding_layer(sequence_input)
x = Bidirectional(tf.keras.layers.LSTM(64))(embedded_sequences)
x = Dropout(0.5)(x)
x = Dense(32, activation="relu")(x)
x = Dropout(0.5)(x)
output = Dense(1, activation="sigmoid")(x)
model = Model(sequence_input, output)
model.compile(
loss="binary_crossentropy",
optimizer=tf.keras.optimizers.Adam(lr=0.0002),
metrics=METRICS,
)
return model
def test_model(model, test_df, tokenizer):
test_sequences = tokenizer.texts_to_sequences(test_df.text)
test_padded = pad_sequences(
test_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type
)
predictions = model.predict(test_padded)
predictions = np.round(predictions).astype(int).flatten()
dump_preds(res, predictions, out="submission.csv")
"""
embeddings_index = load_glove_vectors()
test_df, res = read_test(test_path="/kaggle/input/nlp-getting-started/test.csv")
training_sequences, training_padded, word_index,tokenizer = tokenize(df)
embedding_layer = prepare_embedding_layer(word_index, embeddings_index)
X_train, X_valid, y_train, y_valid = model_selection.train_test_split(training_padded,
df.target,
test_size = validation_split,
random_state=1)
model = model(embedding_layer)
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_auc',
verbose=1,
patience=10,
mode='max',
restore_best_weights=True)
history = model.fit(X_train, y_train, batch_size = 64, epochs = 30,
callbacks = [early_stopping],
validation_data = (X_valid, y_valid))
test_model(model, test_df, tokenizer)
#0.7945
"""
# # BERT TfHub
# Using the BERT TfHub module, we build a DNN using BERT embeddings and a simple Dense layer with a sigmoid on top.
# This is the best scoring model so far, reaching a best of 82.45%
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow_hub as hub
import tokenization
def bert_encode(texts, tokenizer, max_len=512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[: max_len - 2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence)
tokens += [0] * pad_len
pad_masks = [1] * len(input_sequence) + [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, max_len=512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
clf_output = sequence_output[:, 0, :]
out = Dense(1, activation="sigmoid")(clf_output)
model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
model.compile(Adam(lr=1e-5), loss="binary_crossentropy", metrics=["accuracy"])
return model
"""
test_df, res = read_test(test_path="/kaggle/input/nlp-getting-started/test.csv")
module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
train_input = bert_encode(df.text.values, tokenizer, max_len=160)
test_input = bert_encode(test_df.text.values, tokenizer, max_len=160)
train_labels = df.target.values
model = build_model(bert_layer, max_len=160)
model.summary()
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_auc',
verbose=1,
patience=10,
mode='max',
restore_best_weights=True)
train_history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=3,
batch_size=16
)
test_pred = model.predict(test_input)
_t = []
for t in test_pred:
if t < 0.5:
_t.append(0)
else:
_t.append(1)
dump_preds(res, _t, out="submission.csv")
#82.45
"""
# # XLNet / RoBERTa in 4 lines!
# Here we evaluate XLNet / GPT-2 / RoBERTa using the huggingface simpletransformers library.
import os
import shutil
shutil.rmtree("apex")
if os.path.exists("cache_dir"):
shutil.rmtree("cache_dir")
if os.path.exists("outputs"):
shutil.rmtree("outputs")
if os.path.exists("runs"):
shutil.rmtree("runs")
import torch
import random
import numpy as np
from simpletransformers.classification import ClassificationModel
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold
seed = 98
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
train_data = df[["text", "target"]]
print("Building Model")
train_args = {
"train_batch_size": 16,
"eval_batch_size": 4,
"num_train_epochs": 10,
"use_early_stopping": True,
"early_stopping_patience": 3,
"early_stopping_delta": 0.005,
"max_seq_length": 200,
"save_model_every_epoch": False,
"overwrite_output_dir": True,
"save_eval_checkpoints": False,
"gradient_accumulation_steps": 1,
"overwrite_output_dir": True,
}
# model = ClassificationModel("xlnet", 'xlnet-base-cased', num_labels=2, args=train_args)
model = ClassificationModel(
"xlmroberta", "xlm-roberta-base", num_labels=2, args=train_args
)
print("Training the model")
model.train_model(train_data)
test_df, res = read_test(test_path="/kaggle/input/nlp-getting-started/test.csv")
print("Model evaluation")
predictions, raw_outputs = model.predict(test_df["text"])
dump_preds(res, predictions, out="submission_xlnet.csv")
# # Test SuperLearner ensemble
# Finally we try a super-learner ensemble: a set of base models (including the BERT model above) classifies the samples, and a meta-model ingests their out-of-fold predictions to produce the final classification.
# In theory the super learner should perform at least as well as the best base model, so it could enhance the BERT model's capabilities.
#
# example of a super learner model for binary classification
from tamnun.bert import BertClassifier, BertVectorizer
from numpy import hstack
from numpy import vstack
from numpy import asarray
from sklearn.datasets import make_blobs
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.base import TransformerMixin
from tamnun.bert import BertClassifier, BertVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import StratifiedKFold
import copy
class DenseTransformer(TransformerMixin):
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, y=None, **fit_params):
return X.todense()
# create a list of base-models
def get_models():
models = []
tfidf_vectorizer = TfidfVectorizer(
analyzer="word",
tokenizer=lambda x: x,
preprocessor=lambda x: x,
token_pattern=None,
)
lr = LogisticRegression()
tfidf_lr_pipe = Pipeline([("tfidf", tfidf_vectorizer), ("lr", lr)])
dec_pipe = Pipeline([("tfidf", tfidf_vectorizer), ("dt", DecisionTreeClassifier())])
svc = Pipeline(
[("tfidf", tfidf_vectorizer), ("svc", SVC(gamma="scale", probability=True))]
)
gaus = Pipeline(
[
("tfidf", tfidf_vectorizer),
("to_dense", DenseTransformer()),
("gaus", GaussianNB()),
]
)
kn = Pipeline([("tfidf", tfidf_vectorizer), ("kn", KNeighborsClassifier())])
ada = Pipeline([("tfidf", tfidf_vectorizer), ("ada", AdaBoostClassifier())])
bagging = Pipeline(
[("tfidf", tfidf_vectorizer), ("bag", BaggingClassifier(n_estimators=10))]
)
ran_forest = Pipeline(
[("tfidf", tfidf_vectorizer), ("ran", RandomForestClassifier(n_estimators=10))]
)
extra_tree = Pipeline(
[("tfidf", tfidf_vectorizer), ("extr", ExtraTreesClassifier(n_estimators=10))]
)
gradient_boosting = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", GradientBoostingClassifier(n_estimators=100)),
]
)
# bert = Pipeline([('extr',BertVectorizer()), ("extr2",BertClassifier(num_of_classes=2))])
module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
model = build_model(bert_layer, max_len=160)
model.summary()
# models.append(bert)
models.append(model)
models.append(dec_pipe)
models.append(tfidf_lr_pipe)
models.append(svc)
models.append(gaus)
models.append(kn)
models.append(ada)
models.append(bagging)
models.append(ran_forest)
models.append(extra_tree)
models.append(gradient_boosting)
return models, tokenizer
# collect out of fold predictions form k-fold cross validation
def get_out_of_fold_predictions(X, y, models, tokenizer):
meta_X, meta_y = list(), list()
# define split of data
kfold = StratifiedKFold(n_splits=5, shuffle=True)
# enumerate splits
for train_ix, test_ix in kfold.split(X, y):
fold_yhats = list()
train_X, test_X = X[train_ix], X[test_ix]
train_y, test_y = y[train_ix], y[test_ix]
meta_y.extend(test_y)
# fit and make predictions with each sub-model
for i, model in enumerate(models):
if i == 0:
train_input = bert_encode(train_X, tokenizer, max_len=160)
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor="val_auc",
verbose=1,
patience=10,
mode="max",
restore_best_weights=True,
)
model.fit(
train_input,
train_y,
validation_split=0.25,
epochs=10,
batch_size=16,
callbacks=[early_stopping],
)
_test_X = bert_encode(test_X, tokenizer, max_len=160)
yhat = model.predict(_test_X)
_y = []
for __y in yhat:
one_prob = __y[0]
zero_prob = 1 - one_prob
_y.append([zero_prob, one_prob])
yhat = _y
else:
model.fit(train_X, train_y)
yhat = model.predict_proba(test_X)
# store columns
fold_yhats.append(yhat)
# store fold yhats as columns
meta_X.append(hstack(fold_yhats))
return vstack(meta_X), asarray(meta_y)
# fit all base models on the training dataset
def fit_base_models(X, y, models, bert_tok):
for i, model in enumerate(models):
if i == 0:
train_input = bert_encode(X, bert_tok, max_len=160)
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor="val_auc",
verbose=1,
patience=10,
mode="max",
restore_best_weights=True,
)
model.fit(
train_input,
y,
validation_split=0.10,
epochs=10,
batch_size=16,
callbacks=[early_stopping],
)
else:
model.fit(X, y)
# fit a meta model
def fit_meta_model(X, y):
model = ExtraTreesClassifier(n_estimators=30)
model.fit(X, y)
return model
# make predictions with stacked model
def super_learner_predictions(X, models, meta_model, bert_tok):
meta_X = list()
for i, model in enumerate(models):
if i == 0:
x = bert_encode(X, bert_tok, max_len=160)
yhat = model.predict(x)
_y = []
for y in yhat:
one_prob = y[0]
zero_prob = 1 - one_prob
_y.append([zero_prob, one_prob])
yhat = _y
else:
yhat = model.predict_proba(X)
meta_X.append(yhat)
meta_X = hstack(meta_X)
# predict
return meta_model.predict(meta_X)
"""
test_df, res = read_test(test_path="/kaggle/input/nlp-getting-started/test.csv")
X, y = split_data(df)
X_test = split_data(test_df, _t=False)
# get models
models, bert_tok = get_models()
# get out of fold predictions
meta_X, meta_y = get_out_of_fold_predictions(X, y, models,bert_tok)
print('Meta ', meta_X.shape, meta_y.shape)
# fit base models
fit_base_models(X, y, models, bert_tok)
# fit the meta model
meta_model = fit_meta_model(meta_X, meta_y)
# evaluate meta model
yhat = super_learner_predictions(X_test, models, meta_model, bert_tok)
print("YHat: {}".format(yhat))
dump_preds(res, yhat, out="submission_ensemble.csv")
"""
# # Embedding Stacking!
# Unfortunately the GPU allowed on Kaggle is not enough to hold multiple embeddings if ELMo is one of them, so this is just example code, without an official score.
from flair.data import Corpus
from flair.datasets import TREC_6
from flair.embeddings import (
WordEmbeddings,
FlairEmbeddings,
DocumentRNNEmbeddings,
StackedEmbeddings,
BertEmbeddings,
ELMoEmbeddings,
)
from flair.models import TextClassifier
from flair.trainers import ModelTrainer
from flair.data import Sentence
import pandas as pd
from keras.layers import Input, Dense, GRU, Bidirectional, Flatten
from keras.optimizers import Adam
from keras.models import Model
import numpy as np
def generateTrainingData(
dataset, batch_size, max_length, num_classes, emb_size, stacked_embedding
):
x_batch = []
y_batch = []
while True:
data = dataset.sample(frac=1)
for index, row in data.iterrows():
my_sent = row["text"]
sentence = Sentence(my_sent)
stacked_embedding.embed(sentence)
x = []
for token in sentence:
x.append(token.embedding.cpu().detach().numpy())
if len(x) == max_length:
break
while len(x) < max_length:
x.append(np.zeros(emb_size))
y = np.zeros(num_classes)
y[row["target"]] = 1
x_batch.append(x)
y_batch.append(y)
if len(y_batch) == batch_size:
yield np.array(x_batch), np.array(y_batch)
x_batch = []
y_batch = []
def generatePredictionData(
dataset, batch_size, max_length, num_classes, emb_size, stacked_embedding
):
x_batch = []
while True:
for text in dataset["text"].values:
my_sent = text
sentence = Sentence(my_sent)
stacked_embedding.embed(sentence)
x = []
for token in sentence:
x.append(token.embedding.cpu().detach().numpy())
if len(x) == max_length:
break
while len(x) < max_length:
x.append(np.zeros(emb_size))
x_batch.append(x)
if len(x_batch) == batch_size:
yield np.array(x_batch)
x_batch = []
def get_stacked_embeddings():
stacked_embedding = StackedEmbeddings([ELMoEmbeddings(), WordEmbeddings("en")])
print("Stacked embedding size: {}".format(stacked_embedding.embedding_length))
embedding_size = stacked_embedding.embedding_length
return stacked_embedding, embedding_size
def declare_model(batch_size, max_len, emb_size, gru_size, num_classes):
sample = Input(batch_shape=(batch_size, max_len, emb_size))
gru_out = Bidirectional(GRU(gru_size, return_sequences=True))(sample)
gru_out = Flatten()(gru_out)
predictions = Dense(num_classes, activation="sigmoid")(gru_out)
model = Model(inputs=sample, outputs=[predictions])
model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["acc"])
print(model.summary())
return model
"""
BATCH_SIZE = 256
MAX_LEN = 150
GRU_SIZE = 20
NUM_CLASSES=2
EPOCHS = 1
stacked_embedding, embedding_length = get_stacked_embeddings()
m = declare_model(batch_size=BATCH_SIZE, max_len=MAX_LEN, emb_size=embedding_length, gru_size=GRU_SIZE, num_classes=NUM_CLASSES)
gen = generateTrainingData(df, batch_size=BATCH_SIZE, max_length=MAX_LEN, num_classes=NUM_CLASSES, emb_size=embedding_length,
stacked_embedding= stacked_embedding)
print(gen)
steps_per_epoch = len(df)/BATCH_SIZE
m.fit_generator(gen, steps_per_epoch=1, epochs=EPOCHS, workers=1)
df_test = df[:10]
test_gen = list(generatePredictionData(df_test, batch_size=BATCH_SIZE, max_length=MAX_LEN, num_classes=NUM_CLASSES,
emb_size=embedding_length, stacked_embedding=stacked_embedding))
print(np.argmax(m.predict_generator(test_gen, steps=1), axis=1))
"""
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from fastai import *
from fastai.vision import *
import os
from os import listdir
path = "../input/rice-leaf-diseases/"
os.listdir(path)
directory_root = "../input/rice-leaf-diseases/"
image_list, label_list = [], []
try:
print("[INFO] Loading images ...")
root_dir = listdir(directory_root)
for directory in root_dir:
# remove .DS_Store from list
if directory == ".DS_Store":
root_dir.remove(directory)
for plant_disease_folder in root_dir:
print(f"[INFO] Processing {plant_disease_folder} ...")
plant_disease_image_list = listdir(f"{directory_root}/{plant_disease_folder}/")
for single_plant_disease_image in plant_disease_image_list:
if single_plant_disease_image == ".DS_Store":
plant_disease_image_list.remove(single_plant_disease_image)
for image in plant_disease_image_list[:200]:
image_directory = f"{directory_root}/{plant_disease_folder}/{image}"
if (
image_directory.endswith(".jpg") == True
or image_directory.endswith(".JPG") == True
):
image_list.append(image_directory)
label_list.append(plant_disease_folder)
print("[INFO] Image loading completed")
except Exception as e:
print(f"Error : {e}")
tfms = get_transforms(flip_vert=True, max_warp=0.0, max_zoom=0.0, max_rotate=0.0)
def get_labels(file_path):
dir_name = os.path.dirname(file_path)
split_dir_name = dir_name.split("/")
dir_length = len(split_dir_name)
label = split_dir_name[dir_length - 1]
return label
data = ImageDataBunch.from_name_func(
path,
image_list,
label_func=get_labels,
size=224,
bs=64,
num_workers=2,
ds_tfms=tfms,
)
data = data.normalize()
learn = cnn_learner(data, models.resnet34, metrics=error_rate, model_dir="/tmp/models/")
learn.fit_one_cycle(15)
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.recorder.plot_losses()
os.chdir("/output")
learn.save("model.pkl")
from IPython.display import FileLinks
FileLinks(".")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train_data = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/train.csv")
test_data = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/test.csv")
# **Explore Data**
def resumetable(df):
print(f"Dataset Shape: {df.shape}")
summary = pd.DataFrame(df.dtypes, columns=["dtypes"])
summary = summary.reset_index()
summary["Name"] = summary["index"]
summary = summary[["Name", "dtypes"]]
summary["Missing"] = df.isnull().sum().values
summary["Total"] = df.count().values
summary["Missing Percentage"] = (summary["Missing"] / summary["Total"]) * 100
summary["Uniques"] = df.nunique().values
summary["Uniques_val"] = [df[col].unique() for col in df.columns]
return summary
resumetable(train_data)
# **Missing Values**
def fillna_sample(df):
for col in df.columns:
df.loc[df[col].isna(), col] = (
df[col][-df[col].isna()].sample(n=df[col].isna().sum()).values
)
fillna_sample(train_data)
fillna_sample(test_data)
# **Split label Column & Drop usless**
train_label = train_data["target"]
train_data.drop(columns=["id", "target"], axis=1, inplace=True)
test_id = test_data["id"]
test_data.drop(columns=["id"], axis=1, inplace=True)
# **Convert Categorical Features to Numeric**
from sklearn.preprocessing import LabelEncoder
lb_bin = LabelEncoder()
categorical_cols = train_data.select_dtypes(include=["object"]).columns
for col in categorical_cols:
    # fit on the union of train and test values so both datasets share one consistent encoding
    lb_bin.fit(pd.concat([train_data[col], test_data[col]], axis=0))
    train_data[col] = lb_bin.transform(train_data[col])
    test_data[col] = lb_bin.transform(test_data[col])
# **Scale Data**
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
train_data = scaler.fit_transform(train_data)
test_data = scaler.transform(test_data)  # reuse the scaler fitted on the training data
# **Fit Model & Predict**
from sklearn.linear_model import LogisticRegression
lrclf = LogisticRegression(C=5)
lrclf.fit(train_data, train_label)
lrclf_pred = lrclf.predict_proba(test_data)
submission = pd.DataFrame({"id": test_id, "target": lrclf_pred[:, 1]})
submission.to_csv("submission.csv", index=False)
|
import os
import sys
import shutil
import random
import warnings
warnings.filterwarnings("ignore") # , category=UserWarning
import numpy as np
import pandas as pd
from decimal import *
from pathlib import Path
import glob
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.multioutput import MultiOutputRegressor
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import plot_tree
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import RFE
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
# from stepwise_regression import step_reg
import xgboost as xgb
from xgboost import XGBRegressor
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import seaborn as sns
def smape_plus_1(y_true, y_pred):
y_true_plus_1 = y_true + 1
y_pred_plus_1 = y_pred + 1
metric = np.zeros(len(y_true_plus_1))
numerator = np.abs(y_true_plus_1 - y_pred_plus_1)
denominator = (np.abs(y_true_plus_1) + np.abs(y_pred_plus_1)) / 2
mask_not_zeros = (y_true_plus_1 != 0) | (y_pred_plus_1 != 0)
metric[mask_not_zeros] = numerator[mask_not_zeros] / denominator[mask_not_zeros]
return 100 * np.nanmean(metric)
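# Quick check of the metric on toy arrays (illustrative only): identical predictions score 0,
# and the +1 shift keeps the denominator well-defined when the true value is 0.
print(smape_plus_1(np.array([0.0, 10.0, 20.0]), np.array([0.0, 10.0, 20.0])))  # 0.0
print(smape_plus_1(np.array([0.0, 10.0, 20.0]), np.array([1.0, 12.0, 18.0])))  # approximately 31.1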
# ## Get data
submission = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/sample_submission.csv"
)
print(submission.shape)
submission.head()
test_clinical = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test.csv"
)
print(test_clinical.shape)
test_clinical.head()
train_clinical = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
# pd.options.display.float_format = '{:,.0f}'.format
train_clinical = train_clinical.drop("upd23b_clinical_state_on_medication", axis=1)
print(train_clinical.shape)
train_clinical.head()
test_proteins = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_proteins.csv"
)
print(test_proteins.shape)
test_proteins.head()
train_proteins = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv"
)
print(train_proteins.shape)
train_proteins.head()
test_peptides = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/example_test_files/test_peptides.csv"
)
print(test_peptides.shape)
test_peptides.head()
train_peptides = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv"
)
print(train_peptides.shape)
train_peptides.head()
# ### Join Clinical with Protein data and display in wide format
proteins_wide = pd.pivot(
train_proteins,
index=["visit_id", "visit_month", "patient_id"],
columns="UniProt",
values="NPX",
)
proteins_wide = proteins_wide.rename_axis(None, axis=1).reset_index()
proteins_wide = proteins_wide.fillna(proteins_wide.median())
proteins_wide["visit_month"] = proteins_wide["visit_month"].astype(str)
proteins_wide["patient_id"] = proteins_wide["patient_id"].astype(str)
train_clinical["visit_month"] = train_clinical["visit_month"].astype(str)
train_clinical["patient_id"] = train_clinical["patient_id"].astype(str)
train_clinical_proteins = pd.merge(
train_clinical,
proteins_wide,
how="left",
on=["visit_id", "patient_id", "visit_month"],
)
train_clinical_proteins.head()
# ## Updrs_1
df_updrs_1 = train_clinical_proteins
df_updrs_1 = df_updrs_1.dropna(
subset=["Q99435", "Q99674", "Q99683", "Q99829", "Q99832"]
)
df_updrs_1 = df_updrs_1.drop(
["visit_id", "patient_id", "visit_month", "updrs_2", "updrs_3", "updrs_4"], axis=1
)
df_updrs_1 = df_updrs_1.fillna(proteins_wide.median())
print(df_updrs_1.shape)
df_updrs_1.head()
# ### Find the most important proteins
X = df_updrs_1.drop("updrs_1", axis=1)
y = df_updrs_1.iloc[:, 0]
# Train/test set generation
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=15
)
# Scale train and test sets with StandardScaler
# X_train_std = StandardScaler().fit_transform(X_train)
# X_test_std = StandardScaler().fit_transform(X_test)
# Fix the dimensions of the target array
y_train = y_train.values.reshape(-1, 1)
y_test = y_test.values.reshape(-1, 1)
m = XGBRegressor()
m.fit(X, y)
feat_dict = {}
for col, val in sorted(
zip(X_train.columns, m.feature_importances_), key=lambda x: x[1], reverse=True
):
feat_dict[col] = val
updrs_1_xgb = pd.DataFrame(
{"Feature": feat_dict.keys(), "Importance": feat_dict.values()}
)
updrs_1_xgb.to_csv("updrs_1_xgb.csv")
# Find cumulative importance and create list of columns to model
updrs_1_xgb["Cum_importance"] = updrs_1_xgb["Importance"].cumsum()
updrs_1_xgb
updrs_1_xgb_keep = updrs_1_xgb.drop(
updrs_1_xgb[updrs_1_xgb.Cum_importance >= 0.80].index
)
updrs_1_xgb_keep
updrs_1_xgb_keep_list = updrs_1_xgb_keep.Feature.values.tolist()
# Create df_updrs_1_model
updrs_1_xgb_keep_list.insert(0, "updrs_1")
df_updrs_1_model = df_updrs_1[df_updrs_1.columns.intersection(updrs_1_xgb_keep_list)]
print(df_updrs_1_model.shape)
df_updrs_1_model.head()
# ### Linear regression model and prediction
# Linear regression
X = df_updrs_1_model.drop("updrs_1", axis=1)
y = df_updrs_1_model.iloc[:, 0]
model = LinearRegression().fit(X, y)
r_sq = model.score(X, y)
print("coefficient of determination:", r_sq)
print("intercept:", model.intercept_)
print("slope:", model.coef_)
y_pred = model.predict(X)
print("Predicted response:", y_pred, sep="\n")
# ## Updrs_2
df_updrs_2 = train_clinical_proteins
df_updrs_2 = df_updrs_2.dropna(
subset=["Q99435", "Q99674", "Q99683", "Q99829", "Q99832"]
)
df_updrs_2 = df_updrs_2.drop(
["visit_id", "patient_id", "visit_month", "updrs_1", "updrs_3", "updrs_4"], axis=1
)
df_updrs_2 = df_updrs_2.fillna(proteins_wide.median())
print(df_updrs_2.shape)
df_updrs_2.head()
# ### Find the most important proteins
X = df_updrs_2.drop("updrs_2", axis=1)
y = df_updrs_2.iloc[:, 0]
# Train/test set generation
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=15
)
# Scale train and test sets with StandardScaler
# X_train_std = StandardScaler().fit_transform(X_train)
# X_test_std = StandardScaler().fit_transform(X_test)
# Fix the dimensions of the target array
y_train = y_train.values.reshape(-1, 1)
y_test = y_test.values.reshape(-1, 1)
m = XGBRegressor()
m.fit(X, y)
feat_dict = {}
for col, val in sorted(
zip(X_train.columns, m.feature_importances_), key=lambda x: x[1], reverse=True
):
feat_dict[col] = val
updrs_2_xgb = pd.DataFrame(
{"Feature": feat_dict.keys(), "Importance": feat_dict.values()}
)
updrs_2_xgb.to_csv("updrs_2_xgb.csv")
# Find cumulative importance and create list of columns to model
updrs_2_xgb["Cum_importance"] = updrs_2_xgb["Importance"].cumsum()
updrs_2_xgb
updrs_2_xgb_keep = updrs_2_xgb.drop(
updrs_2_xgb[updrs_2_xgb.Cum_importance >= 0.80].index
)
updrs_2_xgb_keep
updrs_2_xgb_keep_list = updrs_2_xgb_keep.Feature.values.tolist()
# Create df_updrs_2_model
updrs_2_xgb_keep_list.insert(0, "updrs_2")
df_updrs_2_model = df_updrs_2[df_updrs_2.columns.intersection(updrs_2_xgb_keep_list)]
print(df_updrs_2_model.shape)
df_updrs_2_model.head()
# ### Linear regression model and prediction
# Linear regression
X = df_updrs_2_model.drop("updrs_2", axis=1)
y = df_updrs_2_model.iloc[:, 0]
model = LinearRegression().fit(X, y)
r_sq = model.score(X, y)
print("coefficient of determination:", r_sq)
print("intercept:", model.intercept_)
print("slope:", model.coef_)
y_pred = model.predict(X)
print("Predicted response:", y_pred, sep="\n")
# ## Updrs_3
df_updrs_3 = train_clinical_proteins
df_updrs_3 = df_updrs_3.dropna(
subset=["Q99435", "Q99674", "Q99683", "Q99829", "Q99832"]
)
df_updrs_3 = df_updrs_3.drop(
["visit_id", "patient_id", "visit_month", "updrs_1", "updrs_2", "updrs_4"], axis=1
)
df_updrs_3 = df_updrs_3.fillna(proteins_wide.median())
df_updrs_3 = df_updrs_3[df_updrs_3["updrs_3"].notna()]
print(df_updrs_3.shape)
df_updrs_3.head()
# ### Find the most important proteins
X = df_updrs_3.drop("updrs_3", axis=1)
y = df_updrs_3.iloc[:, 0]
# Train/test set generation
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=15
)
# Scale train and test sets with StandardScaler
# X_train_std = StandardScaler().fit_transform(X_train)
# X_test_std = StandardScaler().fit_transform(X_test)
# Fix the dimensions of the target array
y_train = y_train.values.reshape(-1, 1)
y_test = y_test.values.reshape(-1, 1)
m = XGBRegressor()
m.fit(X, y)
feat_dict = {}
for col, val in sorted(
zip(X_train.columns, m.feature_importances_), key=lambda x: x[1], reverse=True
):
feat_dict[col] = val
updrs_3_xgb = pd.DataFrame(
{"Feature": feat_dict.keys(), "Importance": feat_dict.values()}
)
updrs_3_xgb.to_csv("updrs_3_xgb.csv")
# Find cumulative importance and create list of columns to model
updrs_3_xgb["Cum_importance"] = updrs_3_xgb["Importance"].cumsum()
updrs_3_xgb
updrs_3_xgb_keep = updrs_3_xgb.drop(
updrs_3_xgb[updrs_3_xgb.Cum_importance >= 0.80].index
)
updrs_3_xgb_keep
updrs_3_xgb_keep_list = updrs_3_xgb_keep.Feature.values.tolist()
# Create df_updrs_3_model
updrs_3_xgb_keep_list.insert(0, "updrs_3")
df_updrs_3_model = df_updrs_3[df_updrs_3.columns.intersection(updrs_3_xgb_keep_list)]
print(df_updrs_3_model.shape)
df_updrs_3_model.head()
# ### Linear regression model and prediction
# Linear regression
X = df_updrs_3_model.drop("updrs_3", axis=1)
y = df_updrs_3_model.iloc[:, 0]
model = LinearRegression().fit(X, y)
r_sq = model.score(X, y)
print("coefficient of determination:", r_sq)
print("intercept:", model.intercept_)
print("slope:", model.coef_)
y_pred = model.predict(X)
print("Predicted response:", y_pred, sep="\n")
# ## Updrs_4
df_updrs_4 = train_clinical_proteins
df_updrs_4 = df_updrs_4.dropna(
subset=["Q99435", "Q99674", "Q99683", "Q99829", "Q99832"]
)
df_updrs_4 = df_updrs_4.drop(
["visit_id", "patient_id", "visit_month", "updrs_1", "updrs_2", "updrs_3"], axis=1
)
df_updrs_4 = df_updrs_4.fillna(proteins_wide.median())
df_updrs_4 = df_updrs_4[df_updrs_4["updrs_4"].notna()]
print(df_updrs_4.shape)
df_updrs_4.head()
X = df_updrs_4.drop("updrs_4", axis=1)
y = df_updrs_4.iloc[:, 0]
# Train/test set generation
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=15
)
# Scale train and test sets with StandardScaler
# X_train_std = StandardScaler().fit_transform(X_train)
# X_test_std = StandardScaler().fit_transform(X_test)
# Fix the dimensions of the target array
y_train = y_train.values.reshape(-1, 1)
y_test = y_test.values.reshape(-1, 1)
m = XGBRegressor()
m.fit(X, y)
feat_dict = {}
for col, val in sorted(
zip(X_train.columns, m.feature_importances_), key=lambda x: x[1], reverse=True
):
feat_dict[col] = val
updrs_4_xgb = pd.DataFrame(
{"Feature": feat_dict.keys(), "Importance": feat_dict.values()}
)
updrs_4_xgb.to_csv("updrs_4_xgb.csv")
# Find cumulative importance and create list of columns to model
updrs_4_xgb["Cum_importance"] = updrs_4_xgb["Importance"].cumsum()
updrs_4_xgb
updrs_4_xgb_keep = updrs_4_xgb.drop(
updrs_4_xgb[updrs_4_xgb.Cum_importance >= 0.80].index
)
updrs_4_xgb_keep
updrs_4_xgb_keep_list = updrs_4_xgb_keep.Feature.values.tolist()
# Create df_updrs_4_model
updrs_4_xgb_keep_list.insert(0, "updrs_4")
df_updrs_4_model = df_updrs_4[df_updrs_4.columns.intersection(updrs_4_xgb_keep_list)]
print(df_updrs_4_model.shape)
df_updrs_4_model.head()
# ### Linear regression model and prediction
# Linear regression
X = df_updrs_4_model.drop("updrs_4", axis=1)
y = df_updrs_4_model.iloc[:, 0]
model = LinearRegression().fit(X, y)
r_sq = model.score(X, y)
print("coefficient of determination:", r_sq)
print("intercept:", model.intercept_)
print("slope:", model.coef_)
y_pred = model.predict(X)
print("Predicted response:", y_pred, sep="\n")
# ## Competition Submission
# #### (This code has been adapted from that provided by Vitaly Kudelya in his Notebook [Explain Dataset, Test API, Cross-Validation Tips](https://www.kaggle.com/code/vitalykudelya/explain-dataset-test-api-cross-validation-tips))
# the constant prediction for each UPDRS target is derived from the mean of the corresponding predictions above
target_to_prediciton = {
"updrs_1": 5.56,
"updrs_2": 5.66,
"updrs_3": 17.57,
"updrs_4": 0,
}
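# A hedged sketch (not the author's exact derivation): constant baselines of a comparable kind
# can be read directly off the training data as the mean of each UPDRS score.
for _t in ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]:
    print(_t, round(train_clinical[_t].mean(), 2))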
import amp_pd_peptide
amp_pd_peptide.make_env.func_dict["__called__"] = False
env = amp_pd_peptide.make_env() # initialize the environment
iter_test = env.iter_test() # an iterator which loops over the test files
# The API will deliver four dataframes in this specific order:
iteration_to_data = {}
for iteration, (
test_clinical,
test_peptides,
test_proteins,
sample_submission,
) in enumerate(iter_test):
print("ITERATION", iteration)
sample_submission["patient_id"] = sample_submission["prediction_id"].map(
lambda x: int(x.split("_")[0])
)
sample_submission["visit_month"] = sample_submission["prediction_id"].map(
lambda x: int(x.split("_")[1])
)
sample_submission["target_name"] = sample_submission["prediction_id"].map(
lambda x: "updrs_" + x.split("_")[3]
)
sample_submission["plus_month"] = sample_submission["prediction_id"].map(
lambda x: int(x.split("_")[5])
)
sample_submission["pred_month"] = (
sample_submission["visit_month"] + sample_submission["plus_month"]
)
data = {
"test_clinical": test_clinical,
"test_peptides": test_peptides,
"test_proteins": test_proteins,
"sample_submission": sample_submission,
}
iteration_to_data[iteration] = data
print(test_clinical.shape)
display(test_clinical.head())
display(test_peptides.head())
display(test_proteins.head())
display(sample_submission.head())
print()
print()
# change rating
for i in range(1, 5):
target = f"updrs_{i}"
mask_target = sample_submission["target_name"] == target
sample_submission.loc[mask_target, "rating"] = target_to_prediciton[target]
# don't add additional columns (other than prediction_id, rating) in env.predict, it fails after submitting notebook
env.predict(sample_submission[["prediction_id", "rating"]])
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import matplotlib.pyplot as plt # to plot accuracy graphs
from keras import layers # for building layers of neural net
from keras.models import Model
from keras.models import load_model
from keras import callbacks # for training logs, saving to disk periodically
import cv2 # OpenCV(Open Source computer vision lib), containg CV algos
import string
os.listdir("/kaggle/input/captcha-data/data/train")
# total no of images in dataset
n = len(os.listdir("/kaggle/input/captcha-data/data/train"))
n
# defining size of image
imgshape = (50, 200, 1) # 50-height, 200-width, 1-no of channels
character = (
string.ascii_lowercase + "0123456789"
) # All symbols captcha can contain i.e. lowercase and digits 0-9
nchar = len(character) # total number of char possible
nchar
character
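# Illustrative encoding of one sample captcha label, "2b827": each of its 5 characters becomes
# a one-hot row of length nchar, which is exactly what preprocess() below builds for every image.
_example_target = np.zeros((5, nchar))
for _j, _k in enumerate("2b827"):
    _example_target[_j, character.find(_k)] = 1
print(_example_target.shape, _example_target.sum(axis=1))  # (5, 36) with a single 1 per row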
# preprocesss image
def preprocess():
X = np.zeros((n, 50, 200, 1)) # 856*50*200 array with all entries 0
y = np.zeros((5, n, nchar)) # 5*856*36(5 letters in captcha) with all entries 0
for i, pic in enumerate(os.listdir("/kaggle/input/captcha-data/data/train")):
# i represents index no. of image in directory
# pic contains the file name of the particular image to be preprocessed at a time
img = cv2.imread(
os.path.join("/kaggle/input/captcha-data/data/train", pic),
cv2.IMREAD_GRAYSCALE,
) # Read image in grayscale format
pic_target = pic[
:-4
] # this drops the .png extension from file name and contains only the captcha for training
if len(pic_target) < 6: # captcha is not more than 5 letters
img = img / 255.0 # scales the image between 0 and 1
img = np.reshape(
img, (50, 200, 1)
) # reshapes image to width 200 , height 50 ,channel 1
target = np.zeros(
(5, nchar)
) # creates an array of size 5*36 with all entries 0
for j, k in enumerate(pic_target):
# j iterates from 0 to 4(There are 5 letters in captcha and indexing is 0 based)
# k denotes the letter in captcha which is to be scanned
index = character.find(
k
) # index stores the position of letter k of captcha in the character string
target[
j, index
] = 1 # replaces 0 with 1 in the target array at the position of the letter in captcha
X[i] = img # stores all the images
y[
:, i
] = target # stores all the info about the letters in captcha of all images
return X, y
# create model
def createmodel():
img = layers.Input(shape=imgshape) # Get image as an input of size 50,200,1
# convolution layers
conv1 = layers.Conv2D(
filters=16, kernel_size=(3, 3), padding="same", activation="relu"
)(
img
) # 50*200
mp1 = layers.MaxPooling2D(padding="same")(conv1) # 25*100
conv2 = layers.Conv2D(
filters=32, kernel_size=(3, 3), padding="same", activation="relu"
)(mp1)
mp2 = layers.MaxPooling2D(padding="same")(conv2) # 13*50
conv3 = layers.Conv2D(
filters=32, kernel_size=(3, 3), padding="same", activation="relu"
)(mp2)
bn = layers.BatchNormalization()(conv3) # to improve the stability of model
mp3 = layers.MaxPooling2D(padding="same")(bn) # 7*25
flat = layers.Flatten()(mp3) # convert the layer into 1-D
# hidden and output layers for the 5 outputs
outs = []
for _ in range(5): # for 5 letters of captcha
dens1 = layers.Dense(64, activation="relu")(flat)
drop = layers.Dropout(0.5)(dens1) # drops 0.5 fraction of nodes
res = layers.Dense(nchar, activation="softmax")(drop)
outs.append(res) # result of layers
# Compile model and return it
model = Model(img, outs) # create model
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return model
# Create model
model = createmodel()
model.summary()
X, y = preprocess()
plt.imshow(X[1])
print(
    y[2][1]
)  # the one-hot target for the 3rd character of the 2nd captcha (y has shape 5 x n x nchar)
# training data split into features and labels
X_train, y_train = X, y
# Applying the model
hist = model.fit(
X_train,
[y_train[0], y_train[1], y_train[2], y_train[3], y_train[4]],
batch_size=32,
epochs=60,
validation_split=0.2,
)
# batch size- 32 defines no. of samples per gradient update
# validation_split=0.2 splits the training set 80-20% for training and validation
# graph of loss vs epochs
for label in ["loss"]:
plt.plot(hist.history[label], label=label)
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
# graph of validation accuracy of the 1st output head vs epochs
for label in ["val_dense_51_accuracy"]:
plt.plot(hist.history[label], label=label)
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Accuracy of Dense 1 layer")
plt.show()
# graph of validation accuracy of the 2nd output head vs epochs
for label in ["val_dense_53_accuracy"]:
plt.plot(hist.history[label], label=label)
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Accuracy of Dense 2 layer")
plt.show()
# graph of validation accuracy of the 3rd output head vs epochs
for label in ["val_dense_55_accuracy"]:
plt.plot(hist.history[label], label=label)
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Accuracy of Dense 3 layer")
plt.show()
# graph of validation accuracy of the 4th output head vs epochs
for label in ["val_dense_57_accuracy"]:
plt.plot(hist.history[label], label=label)
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Accuracy of Dense 4 layer")
plt.show()
# graph of validation accuracy of the 5th output head vs epochs
for label in ["val_dense_59_accuracy"]:
plt.plot(hist.history[label], label=label)
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Accuracy of Dense 5 layer")
plt.show()
# Loss on training set
# Finding Loss on training set
preds = model.evaluate(
X_train, [y_train[0], y_train[1], y_train[2], y_train[3], y_train[4]]
)
print("Loss on training set= " + str(preds[0]))
print("No. of test images: ", len(os.listdir("/kaggle/input/captcha-data/data/val")))
# Preprocessing for testing data
def preprocess_test():
    n_test = len(os.listdir("/kaggle/input/captcha-data/data/val"))  # 214 validation images
    X = np.zeros((n_test, 50, 200, 1))  # 214*50*200 array with all entries 0
    y = np.zeros((5, n_test, nchar))  # 5*214*36(5 letters in captcha) with all entries 0
for i, pic in enumerate(os.listdir("/kaggle/input/captcha-data/data/val")):
# i represents index no. of image in directory
# pic contains the file name of the particular image to be preprocessed at a time
img = cv2.imread(
os.path.join("/kaggle/input/captcha-data/data/val", pic),
cv2.IMREAD_GRAYSCALE,
) # Read image in grayscale format
pic_target = pic[
:-4
] # this drops the .png extension from file name and contains only the captcha for training
if len(pic_target) < 6: # captcha is not more than 5 letters
img = img / 255.0 # scales the image between 0 and 1
img = np.reshape(
img, (50, 200, 1)
) # reshapes image to width 200 , height 50 ,channel 1
target = np.zeros(
(5, nchar)
) # creates an array of size 5*36 with all entries 0
for j, k in enumerate(pic_target):
# j iterates from 0 to 4(There are 5 letters in captcha and indexing is 0 based)
# k denotes the letter in captcha which is to be scanned
index = character.find(
k
) # index stores the position of letter k of captcha in the character string
target[
j, index
] = 1 # replaces 0 with 1 in the target array at the position of the letter in captcha
X[i] = img # stores all the images
y[
:, i
] = target # stores all the info about the letters in captcha of all images
return X, y
X_test, y_test = preprocess_test()
# Finding loss on test set
preds = model.evaluate(X_test, [y_test[0], y_test[1], y_test[2], y_test[3], y_test[4]])
print("Loss on testing set= " + str(preds[0]))
# to predict captcha
def predict(filepath):
img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    if img is not None:  # image found at the file path
        img = img / 255.0  # Scale image
    else:
        print("Not detected")
        return None
    res = np.array(model.predict(img[np.newaxis, :, :, np.newaxis]))  # add batch and channel axes
    # the model expects input of shape (batch, 50, 200, 1); stacking the 5 output heads gives shape 5*1*36
result = np.reshape(res, (5, 36)) # reshape the array
k_ind = []
probs = []
for i in result:
k_ind.append(np.argmax(i)) # adds the index of the char found in captcha
capt = "" # string to store predicted captcha
for k in k_ind:
capt += character[k] # finds the char corresponding to the index
return capt
# Check model on samples
img = cv2.imread(
"/kaggle/input/captcha-data/data/sample/2b827.png", cv2.IMREAD_GRAYSCALE
)
plt.imshow(img, cmap=plt.get_cmap("gray"))
print(
"Predicted Captcha =", predict("/kaggle/input/captcha-data/data/sample/2b827.png")
)
# displaying the model performance on test images
cnt = 0
for i, pic in enumerate(os.listdir("/kaggle/input/captcha-data/data/val")):
img = cv2.imread(
os.path.join("/kaggle/input/captcha-data/data/val", pic), cv2.IMREAD_GRAYSCALE
)
plt.imshow(img, cmap=plt.get_cmap("gray"))
plt.show()
print("Actual Captcha:", pic[:-4])
img_path = "/kaggle/input/captcha-data/data/val/" + str(pic)
predicted_img = predict(img_path)
print("Predicted Captcha =", predicted_img)
print("\n")
if predicted_img == pic[:-4]:
cnt += 1
print("The accuracy of breaking test captchas is:", str((149 / 214) * 100)[0:6], "%")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
d = pd.read_csv("/kaggle/input/car-prices-market/new_cars_prices.csv")
d
d.info()
d.describe()
d.head()
d.tail()
d[["Car Model"]].describe()
d["Car Model"].unique()
d.columns
d[
[
"web-scraper-order",
"Car Model",
"Old Price",
"Price Change",
"New Price",
"date_range",
]
].describe()
# pd.DataFrame.replace needs both the value to find and its replacement; as an illustrative
# example, empty strings could be mapped to NaN:
d = d.replace(to_replace="", value=np.nan)
|
# # Import necessary libraries
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# # Read data
test = pd.read_csv("../input/titanic/test.csv")
train = pd.read_csv("../input/titanic/train.csv")
# # Check missing values
print("Train")
print(train.isnull().sum())
print("==========================")
print("Test")
print(test.isnull().sum())
# # Fill missing 'Age', 'Cabin', 'Embarked' and 'Fare' features
train = train.fillna({"Age": -0.1})
test = test.fillna({"Age": -0.1})
# encode sex
train["Sex"] = LabelEncoder().fit_transform(train["Sex"])
test["Sex"] = LabelEncoder().fit_transform(test["Sex"])
# encode cabin
train.loc[~train.Cabin.isnull(), "Cabin"] = 1
train.loc[train.Cabin.isnull(), "Cabin"] = 0
test.loc[~test.Cabin.isnull(), "Cabin"] = 1
test.loc[test.Cabin.isnull(), "Cabin"] = 0
# find the most common embarkation point and fill missing 'Embarked' values with it
common_embarked = train.groupby(["Embarked"])["Embarked"].value_counts().idxmax()[0]
train = train.fillna({"Embarked": common_embarked})
test = test.fillna({"Embarked": common_embarked})
# fill 'Fare' null values in test
test.loc[test.Fare.isnull(), "Fare"] = 0
# # Create new feature: 'Title' derived from 'Name'
train["Title"] = train.Name.str.split(",", n=1, expand=True)[1].str.split(
".", n=1, expand=True
)[0]
train["Title"] = train.Title.str.strip()
test["Title"] = test.Name.str.split(",", n=1, expand=True)[1].str.split(
".", n=1, expand=True
)[0]
test["Title"] = test.Title.str.strip()
train.head()
train.loc[train.Title == "Ms", "Title"] = "Miss"
test.loc[test.Title == "Ms", "Title"] = "Miss"
train.loc[~train.Title.isin(["Mr", "Miss", "Mrs", "Master"]), "Title"] = "Other"
test.loc[~test.Title.isin(["Mr", "Miss", "Mrs", "Master"]), "Title"] = "Other"
# # Process 'Ticket' feature
# Ticket numbers differ from each other mainly by their prefix. Let's see the connection between these prefixes and the 'Survived' feature
train["TicketPrefix"] = train.Ticket.str.split(" ").apply(
lambda x: x[0] if len(x) > 1 else "No"
)
test["TicketPrefix"] = test.Ticket.str.split(" ").apply(
lambda x: x[0] if len(x) > 1 else "No"
)
train.head()
train.groupby(["TicketPrefix"])["TicketPrefix"].count()
train.loc[train.TicketPrefix.str.startswith("A"), "TicketPrefix"] = "A"
train.loc[train.TicketPrefix.str.startswith("C"), "TicketPrefix"] = "C"
train.loc[train.TicketPrefix.str.startswith("F"), "TicketPrefix"] = "F"
train.loc[train.TicketPrefix.str.startswith("P"), "TicketPrefix"] = "P"
train.loc[train.TicketPrefix.str.startswith("S"), "TicketPrefix"] = "S"
train.loc[train.TicketPrefix.str.startswith("W"), "TicketPrefix"] = "W"
test.loc[test.TicketPrefix.str.startswith("A"), "TicketPrefix"] = "A"
test.loc[test.TicketPrefix.str.startswith("C"), "TicketPrefix"] = "C"
test.loc[test.TicketPrefix.str.startswith("F"), "TicketPrefix"] = "F"
test.loc[test.TicketPrefix.str.startswith("P"), "TicketPrefix"] = "P"
test.loc[test.TicketPrefix.str.startswith("S"), "TicketPrefix"] = "S"
test.loc[test.TicketPrefix.str.startswith("W"), "TicketPrefix"] = "W"
train.groupby(["TicketPrefix"])["TicketPrefix"].count()
sns.barplot(x="TicketPrefix", y="Survived", data=train)
# Let's combine 'Parch' and 'SibSp' feature and create new feature 'Alone'
train["Alone"] = ((train.Parch + train.SibSp) == 0).astype(int)
test["Alone"] = ((test.Parch + test.SibSp) == 0).astype(int)
train.head()
train = train.drop(["Name", "SibSp", "Parch", "Embarked"], axis=1)
test = test.drop(["Name", "SibSp", "Parch", "Embarked"], axis=1)
train.head()
# # Encode 'TicketPrefix' feature
def encode_ticket(t):
e = {"No": 0, "A": 1, "P": 2, "S": 3, "C": 4, "W": 5, "F": 6}
return e.get(t, -1)
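# Quick illustrative check: known prefixes map to their fixed codes, anything unexpected maps to -1.
print(encode_ticket("P"), encode_ticket("No"), encode_ticket("X"))  # 2 0 -1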
train["Ticket"] = train.TicketPrefix.apply(encode_ticket)
test["Ticket"] = test.TicketPrefix.apply(encode_ticket)
train.head()
# # Encode 'Title' feature
train.Title = LabelEncoder().fit_transform(train.Title)
test.Title = LabelEncoder().fit_transform(test.Title)
train.head()
train.drop(["TicketPrefix"], axis=1, inplace=True)
test.drop(["TicketPrefix"], axis=1, inplace=True)
train.head()
# # Predict missing 'Ages'
# combine data
data = pd.concat([train, test])
data.drop(["Survived"], axis=1, inplace=True)
data.head()
data.drop(["PassengerId"], axis=1, inplace=True)
predictors = data[data.Age > 0]
predictors.drop(["Age"], axis=1, inplace=True)
targets = np.array(data[data.Age > 0].Age)
predictors.shape, targets.shape
predictors.head()
predictors = StandardScaler().fit_transform(predictors)
mlp = MLPRegressor(hidden_layer_sizes=(150, 100))
mlp.fit(predictors, targets)
y_pred = mlp.predict(predictors)
mean_squared_error(y_pred, targets), mlp
real_data = np.sort(targets)
predicted_data = np.sort(mlp.predict(predictors))
fig, ax = plt.subplots()
fig.set_size_inches(12, 10)
plt.plot(
np.linspace(start=0, stop=len(real_data) * 100, num=len(real_data)),
real_data,
color="b",
label="Real Data",
)
plt.plot(
np.linspace(start=0, stop=len(real_data) * 100, num=len(real_data)),
predicted_data,
color="g",
label="Predicted Data",
)
plt.legend()
train.loc[train.Age < 0, "Age"] = mlp.predict(
StandardScaler().fit_transform(
train[train["Age"] < 0][
["Alone", "Cabin", "Fare", "Pclass", "Sex", "Ticket", "Title"]
]
)
)
test.loc[test.Age < 0, "Age"] = mlp.predict(
StandardScaler().fit_transform(
test[test["Age"] < 0][
["Alone", "Cabin", "Fare", "Pclass", "Sex", "Ticket", "Title"]
]
)
)
test.loc[test.Age < 0, "Age"] = 0.1
train.loc[train.Age < 0, "Age"] = 0.1
train.head()
# # Do final prediction
predictors = train.drop(["PassengerId", "Survived"], axis=1)
targets = train[["Survived"]]
predictors = StandardScaler().fit_transform(predictors)
x_train, x_test, y_train, y_test = train_test_split(
predictors, targets, test_size=0.05, random_state=0
)
mlp = MLPRegressor(batch_size=50, hidden_layer_sizes=(140))
mlp.fit(x_train, y_train)
y_pred = mlp.predict(x_train).round()
test_pred = mlp.predict(x_test).round()
score = accuracy_score(y_train, y_pred)
test_score = accuracy_score(y_test, test_pred)
mlp, score, test_score
ids = test["PassengerId"]
predictions = np.abs(
mlp.predict(
StandardScaler().fit_transform(test.drop(["PassengerId"], axis=1))
).round()
).astype(int)
output = pd.DataFrame({"PassengerId": ids, "Survived": predictions})
output.to_csv("submission.csv", index=False)
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import MinMaxScaler
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
df = pd.read_csv("../input/berlin-airbnb-data/listings.csv")
df_train = df[:15699] ##load your train here
df_test = df[15735:] ## load your test here
target = df_train["price"]
df_train.drop(["price"], axis=1, inplace=True)
df.head()
df_train.isna().sum()
def fill(df):
df["name"].fillna(method="ffill", inplace=True)
df["host_name"].fillna(method="ffill", inplace=True)
df["last_review"].fillna(method="ffill", inplace=True)
df = df.fillna(df.mean())
return df
df_train = fill(df_train)
df_test = fill(df_test)
def date_features(df):
df["last_review"] = pd.to_datetime(df["last_review"])
df["year"] = df["last_review"].dt.year
df["year"] = df["last_review"].dt.month
df["day"] = df["last_review"].dt.day
# df['day']=df['day'].astype(int)
df.drop(["last_review"], axis=1, inplace=True)
return df
df_train = date_features(df_train)
df_test = date_features(df_test)
df_train.head()
cat = []
for col in df_train.columns:
if type(df_train[col][0]) == str:
cat.append(col)
cat
final = pd.concat([df_train, df_test])
for col in cat:
lb = LabelEncoder()
final[col] = lb.fit_transform(final[col].values)
train = final[: len(df_train)]
test = final[len(df_train) :]
minmax = MinMaxScaler()
train = minmax.fit_transform(train)
test = minmax.transform(test)
n_folds = 5
final_predictions = np.zeros((1, len(test)))
folds = KFold(n_splits=n_folds, shuffle=True, random_state=12)
for fold_, (train_idx, test_idx) in enumerate(folds.split(train)):
x_train = train[train_idx]
y_train = target[train_idx]
x_val = train[test_idx]
y_val = target[test_idx]
lgbm = lgb.LGBMRegressor()
lgbm.fit(
        x_train, y_train, eval_set=(x_val, y_val), eval_metric="mae", verbose=False
)
y_pre = lgbm.predict(x_val)
y_pre = list(map(abs, y_pre))
print("rmse", np.sqrt(mean_squared_error(y_pre, y_val)))
predictions = lgbm.predict(test)
final_predictions += predictions
final_predictions = final_predictions / n_folds
final_predictions = list(map(abs, final_predictions[0]))
sub = pd.DataFrame()
sub["id"] = df_test.index
sub["price"] = final_predictions
sub.to_csv("submission.csv", index=False)
sub.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
df = pd.read_csv("../input/train_modified.csv")
df.head()
df.info()
df = pd.DataFrame(preprocessing.normalize(df), columns=df.columns)
y = df.Item_Outlet_Sales
X = df.drop("Item_Outlet_Sales", axis=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=111, shuffle=True
)
clf = RandomForestRegressor(
max_depth=20, min_samples_split=170, n_estimators=230, random_state=111
).fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Test RMSE: ", np.sqrt(mean_squared_error(y_test, y_pred)))
print("Test R^2: ", r2_score(y_test, y_pred))
pca = PCA()
pX_train = pca.fit_transform(X_train)
pX_test = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
print(list(explained_variance))
pca = PCA(n_components=2, random_state=42)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
clf2 = RandomForestRegressor(
max_depth=20, min_samples_split=170, n_estimators=230, random_state=42
).fit(X_train_pca, y_train)
y_pred2 = clf2.predict(X_test_pca)
print("Test RMSE: ", np.sqrt(mean_squared_error(y_test, y_pred2)))
print("Test R^2: ", r2_score(y_test, y_pred2))
ax = sns.regplot(x=X_test_pca[:, 0], y=X_test_pca[:, 1])
ax.set_xlabel("Principal Component 1")
ax.set_ylabel("Principal Component 2")
ax.set_title("PCA")
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm # progress bars with support for jupyter notebooks
import datetime as dt
import matplotlib.pyplot as plt
tqdm.pandas(desc="my bar!")
import os
def unique_union(x, y):
"""
takes two lists and returns their union with only unique elements. No ordering.
*x expands x, {*x} makes a set, *{*x} expands the set.
"""
return [*({*x}.union({*y}))]
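# Quick illustrative check: duplicates are dropped and ordering is not guaranteed.
print(sorted(unique_union([1, 2, 2, 3], [3, 4])))  # [1, 2, 3, 4]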
# right way to do this is probably three different functions with usecols argument...
def keep_cols(cols, col):
return col in cols
def read_data():
print("Reading train.csv.")
train = pd.read_csv("../input/data-science-bowl-2019/train.csv")
print(
"Read train.csv with {} rows and {} columns.".format(
train.shape[0], train.shape[1]
)
)
print("Reading test.csv.")
test = pd.read_csv("../input/data-science-bowl-2019/test.csv", usecols=["event_id"])
print(
"Read test.csv with {} rows and {} columns.".format(
test.shape[0], test.shape[1]
)
)
print("Reading train_labels.csv.")
train_labels = pd.read_csv("../input/data-science-bowl-2019/train_labels.csv")
print(
"Read train_labels.csv with {} rows and {} columns.".format(
train_labels.shape[0], train_labels.shape[1]
)
)
# print('Reading specs.csv.')
# specs = pd.read_csv('../input/data-science-bowl-2019/specs.csv')
# print('Read specs.csv with {} rows and {} columns.'.format(specs.shape[0],specs.shape[1]))
return (
train,
test,
train_labels,
) # specs
train, test, train_labels = read_data()
## Filter out all the installations which never complete an assessment
# Sorting by event_code is definitely not enough.
# I don't think 'Assessment' is -- user could start an event of 'type' 'Assessment' and not finish it
train = (
train.reset_index(drop=True)
.groupby("installation_id")
.filter(
lambda x: len(
x[
((x["event_code"] == 4100) | (x["event_code"] == 4110))
& (x["type"] == "Assessment")
].index
)
> 0
)
)
train.drop(columns=["event_code"], inplace=True)
## 'pattern' to detect successful assessments
# (originally had r'string' here for "raw string" -- not necessary! only for backslashes)
pattern = '"correct":true'
train["correct_assessment"] = train["event_data"].str.contains(pattern)
train.drop(columns=["event_data"], inplace=True) # no further use
## Double-check that 'True' only appears on assessments!
train[train["correct_assessment"] == True]["title"].unique()
# !!!
## An example of feature engineering that we don't do:
# make a dict {event_id -> event_info entries to keep}!
# good project for next time!
print(train)
assessment_titles = {*list(train[train["type"] == "Assessment"]["title"])}
train.drop(columns=["title"], inplace=True)
event_ids = unique_union(train["event_id"], test["event_id"])
worlds = {*list(train["world"])}
## recover memory
# TODO: it's principled but silly to bring in test for 'world' and 'type'...
del test
# number the lists
worlds_map = {x: i for (x, i) in zip(worlds, np.arange(len(worlds)))}
event_ids_map = {x: i for (x, i) in zip(event_ids, np.arange(len(event_ids)))}
types_map = {"Clip": 0, "Activity": 1, "Game": 2, "Assessment": 3}
# change from text labels to int labels via maps
# TODO: could this be one pass?
# (uses apply: https://stackoverflow.com/a/44648068)
train["world"] = train["world"].map(worlds_map)
train["event_id"] = train["event_id"].map(event_ids_map)
train["type"] = train["type"].map(types_map)
# test['world'] = test['world'].map(worlds_map)
# test['event_id'] = test['event_id'].map(event_ids_map)
# test['type'] = test['type'].map(types_map)
## Want a column 'session_number' which measures not just time in-app but real time
## since starting.
## This seems better than 'game_session' which counts something like turning the app on and off
## This is the most active 'feature engineering' -- worth it?
train["timestamp"] = pd.to_datetime(train["timestamp"])
# test['timestamp'] = pd.to_datetime(test['timestamp'])
# replaces 'timestamp' with 'session_number'
# TODO: make one pass?
def times_to_numbers(data):
## DROP_TIME is the number of seconds between distinct sessions
# TODO: treat this as a hyperparameter!
DROP_TIME = 900
# sort by timestamp
data_sorted = data.groupby("installation_id").apply(
lambda x: x.sort_values(by=["timestamp"])
)
# 'end_of_session' is a bool which denotes whether the event is the last in a session
data_sorted["end_of_session"] = (
data_sorted["timestamp"].shift(periods=-1) - data_sorted["timestamp"]
).map(lambda x: x.total_seconds()) > DROP_TIME
# get session number by adding up 'end_of_session'
data_sorted["session_number"] = data_sorted.groupby(level=0)[
"end_of_session"
].cumsum()
# don't need the rest of it
data_sorted = data_sorted.drop(
columns=["timestamp", "end_of_session", "game_session"]
)
return data_sorted
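# Illustration of the gap rule used inside times_to_numbers (toy timestamps, not competition data):
# consecutive events more than DROP_TIME seconds apart mark a session boundary.
_ts = pd.Series(pd.to_datetime(["2019-01-01 10:00:00", "2019-01-01 10:05:00", "2019-01-01 11:00:00"]))
_gaps = (_ts.shift(periods=-1) - _ts).map(lambda x: x.total_seconds())
print(_gaps.tolist())          # [300.0, 3300.0, nan]
print((_gaps > 900).tolist())  # [False, True, False] -> boundary after the 2nd event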
# apply
train = times_to_numbers(train)
## TODO: move up
train = train.drop(columns="event_count")
## Need to scale down 'game_time'
# kind of makes sense for it to have a similar scale to 'session_number'?
max_game_time = train["game_time"].max()
print("max_game_time: " + str(max_game_time))
max_session_num = train["session_number"].max()
print("max_session_num: " + str(max_session_num))
### scales 'game_time' to be smaller
# Usually need to subtract min but the min is zero.
# everything will get shifted by 1 anyway...
LIMEROBOT_NORM = 1000 # this is what limerobot used...
def scale_game_time(gt):
# return gt / max_game_time
return gt / LIMEROBOT_NORM
## to support masking
# TODO: how important is this if we're not using an official Masking layer?
# TODO: make one pass
train["event_id"] = train["event_id"].transform(lambda x: x + 1)
train["type"] = train["type"].transform(lambda x: x + 1)
train["world"] = train["world"].transform(lambda x: x + 1)
train["game_time"] = train["game_time"].transform(lambda x: scale_game_time(x))
train["session_number"] = train["session_number"].transform(lambda x: x + 1)
train["correct_assessment"] = train["correct_assessment"].transform(
lambda x: 1 if x else -1
) # to avoid mask? not sure about this one.
## TODO: use this construction above!
train_ids = set(train["installation_id"].unique())
original_columns = list(train.columns)
del original_columns[1]
print("Original columns:")
print(original_columns)
# (when we apply this permutation we'll have already dropped 'installation_id' so ignored below)
# ['event_id', 'game_time','type','world','correct_assessment','session_number']
# we want
# ['game_time', 'session_number', 'correct_assessment', 'event_id', 'type','world']
# permutation written as [f(0),f(1),f(2),f(3),f(4),f(5)] under the permutation
permutation = [3, 0, 4, 5, 2, 1]
idx = np.empty_like(permutation)
idx[permutation] = np.arange(len(permutation))
# used below when we pipe pandas into numpy:
new_columns = [original_columns[i] for i in idx]
print(
"Want: \n['game_time', 'session_number', 'correct_assessment', 'event_id', 'type', 'world']"
)
print("New columns:")
print(new_columns)
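# Tiny illustrative check that `idx` reorders the columns as described in the comment above;
# the same fancy indexing is applied row-wise later via id_array[:, idx].
_check = np.array(["event_id", "game_time", "type", "world", "correct_assessment", "session_number"])
print(list(map(str, _check[idx])))
# -> ['game_time', 'session_number', 'correct_assessment', 'event_id', 'type', 'world']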
## splits [0,1,2,3,4,5] into [[0,1,2],3,4,5]
# and https://discourse.julialang.org/t/reshape-a-1-d-array-into-an-array-of-different-size-arrays/25999
n = [3, 1, 1, 1]
split_points = np.cumsum(n[0:-1])
## sequences need to be of the same length
# what should it be?
# note that we have already filtered out players who did not take any assessments!
train["installation_id"].value_counts().describe(
percentiles=[0.25, 0.5, 0.75, 0.9, 0.99]
)
plt.figure()
train["installation_id"].value_counts().hist(bins=60)
plt.xticks([0, 2000, 13000, 60000], rotation=45)
print(
train["installation_id"]
.value_counts()
.describe(percentiles=[0.25, 0.33, 0.5, 0.66, 0.75])
)
## Cuts off a lot of the long tail
SEQ_LENGTH = 2000
# originally had SEQ_LENGTH = 13000
# cutting to 2000 cuts training time by 75%, seems worth it
# another interesting hyperparameter to play with!
### time to work with train_labels
import feather
## two ways we could do this:
# 1) take the max accuracy group. in other words, take the best result.
# 2) take the last accuracy group. in other words, take the most recent result.
# I think 2) makes more sense since our goal is to predict the next result, not the "average" result
# (Q: what does it mean for the app if these heavily diverge?)
# 'last_session' is the id of the most recent session for an ('installation_id', 'title')
train_labels["last_session"] = train_labels.groupby(["installation_id", "title"])[
"game_session"
].tail(n=1)
# keep only the most recent sessions
train_labels = train_labels[
train_labels["game_session"] == train_labels["last_session"]
]
# actually only one label per (installation, title)?
print(
train_labels.groupby(["installation_id"])["title"]
.apply(lambda x: x.duplicated())
.any()
)
# yes
# TODO: 3rd finish uses 'num_correct' and 'num_incorrect'
train_labels.drop(
columns=[
"game_session",
"last_session",
"num_correct",
"num_incorrect",
"accuracy",
],
inplace=True,
)
## assessment: 'installation_id's which took the assessment
took_assessment_ids_map = {
activity: list(train_labels[(train_labels["title"] == activity)]["installation_id"])
for activity in assessment_titles
}
## fill in assessments not taken
title_index = pd.MultiIndex.from_product(
[train_labels["installation_id"].unique(), assessment_titles],
names=["installation_id", "title"],
) # , names=['installation_id', 'title'])
# TODO: can't be right to have set_index.reindex!
filled_train_labels = (
train_labels.set_index(["installation_id", "title"])
.reindex(index=title_index, fill_value=0)
.reset_index()
)
feather.write_dataframe(filled_train_labels, "train_labels_processed.fth")
# want to fill in all assessments for each session
# so create data frame with the same 'game_session', 'installation_id' and fill in known values
# rest are zero
## We're done with train! write it
feather.write_dataframe(train, "train_processed.fth")
# writing records that will be useful elsewhere
import pickle
with open("event_ids_map.pkl", "wb") as file:
pickle.dump(event_ids_map, file)
with open("took_assessments_map.pkl", "wb") as file:
pickle.dump(took_assessment_ids_map, file)
### Make nice numpy arrays
## make a directory if it's not already there
import os
try:
os.mkdir("data")
# if FileExistsError AND the file is a directory -- good, move on
# otherwise, something screwy is happening! stop everything
# all kinds of concurrency issues here, but not an issue for us
except FileExistsError:
if not os.path.isdir("data"):
raise
row = np.array(
[np.array([0, 0, 0]), np.array([0]), np.array([0]), np.array([0])], dtype=object
)
# one-hot encodes 'accuracy_group'
def my_dumb_one_hot(num):
if num == 0:
return np.array([1, 0, 0, 0])
elif num == 1:
return np.array([0, 1, 0, 0])
elif num == 2:
return np.array([0, 0, 1, 0])
elif num == 3:
return np.array([0, 0, 0, 1])
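# Equivalent check (illustrative): the same one-hot rows can be read off an identity matrix.
print(my_dumb_one_hot(2), np.eye(4, dtype=int)[2])  # [0 0 1 0] [0 0 1 0]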
for activity in tqdm(took_assessment_ids_map):
relevant_ids = took_assessment_ids_map[activity]
X0 = np.empty([len(relevant_ids), SEQ_LENGTH, 3])
X1 = np.empty([len(relevant_ids), SEQ_LENGTH])
X2 = np.empty([len(relevant_ids), SEQ_LENGTH])
X3 = np.empty([len(relevant_ids), SEQ_LENGTH])
y = np.empty([len(relevant_ids), 4])
j = 0
for an_id in tqdm(relevant_ids):
# fill a new array with rows of zeros
Xentry = np.tile(row, (SEQ_LENGTH, 1))
# to form id_array: take train for a particular id, drop the id, make into numpy
# fix type, cut off/pad at SEQ_LENGTH
id_array = (
train.loc[an_id]
.drop(columns="installation_id")
.to_numpy()
.astype(int)[-SEQ_LENGTH:]
)
# permute columns to be correct
id_array[:] = id_array[:, idx]
# TODO: should be a way to do this in numpy without explicit loop
for i in np.arange(id_array.shape[0]):
Xentry[i, :] = np.split(id_array[i], split_points, axis=0)
# now we have an array like
# [
# [[x,y,z],a,b,c]
# ...
# [[x,y,z],a,b,c]
# ]
# but we want four arrays like:
# [[x,y,z],...,[x,y,z]], [a...], [b...], [c...]
X0[j] = np.vstack(Xentry[:, 0])
X1[j] = Xentry[:, 1].astype(int)
X2[j] = Xentry[:, 2].astype(int)
X3[j] = Xentry[:, 3].astype(int)
# now find the label and one-hot encode it
y_temp = filled_train_labels.set_index(["installation_id", "title"]).loc[
(an_id, activity)
]
y_temp = y_temp[0]
y_temp = my_dumb_one_hot(y_temp)
# add it to the array of labels
y[j] = y_temp
# TODO: determine if this is a necessary evil
j = j + 1
np.savez_compressed(
os.path.join("data", "X_" + activity + ".npz"), x0=X0, x1=X1, x2=X2, x3=X3
)
np.save(os.path.join("data", "Y_" + activity + ".npy"), y)
|
# # Machine Learning From Titanic Disaster:
# ## 0. Introduction and Contents Overview:
# ### Aim and Context:
# **Aim** of this challenge is: *use machine learning to create a model that predicts which passengers survived the Titanic shipwreck.*
# **The context** of this challenge is that the Titanic ...
# ### Contents Overview:
# This notebook will be structured in the following manner:
# 1. **Exploratory Data Analysis:** *Discern the main trends between the variables and the target outcome: survived*
# 2. **Creating Categories & Feature Engineering:** *Following from our EDA create new categories and apply feature engineering to cases of null values*
# 3. **Prepare the Test, Train Data:**
# 4. **Apply the Model(s):**
# 5. **Cross-Validation:**
# 6. **Final Score and Concluding Thoughts:**
# ### Import Libraries, Loading the Data:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
plt.rc("font", size=18)
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
# ## 1. Exploratory Data Analysis:
# ### Initial Comments:
print("Train observations", train.shape[0])
print("Test observations", test.shape[0])
print("Test Size", "{:.0%}".format(test.shape[0] / (train.shape[0] + test.shape[0])))
# **Notes on when dealing with a small sample size:**
# *Main point: Generally speaking, as you decrease sample size, your Cross Validation (CV) variance will increase.*
# Suggestion:
# Use cross-validation over the features themselves; that is, in addition to randomly selecting a train set, randomly select a subset of features for each cross-validation fold (a minimal sketch of this idea follows these notes).
# The random feature subsets used at each split of a Random Forest ("feature bagging") achieve something similar.
# Read more:
# https://stats.stackexchange.com/questions/86238/is-cross-validation-still-valid-when-the-sample-size-is-small
# Further discussion on cross-validation with small datasets can be found here:
# https://stats.stackexchange.com/questions/120152/cross-validation-and-small-samples
# Here we can see a reference to an Oxford Journal on this topic of whether cross-validation works on small datasets.
# Here a bootstrap method can be applied [need to learn more about this].
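# A minimal sketch (an assumption, not part of the original workflow) of the feature-subsampling
# idea referenced above: each CV fold trains on a random subset of columns. The column names in the
# commented usage are from the Titanic data; the classifier choice here is purely illustrative.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
def feature_subsampled_cv(X, y, n_splits=5, n_features=4, seed=0):
    # randomly pick a feature subset for every fold, then score a classifier on the held-out part
    rng = np.random.RandomState(seed)
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    scores = []
    for tr_idx, va_idx in skf.split(X, y):
        cols = rng.choice(X.columns, size=min(n_features, X.shape[1]), replace=False)
        clf = RandomForestClassifier(n_estimators=100, random_state=seed)
        clf.fit(X.iloc[tr_idx][cols], y.iloc[tr_idx])
        scores.append(accuracy_score(y.iloc[va_idx], clf.predict(X.iloc[va_idx][cols])))
    return float(np.mean(scores))
# Possible usage on numeric columns only (rows with missing Age dropped for this sketch):
# _num = train.dropna(subset=["Age"])
# print(feature_subsampled_cv(_num[["Pclass", "Age", "SibSp", "Parch", "Fare"]], _num["Survived"]))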
# ### Variable by Variable Analysis:
# #### Basic Overview:
train.head()
train.info()
train.isnull().sum()
# **Initial comments on the variables:**
# - Survived is our **Target**; we know that 38% of the training set survived the Titanic. We will need to take this into consideration later on if we use classification algorithms that are sensitive to this imbalance in the data.
# - Pclass: this is a proxy for socio-economic class; there are a total of 3 classes [1, 2, 3]
# - Name: this is a string from which we can derive the social class (e.g. 'Sir')
# - Sex: M and F; we will substitute this with dummy variables [0, 1]
# - SibSp: number of siblings/spouses on board; an indicator of family size
# - Parch: number of parents/children on board; an indicator of family size
# - Fare: continuous variable
# - Age: continuous variable
# - PassengerId and Ticket are assumed to be identifiers assigned arbitrarily to each passenger, therefore we do not consider them in our analysis.
# - Embarked: categorical variable with 3 levels (C = Cherbourg; Q = Queenstown; S = Southampton); these will need to be changed to dummy variables.
# - Null values with **Age** and **Cabin** and **Embarked**; we will deal with these in **Section 2.**
train.describe()
# #### Age and Sex:
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
male = train[train.Sex == "male"]
female = train[train.Sex == "female"]
sns.distplot(
female[female.Survived == 1].Age.dropna(),
bins=20,
label="Survived",
ax=ax[0],
kde=False,
)
sns.distplot(
female[female.Survived == 0].Age.dropna(),
bins=20,
label="Not Survived",
ax=ax[0],
kde=False,
)
ax[0].legend()
ax[0].set_title("Female")
sns.distplot(
male[male.Survived == 1].Age.dropna(),
bins=20,
label="Survived",
ax=ax[1],
kde=False,
)
sns.distplot(
male[male.Survived == 0].Age.dropna(),
bins=20,
label="Not Survived",
ax=ax[1],
kde=False,
)
ax[1].legend()
ax[1].set_title("Male")
print(
"Total count of Male surviors",
male[male.Survived == 1].shape[0],
"| Percentage of Total Male",
"{:.0%}".format(male[male.Survived == 1].shape[0] / male.shape[0]),
)
print(
"Total count of Female surviors",
female[female.Survived == 1].shape[0],
"| Percentage of Total Female",
"{:.0%}".format(female[female.Survived == 1].shape[0] / female.shape[0]),
)
# **Here we can make the following observations:**
# * Both a higher absolute number and relative number of females survived the Titanic
# * For male survivors, the higher peaks are at the very low end (young children of 0-4 years) and around the 18-30 year old mark (potentially parents)
# * For women, the chances of survival are higher between 14 and 40 years old
# * In the 5-18 age band, the chance of survival is much lower for males than it is for females
# * Relatively few elderly people survived
# **We can make the following (preliminary inferences):**
# 1) Being part of a family may increase your chances of survival
# 2) Being a female has a higher chance of survival (this may be also because of (1); i.e. many women on board were part of families)
# 3) Being a man generally has a lower chance of survival
# #### SibSp (Siblings/Spouse) and Parch (Parents/Children):
from numpy import mean
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
sns.barplot(
x="SibSp",
y="Survived",
data=train,
ci=None,
color="salmon",
estimator=mean,
ax=ax[0],
)
ax[0].set_title("Mean Survival of Siblings or Spouse")
sns.barplot(
x="Parch",
y="Survived",
data=train,
ci=None,
color="indigo",
estimator=mean,
ax=ax[1],
)
ax[1].set_title("Mean Survival of Parents or Children")
# **Preliminary Observations:**
# * We can see that the highest survival rates are at 1 and 2 for SibSp, and at 1, 2 and 3 for Parch
# * The survival rate drops a lot at 3 and 4 for SibSp and at 5 for Parch
# **From logic we know the following:**
# *SibSp:*
# * If SibSp == 0: This is either a SINGLE CHILD or a SINGLE ADULT
# * If SibSp > 1: This is most likely a CHILD in a FAMILY (exception: an adult with more than one sibling, or with a spouse and a sibling)
# *Parch:* If Parch == 0: This is most likely an ADULT (potentially with a spouse or sibling aboard)
# *SibSp AND Parch:* If Parch == 0 AND SibSp == 0: This is a SINGLE ADULT
# If we take the variables as follows:
# (1) SINGLE CHILD OR FAMILY
# (2) SINGLE ADULT
# We can construct the following matrix:
def create_matrix(variable1, variable2):
if max(train[variable1].unique()) > max(train[variable2].unique()):
number = max(train[variable1].unique()) + 1
else:
number = max(train[variable2].unique()) + 1
matrix_data = np.array([[np.empty for i in range(number)] for j in range(number)])
for i in range(number):
for j in range(number):
matrix_data[i, j] = "child or parent"
matrix_data[6, j] = ""
matrix_data[7, j] = ""
matrix_data[0, 0] = "adult"
matrix_data[1, 0] = "adult"
if i > 1:
matrix_data[i, j] = "child"
if j > 2:
matrix_data[i, j] = "parent"
            if j in (7, 8):  # blank out the Parch 7 and 8 columns (they are dropped below)
matrix_data[i, 7] = ""
matrix_data[i, 8] = ""
for i in range(number):
for j in range(number):
if j not in train[train[variable1] == i][variable2].unique():
matrix_data[i, j] = ""
columns = [variable2 + " " + str(i) for i in range(number)]
matrix = pd.DataFrame(data=matrix_data, columns=columns)
matrix[variable1] = [variable1 + " " + str(i) for i in range(number)]
matrix = matrix.set_index(variable1)
matrix = matrix.drop(["Parch 7", "Parch 8"], axis=1)
return matrix
create_matrix("SibSp", "Parch")
# Here we can see which (SibSp, Parch) combination corresponds to which persona type. Based on this schema, we can now reassess survival by the type of person involved. Let's create a function so we can append this to our dataframes.
def generate_persona(dataset, persona_list):
for i in range(dataset.shape[0]):
if dataset.Age[i] <= 14:
persona_list.append("child")
if dataset.Age[i] > 14:
if dataset.Parch[i] > 0:
persona_list.append("parent")
elif dataset.Parch[i] == 0:
persona_list.append("adult")
if math.isnan(dataset.Age[i]) == True:
if dataset.SibSp[i] in [0, 1] and dataset.Parch[i] == 0:
persona_list.append("adult")
elif dataset.SibSp[i] >= 2 and dataset.Parch[i] < 3:
persona_list.append("child")
elif dataset.Parch[i] > 2:
persona_list.append("parent")
else:
persona_list.append("child or parent")
dataset["persona"] = persona_list
# Let's now make sure we do this to both our train and test datasets.
persona_list_train = []
persona_list_test = []
persona_lists = [persona_list_train, persona_list_test]
data = [train, test]
for i in range(2):
generate_persona(data[i], persona_lists[i])
# **Now we can plot our results:**
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
sns.countplot(x="persona", data=train, color="salmon", hue="Sex", ax=ax)
ax.set_title("Count by Persona")
from numpy import sum
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
sns.barplot(
x="persona",
y="Survived",
data=train[train.Survived == 1],
ci=None,
color="salmon",
hue="Sex",
estimator=sum,
ax=ax,
)
ax.set_title("Count Survival by Persona")
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
sns.barplot(
x="persona",
y="Survived",
data=train,
ci=None,
color="indigo",
hue="Sex",
estimator=mean,
ax=ax,
)
ax.set_title("Mean Survival by Persona")
# Preliminary conclusions on 'personas':
# - Females have a higher survival rate all round; the difference is particularly pronounced for the adult class;
# - Parents who are male may have a smaller chance of survival;
# ### PClass, Embarked:
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(12, 7))
sns.countplot(x="Embarked", data=train, color="salmon", hue="persona", ax=ax[0, 0])
ax[0, 0].set_title("Count by Embarking Point")
sns.countplot(x="Pclass", data=train, color="indigo", hue="persona", ax=ax[0, 1])
ax[0, 1].set_title("Count by Passenger Class")
sns.barplot(
x="Embarked",
y="Survived",
data=train,
ci=None,
color="salmon",
hue="persona",
estimator=mean,
ax=ax[1, 0],
)
ax[1, 0].set_title("Mean Survival by Embarking Point")
sns.barplot(
x="Pclass",
y="Survived",
data=train,
ci=None,
color="indigo",
hue="persona",
estimator=mean,
ax=ax[1, 1],
)
ax[1, 1].set_title("Mean Survival by Passenger Class")
plt.tight_layout()
# **Preliminary observations for Embarking Points:**
# * The vast majority of passengers come from Port S regardless of persona type;
# * The vast majority of children are travelling in Pclass = 3
# * The mean survival rate of children is higher in Pclass = 2 and Pclass = 1; this might be because in Pclass = 3 there is more competition, as there are more people in this class (single adults are most numerous here).
# * In general, survival rates increase going from Pclass = 3 to Pclass = 1;
# # 2. Creating Categories & Feature Engineering:
# ### Part I: Creating Categories:
# We have already created categories for the different 'personas' for our EDA, but now we are going to continue by looking at other variables and what categories we can create from them.
# ### Titles:
# First, let's generate the list of titles and the list of surnames:
def generate_titles(dataset, titles_list):
for i in dataset.Name:
split = i.split(" ")
for j in range(len(split)):
if "," in split[j]:
if split[j + 2] == "Countess.":
titles_list.append(split[j + 2])
else:
titles_list.append(split[j + 1])
dataset["titles"] = titles_list
def generate_surnames(dataset, surname_list):
for i in dataset.Name:
surname_list.append(i.split(" ")[0][:-1])
train_titles = []
test_titles = []
titles_lists = [train_titles, test_titles]
data = [train, test]
for i in range(2):
generate_titles(data[i], titles_lists[i])
# ### Personas, revisited:
# You will recall that in the previous section we created the 'personas' to aid our Exploratory Data Analysis - that is, to show the breakdown by what sort of person type they were. We can refresh how we constructed this with the matrix:
create_matrix("SibSp", "Parch")
# Now that we have titles, though, we can verify and correct this as we constructed this on certain assumptions that need to be checked with the 'titles' information.
train[train.persona == "child"].titles.unique()
train[train.persona == "parent"].titles.unique()
train[train.persona == "adult"].titles.unique()
train[train.persona == "child or parent"].titles.unique()
train[train.persona == "child or parent"]
# Interestingly, we can see that all the unknown cases of child or parent are in fact 'NaN' age values. The only thing we can correct for now is the 'Master' category which is used for children.
train.loc[
(train.persona == "child or parent") & (train.titles == "Master."), "persona"
] = "child"
train[train.persona == "child or parent"].titles.unique()
# ### Part II: Cleaning and Dealing with Null Values:
# Now that we have some categories created and a better understanding of the overall data, we are in a better position to deal with the case of the null values.
train.isnull().sum()
train_df = train.copy()
test_df = test.copy()
# #### Cabin:
# This indicates the cabin position on the ship; however, since there are a lot of null values we will not consider it for feature engineering. We will drop this column altogether.
train_df = train_df.drop(["Cabin"], axis=1)
test_df = test_df.drop(["Cabin"], axis=1)
# #### Embarked:
train_df[train_df.Embarked.isnull()]
train[train.Embarked.isnull()]
# For this we can simply use the most frequent occurrence of Embarked to replace it:
train_df.Embarked.describe()
replace_value = "S"
data = [train_df, test_df]
for dataset in data:
dataset["Embarked"] = dataset["Embarked"].fillna(replace_value)
# #### Age:
# Here we have 177 null values, and we have to be careful about which strategy is best for dealing with them. A simple strategy would be to fill the NaN values with the overall median age; however, since we have seen that Age varies greatly with other factors, this may not be the best approach.
# What we are going to do here, then, is return to our grouping of the different personas to help us deal with this.
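# Side note: the same per-group median imputation can be written as a one-liner with a
# groupby-transform. A minimal sketch (run on a throwaway copy, so the step-by-step
# version below remains the one actually used):
persona_sketch = train_df.copy()
persona_sketch["Age"] = persona_sketch["Age"].fillna(
    persona_sketch.groupby("persona")["Age"].transform("median")
)
print("Remaining null ages in the sketch:", persona_sketch["Age"].isnull().sum())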
train_df.persona.unique()
print("Null adult persona", train_df[train_df.persona == "adult"].Age.isnull().sum())
print("Median age", train_df[train_df.persona == "adult"].Age.median())
train_df[train_df.persona == "adult"].describe()
# Let's replace 'adult' nan with the median:
train_df.loc[(train_df.Age.isnull()) & (train_df.persona == "adult"), "Age"] = train_df[
train_df.persona == "adult"
].Age.median()
test_df.loc[(test_df.Age.isnull()) & (test_df.persona == "adult"), "Age"] = test_df[
test_df.persona == "adult"
].Age.median()
print(
"Null child or parent persona",
train_df[train_df.persona == "child or parent"].Age.isnull().sum(),
)
print("Median age", train_df[train_df.persona == "child or parent"].Age.median())
train_df[train_df.persona == "child or parent"].describe()
# From the above, a median substitution for the 'adult' and 'child' personas seems appropriate (no need for 'parent' as there are no null values there). For 'child or parent', a median value of around 25 would be an option if there truly were no way of differentiating the two, but let's see if we can unpack this further.
# If we recall our matrix, we used the following conditions to define this persona category:
create_matrix("SibSp", "Parch")
pd.set_option("display.max_rows", 1000)
train_df[train_df.persona == "child or parent"]
# We can look at the titles to modify this:
train_df.loc[
(train_df.persona == "child or parent") & (train.titles == "Mrs."), "persona"
] = "parent"
train_df.loc[
(train_df.persona == "child or parent") & (train.titles == "Miss."), "persona"
] = "child"
train_df.loc[
(train_df.persona == "child or parent") & (train.titles == "Mr."), "persona"
] = "parent"
test_df.loc[
(test_df.persona == "child or parent") & (test.titles == "Mrs."), "persona"
] = "parent"
test_df.loc[
(test_df.persona == "child or parent") & (test.titles == "Master."), "persona"
] = "child"
print("Null child persona", train_df[train_df.persona == "child"].Age.isnull().sum())
print("Median age", train_df[train_df.persona == "child"].Age.median())
train_df[train_df.persona == "child"].describe()
# Let's replace 'child' nan with the median:
train_df.loc[(train_df.Age.isnull()) & (train_df.persona == "child"), "Age"] = train_df[
train_df.persona == "child"
].Age.median()
test_df.loc[(test_df.Age.isnull()) & (test_df.persona == "child"), "Age"] = test_df[
test_df.persona == "child"
].Age.median()
print("Null parent persona", train_df[train_df.persona == "parent"].Age.isnull().sum())
print("Median age", train_df[train_df.persona == "parent"].Age.median())
train_df[train_df.persona == "parent"].describe()
train_df.loc[
(train_df.Age.isnull()) & (train_df.persona == "parent"), "Age"
] = train_df[train_df.persona == "parent"].Age.median()
test_df.loc[(test_df.Age.isnull()) & (test_df.persona == "parent"), "Age"] = test_df[
test_df.persona == "parent"
].Age.median()
# And, finally, our list of null values has been dealt with.
train_df.isnull().sum()
test_df.isnull().sum()
test_df.loc[test_df.Fare.isnull(), "Fare"] = test_df.Fare.mean()  # fill the missing Fare with the mean fare
test_df.isnull().sum()
# ### Part III: Dummy Variables
train_df.head(2)
# List of variables to convert to dummy:
# * Sex
# * Embarked
# * persona
# * titles
# #### Sex:
train_df.Sex = train_df.Sex.map({"female": 1, "male": 0}).astype(int)
test_df.Sex = test_df.Sex.map({"female": 1, "male": 0}).astype(int)
# #### Embarked:
train_df.Embarked.unique()
train_df.Embarked = train_df.Embarked.map({"Q": 0, "C": 1, "S": 2}).astype(int)
test_df.Embarked = test_df.Embarked.map({"Q": 0, "C": 1, "S": 2}).astype(int)
# #### Persona:
train_df.persona.unique()
train_df.persona = train_df.persona.map({"adult": 0, "parent": 1, "child": 2}).astype(
int
)
test_df.persona = test_df.persona.map({"adult": 0, "parent": 1, "child": 2}).astype(int)
# #### Titles:
train_df.titles.unique()
# Let's clean some of the data so we can have some distinct groupings:
train_df.titles = train_df.titles.replace(
[
"Don.",
"Dr.",
"Major.",
"Lady.",
"Sir.",
"Col.",
"Capt.",
"Countess.",
"Jonkheer.",
"Rev.",
],
"Rare",
)
train_df.titles = train_df.titles.replace("Mlle.", "Miss.")
train_df.titles = train_df.titles.replace("Ms.", "Miss.")
train_df.titles = train_df.titles.replace("Mme.", "Miss.")
test_df.titles = test_df.titles.replace(
[
"Don.",
"Dr.",
"Major.",
"Lady.",
"Sir.",
"Col.",
"Capt.",
"Countess.",
"Jonkheer.",
"Rev.",
],
"Rare",
)
test_df.titles = test_df.titles.replace("Mlle.", "Miss.")
test_df.titles = test_df.titles.replace("Ms.", "Miss.")
test_df.titles = test_df.titles.replace("Mme.", "Miss.")
train_df.titles.unique()
test_df.titles.unique()
test_df.titles = test_df.titles.replace("Dona.", "Rare")
train_df.titles = train_df.titles.map(
{"Mr.": 1, "Miss.": 2, "Mrs.": 3, "Master.": 4, "Rare": 5}
).astype(int)
test_df.titles = test_df.titles.map(
{"Mr.": 1, "Miss.": 2, "Mrs.": 3, "Master.": 4, "Rare": 5}
).astype(int)
train_df.head(2)
test_df.head(2)
# #### Age and Fare (Continuous Variables) Scaling:
from sklearn import preprocessing
continuous_features = ["Fare", "Age"]
data = [train_df, test_df]
for dataset in data:
for col in continuous_features:
transf = dataset[col].values.reshape(-1, 1)
scaler = preprocessing.StandardScaler().fit(transf)
dataset[col] = scaler.transform(transf)
# #### Dummy Variables for non-binary Categorical Data:
train_df = pd.get_dummies(
train_df,
columns=["Embarked", "titles", "Parch", "SibSp", "Pclass", "persona"],
drop_first=False,
)
test_df = pd.get_dummies(
test_df,
columns=["Embarked", "titles", "Parch", "SibSp", "Pclass", "persona"],
drop_first=False,
)
# Now let's drop the last few variables we do not need:
train_df = train_df.drop(["PassengerId", "Name", "Ticket"], axis=1)
test_df = test_df.drop(["PassengerId", "Name", "Ticket"], axis=1)
pd.set_option("display.max_columns", 40)
train_df.head(2)
test_df.head(2)
# ## 4. Apply Model:
# Let's look at: 1) Decision Tree; 2) Random Forest Tree Classifier for this.
# ### 4.1: Decision Tree
# Reference: https://www.kaggle.com/dmilla/introduction-to-decision-trees-titanic-dataset
def get_gini_impurity(survived_count, total_count):
survival_prob = survived_count / total_count
not_survival_prob = 1 - survival_prob
random_observation_survived_prob = survival_prob
random_observation_not_survived_prob = 1 - random_observation_survived_prob
mislabelling_survided_prob = not_survival_prob * random_observation_survived_prob
mislabelling_not_survided_prob = (
survival_prob * random_observation_not_survived_prob
)
gini_impurity = mislabelling_survided_prob + mislabelling_not_survided_prob
return gini_impurity
# The way decision trees work is by starting from the root node impurity (that is, the "*probability of mislabelling an element assuming that the element is randomly labelled according to the distribution of all the classes in the set*"). Then, as we add more decisions (i.e. as the depth of the tree increases), we seek to reduce that impurity.
# Let's begin by finding that starting node impurity. Recall that 342 out of 891 passengers in the train set survived.
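# Quick hand check of the formula above: with survival probability p = 342/891 ≈ 0.384,
# Gini impurity = p*(1 - p) + (1 - p)*p = 2*p*(1 - p) ≈ 2 * 0.384 * 0.616 ≈ 0.473,
# which the helper call below should reproduce.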
gini_impurity_starting_node = get_gini_impurity(342, 891)
gini_impurity_starting_node
# Now recall from our EDA that Sex was a major determining factor. Let's use it to simulate the first split.
# The counts are:
# * Male: 577 observations with only 109 survived
# * Female: 314 observations with 233 survived
gini_impurity_male = get_gini_impurity(109, 577)
print("male gini", gini_impurity_male)
gini_impurity_female = get_gini_impurity(233, 314)
print("female gini", gini_impurity_female)
# Weighted impurity
men_weight = 577 / 891
women_weight = 314 / 891
weighted_gini_impurity_sex_split = (gini_impurity_male * men_weight) + (
gini_impurity_female * women_weight
)
sex_gini_decrease = weighted_gini_impurity_sex_split - gini_impurity_starting_node
sex_gini_decrease
# Therefore, comparing a split by Sex with the starting node, we see that the weighted Gini impurity decreases by roughly 0.14 (from about 0.47 to about 0.33).
# **Adult:**
print("adult observations", train_df[(train_df.persona_0 == 1)].shape[0])
print(
"adult survived",
train_df[(train_df.persona_0 == 1) & (train_df.Survived == 1)].shape[0],
)
gini_impurity_adult = get_gini_impurity(226, 668)
print("adult gini", gini_impurity_adult)
gini_impurity_nonadult = get_gini_impurity((342 - 226), (891 - 668))
print("nonadult gini", gini_impurity_nonadult)
# Weighted impurity
adult_weight = 668 / 891
nonadult_weight = (891 - 668) / 891
weighted_gini_impurity_adult_split = (gini_impurity_adult * adult_weight) + (
gini_impurity_nonadult * nonadult_weight
)
adult_gini_decrease = weighted_gini_impurity_adult_split - gini_impurity_starting_node
adult_gini_decrease
# Therefore, we can see that splitting on Adult at the starting node gives only a negligible decrease in impurity.
# **Child**:
print("child observations", train_df[(train_df.persona_2 == 1)].shape[0])
print(
"child survived",
train_df[(train_df.persona_2 == 1) & (train_df.Survived == 1)].shape[0],
)
print("nonchild observations", train_df[(train_df.persona_2 == 1)].shape[0])
print(
"child survived",
train_df[(train_df.persona_2 == 1) & (train_df.Survived == 1)].shape[0],
)
gini_impurity_child = get_gini_impurity(50, 96)
print("child gini", gini_impurity_child)
gini_impurity_nonchild = get_gini_impurity((342 - 50), (891 - 96))
print("non-child gini", gini_impurity_nonchild)
# Weighted impurity
child_weight = 96 / 891
nonchild_weight = (891 - 96) / 891
weighted_gini_impurity_child_split = (gini_impurity_child * child_weight) + (
gini_impurity_nonchild * nonchild_weight
)
child_gini_decrease = weighted_gini_impurity_child_split - gini_impurity_starting_node
child_gini_decrease
# **Title Mr:**
print("mr observations", train_df[(train_df.titles_1 == 1)].shape[0])
print(
"mr survived",
train_df[(train_df.titles_1 == 1) & (train_df.Survived == 1)].shape[0],
)
gini_impurity_mr = get_gini_impurity(81, 517)
print("mr gini", gini_impurity_mr)
gini_impurity_nonmr = get_gini_impurity((342 - 81), (891 - 517))
print("non-mr gini", gini_impurity_nonmr)
# Weighted impurity
mr_weight = 517 / 891
nonmr_weight = (891 - 517) / 891
weighted_gini_impurity_mr_split = (gini_impurity_mr * mr_weight) + (
gini_impurity_nonmr * nonmr_weight
)
mr_gini_decrease = weighted_gini_impurity_mr_split - gini_impurity_starting_node
mr_gini_decrease
# Here we can see that this - so far - is the highest decrease in gini.
# Can we automate this?
train_df.head(2)
def get_impurity_change(dataset, variable):
def get_gini_impurity(survived_count, total_count):
survival_prob = survived_count / total_count
not_survival_prob = 1 - survival_prob
random_observation_survived_prob = survival_prob
random_observation_not_survived_prob = 1 - random_observation_survived_prob
mislabelling_survided_prob = (
not_survival_prob * random_observation_survived_prob
)
mislabelling_not_survided_prob = (
survival_prob * random_observation_not_survived_prob
)
gini_impurity = mislabelling_survided_prob + mislabelling_not_survided_prob
return gini_impurity
obs = dataset[(dataset[variable] == 1)].shape[0]
survived_obs = dataset[(dataset[variable] == 1) & (dataset.Survived == 1)].shape[0]
total_obs = dataset.shape[0]
total_surv = dataset[dataset.Survived == 1].shape[0]
gini_impurity_1 = get_gini_impurity(survived_obs, obs)
gini_impurity_2 = get_gini_impurity((total_surv - survived_obs), (total_obs - obs))
gini_impurity_starting_node = get_gini_impurity(total_surv, total_obs)
weight1 = obs / total_obs
weight2 = (total_obs - obs) / total_obs
weighted_gini = (gini_impurity_1 * weight1) + (gini_impurity_2 * weight2)
gini_decrease = weighted_gini - gini_impurity_starting_node
return gini_decrease
get_impurity_change(train_df, "Sex")
select_columns = train_df.columns.to_list()
select_columns = select_columns[1:2] + select_columns[4:] # remove non binary variables
for i in select_columns:
print(i, get_impurity_change(train_df, i))
# Now that we know which variable is the most significant (the title Mr.), we can move on to finding the optimum tree depth:
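# A minimal sketch of how such a depth search could look (cross-validated accuracy per
# depth; the names below are local to this sketch and are not reused by the submissions):
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
X_sketch = train_df.drop(["Survived"], axis=1).values
y_sketch = train_df["Survived"]
for depth in range(1, 11):
    cv_acc = cross_val_score(
        DecisionTreeClassifier(max_depth=depth, random_state=42), X_sketch, y_sketch, cv=5
    ).mean()
    print(f"max_depth={depth}: mean CV accuracy {cv_acc:.3f}")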
# ##### Our decision tree model:
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
X_train = train_df.drop(["Survived"], axis=1).values
y_train = train_df["Survived"]
X_test = test_df.drop(["Parch_9"], axis=1).values
decision_tree = DecisionTreeClassifier(max_depth=3)
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_test)
from sklearn.metrics import accuracy_score
acc_decision_tree = round(decision_tree.score(X_train, y_train) * 100, 2)
acc_decision_tree
len(y_pred)
# ### Submission 1:
submission = pd.DataFrame()
submission["PassengerId"] = test["PassengerId"]
submission["Survived"] = y_pred
submission = submission.set_index("PassengerId")
submission.to_csv("titanic_submission_03022020.csv")
# ### Submission 2:
from sklearn.model_selection import GridSearchCV
params = {
"max_leaf_nodes": list(range(2, 100)),
"min_samples_split": [2, 3, 4],
"criterion": ["entropy", "gini"],
}
grid_search_cv = GridSearchCV(
DecisionTreeClassifier(random_state=42), params, verbose=1, cv=3
)
grid_search_cv.fit(X_train, y_train)
grid_search_cv.best_params_
tree_clf = DecisionTreeClassifier(
criterion="entropy", max_leaf_nodes=47, min_samples_split=4, random_state=42
)
tree_clf.fit(X_train, y_train)
y_pred1 = tree_clf.predict(X_test)
acc_decision_tree = round(tree_clf.score(X_train, y_train) * 100, 2)
acc_decision_tree
submission2 = pd.DataFrame()
submission2["PassengerId"] = test["PassengerId"]
submission2["Survived"] = y_pred1
submission2 = submission2.set_index("PassengerId")
submission2.to_csv("titanic_submission_03022020_sub2.csv")
# **Result:**
# * The train accuracy is very high (much higher than the first submission)
# * The score for submission 2 was 0.76555 which is lower than submission 1: 0.78947
# Why is this the case?
# * Overfitting
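# One way to check the overfitting suspicion (a minimal sketch, reusing the two trees fitted
# above; cross_val_score clones the estimators, so the fitted models are left untouched):
from sklearn.model_selection import cross_val_score
for name, clf in [
    ("submission 1 (max_depth=3)", decision_tree),
    ("submission 2 (grid search)", tree_clf),
]:
    cv_acc = cross_val_score(clf, X_train, y_train, cv=5).mean()
    print(
        name,
        "| train acc:", round(clf.score(X_train, y_train), 3),
        "| CV acc:", round(cv_acc, 3),
    )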
# ### Submission 3: Random Forest
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
params = {
"bootstrap": [True],
"max_depth": list(range(3, 11)),
"max_features": ["sqrt", "log2"],
"max_leaf_nodes": list(range(6, 10)),
}
grid_search = GridSearchCV(model, params, verbose=1)
grid_search.fit(X_train, y_train)
grid_search.best_params_
randomf_tree = RandomForestClassifier(
bootstrap=True, max_depth=9, max_features="log2", max_leaf_nodes=8, random_state=0
)
randomf_tree.fit(X_train, y_train)
y_pred2 = randomf_tree.predict(X_test)
acc_random_tree = round(randomf_tree.score(X_train, y_train) * 100, 2)
acc_random_tree
submission3 = pd.DataFrame()
submission3["PassengerId"] = test["PassengerId"]
submission3["Survived"] = y_pred2
submission3 = submission3.set_index("PassengerId")
submission3.to_csv("titanic_submission_03022020_sub3.csv")
# Result:
# * Score of 0.79425, which is higher than both previous submissions
# ### Again, but with SMOTE for oversampling:
from imblearn.over_sampling import SMOTE
os = SMOTE(random_state=0)
os_X_train, os_y_train = os.fit_sample(X_train, y_train)
params = {
"bootstrap": [True],
"max_depth": list(range(3, 11)),
"max_features": ["sqrt", "log2"],
"max_leaf_nodes": list(range(6, 10)),
}
grid_search = GridSearchCV(model, params, verbose=1)
grid_search.fit(os_X_train, os_y_train)
grid_search.best_params_
os_randomf_tree = RandomForestClassifier(
bootstrap=True, max_depth=9, max_features="log2", max_leaf_nodes=9, random_state=0
)
os_randomf_tree.fit(os_X_train, os_y_train)
y_pred3 = os_randomf_tree.predict(X_test)
acc_random_tree_os = round(os_randomf_tree.score(os_X_train, os_y_train) * 100, 2)
acc_random_tree_os
submission4 = pd.DataFrame()
submission4["PassengerId"] = test["PassengerId"]
submission4["Survived"] = y_pred3
submission4 = submission4.set_index("PassengerId")
submission4.to_csv("titanic_submission_04022020.csv")
# Conclusion:
# - 0.78468, which is not an improvement on the previous score;
# ### Submission 5:
params = {
"bootstrap": [True],
"max_depth": list(range(3, 16)),
"max_features": ["auto", "sqrt", "log2"],
"max_leaf_nodes": list(range(6, 20)),
}
grid_search = GridSearchCV(model, params, verbose=1, cv=10)
grid_search.fit(X_train, y_train)
grid_search.best_params_
randomf_tree_new = RandomForestClassifier(
bootstrap=True, max_depth=9, max_features="sqrt", max_leaf_nodes=18
)
randomf_tree_new.fit(X_train, y_train)
y_pred4 = randomf_tree_new.predict(X_test)
acc_random_tree_5 = round(randomf_tree_new.score(X_train, y_train) * 100, 2)
acc_random_tree_5
submission5 = pd.DataFrame()
submission5["PassengerId"] = test["PassengerId"]
submission5["Survived"] = y_pred4
submission5 = submission5.set_index("PassengerId")
submission5.to_csv("titanic_submission_04022020_2.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import regex
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# 1. /kaggle/input/nlp-getting-started/train.csv
# 2. /kaggle/input/nlp-getting-started/test.csv
# 3. /kaggle/input/nlp-getting-started/sample_submission.csv
train_data = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
train_data.head()
print(
"The train set contains {0} rows and {1} columns ".format(
train_data.shape[0], train_data.shape[1]
)
)
# Since the train set contains 5 columns including the target variable, I can do a preliminary analysis using the following approach:
#
# 1. Understand the number of tweets which are disaster related and which are not. Bar plots will do in this case.
# 2. See how keyword and location relate to the tweets and whether they would help in prediction or not.
# 3. Filter out the stop words (words that may not be needed for the analysis); a short sketch of this step follows this list.
# 4. Check the words present in each tweet and then analyse them based on the target variable.
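# A minimal sketch of the stop word filtering mentioned in point 3 above (this assumes the
# NLTK English stop word list is available in the environment):
from nltk.corpus import stopwords
stop_words = set(stopwords.words("english"))
example_tweet = train_data["text"].iloc[0]
print(example_tweet)
print(" ".join(w for w in example_tweet.split() if w.lower() not in stop_words))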
ax = sns.countplot(data=train_data, x=train_data["target"])
plt.xlabel("Target Variable- Disaster or not disaster tweet")
plt.ylabel("Count of tweets")
plt.title("Count of disaster and non-disaster tweets")
total = len(train_data)
for p in ax.patches:
ax.annotate(
"{:.1f}%".format(100 * p.get_height() / total),
(p.get_x() + 0.1, p.get_height() + 5),
)
# https://stackoverflow.com/questions/33179122/seaborn-countplot-with-frequencies
# Study the document for matplotlib
# Looking at the tweets, this can be treated as a somewhat imbalanced dataset. In my initial pass through the data I found that it needs to be cleaned **a lot**. For this, I am going column by column, i.e.
# **keyword --> location --> tweet**
# In the keyword column I found that there are %20 sequences between words. This probably comes from URL-encoded search strings, so I will replace it with *_* to make the values more readable.
train_data["keyword"] = train_data.keyword.str.replace("%20", "_")
plt.figure(figsize=(100, 50))
plt.xticks(rotation=90)
sns.countplot(data=train_data, y="keyword")
# sns.catplot(y="keyword", col="target",
# data=train_data, kind="count" ,height=100, aspect=1.3);
# not using the above plot as it is quite heavy in making the graph plot
|
import pandas as pd
import numpy as np
pic_data = pd.read_pickle("/kaggle/input/dataset-train/make_pic.pkl")
pic_data["pixcel"] = pic_data["pixcel"].apply(
lambda x: np.array(x) / 255 if type(x) == "list" else x
)
train = pd.read_json(
"../input/deepfake-detection-challenge/train_sample_videos/metadata.json"
).T
train["label"] = train["label"].apply(lambda x: 1 if x == "REAL" else 0)
train["video_file"] = train.index
train = pd.merge(train, pic_data, on="video_file")
from keras.preprocessing import image
bb = []
for u in range(len(train[train["label"] == 1]["pixcel"])):
datagen = image.ImageDataGenerator(rotation_range=20)
x = train[train["label"] == 1]["pixcel"].values[u].reshape(1, 128, 128, 3)
gen = datagen.flow(x, batch_size=1)
for i in range(3):
batches = next(gen)
gen_img = batches[0].astype(np.uint8)
bb.append(gen_img)
train = train.drop(["original", "split", "video_file"], axis=1)
a = pd.DataFrame()
a["pixcel"] = bb
a["label"] = 1
train = train.append(a)
train_data_box = []
for i in train["pixcel"].values:
train_data_box = np.append(train_data_box, i)
train_data_box = train_data_box.reshape(-1, 128, 128, 3)
label = train["label"]
label = pd.DataFrame(label.values)[0]
pic_data = pd.read_pickle("/kaggle/input/test-data/test_make_pic.pkl").rename(
columns={"video_file": "filename"}
)
sample = pd.read_csv("/kaggle/input/deepfake-detection-challenge/sample_submission.csv")
test_data = pd.merge(sample, pic_data, on="filename")
test_data_box = []
for i in test_data["pixcel"].values:
test_data_box = np.append(test_data_box, i)
test_data_box = test_data_box.reshape(-1, 128, 128, 3) / 255
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPool2D
from keras.optimizers import Adam
from keras.layers.core import Dense, Activation, Dropout, Flatten
import keras
from keras.layers import BatchNormalization
from IPython.display import display, HTML, clear_output
kf = KFold(n_splits=5, shuffle=True)
oof_pred = np.zeros((train_data_box.shape[0],))
y_pred = np.zeros((label.shape[0],))
y_pred = y_pred.astype("float")
pp = 0
for train_index, eval_index in kf.split(train_data_box):
x_train, x_test = train_data_box[train_index], train_data_box[eval_index]
y_train, y_test = label[train_index], label[eval_index]
x_train = x_train / 255
x_test = x_test / 255
    # Model definition
model = Sequential()
model.add(Conv2D(64, 3, input_shape=(128, 128, 3)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
# model.add(Dropout(0.3))
model.add(Conv2D(64, 3, input_shape=(128, 128, 3)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
# model.add(Dropout(0.3))
model.add(Conv2D(64, 3, input_shape=(128, 128, 3)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, 3, input_shape=(128, 128, 3)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(32))
model.add(Activation("relu"))
model.add(BatchNormalization())
# model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(32))
model.add(Activation("relu"))
model.add(BatchNormalization())
# model.add(Dropout(0.5))
model.add(Dense(32))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dense(32))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dense(1, activation="sigmoid"))
adam = Adam(lr=5e-3)
model.compile(optimizer=adam, loss="mean_squared_error", metrics=["accuracy"])
# model.summary()
# es_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
ep = 200
history = model.fit(
x_train,
y_train,
batch_size=32,
        epochs=ep,
verbose=1,
validation_data=(x_test, y_test),
) # ,callbacks=[es_cb])
clear_output()
    # Visualization
plt.plot(range(1, ep + 1), history.history["loss"], label="loss")
plt.plot(range(1, ep + 1), history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
oof_pred[eval_index] = y_test.values.reshape(oof_pred[eval_index].shape)
y_pred[eval_index] = model.predict(x_test).reshape(y_pred[eval_index].shape)
pp = model.predict(test_data_box) + pp
pp = pp / 5
import os
# json_string = model.to_json()
# open(os.path.join('cnn_model.json'), 'w').write(json_string)
# model.save_weights(os.path.join('cnn_model_weights.hdf5'))
sample["label"] = pp
sample.to_csv("submission.csv", index=False)
sample.head()
sample["label"].value_counts()
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import gamma, pareto
plt.style.use("ggplot")
# Insurance in general
#### https://www.gdv.de/de/zahlen-und-fakten/versicherungsbereiche/ueberblick-24074
beitraege = pd.read_csv(
"../input/versicherungends/Beitraege.csv",
header=[0, 1],
sep=";",
nrows=13,
decimal=",",
)
leistungen = pd.read_csv(
"../input/versicherungends/Leistungen.csv",
header=[0, 1],
sep=";",
nrows=13,
decimal=",",
)
beitraege.columns = ["VERSICHERUNGSSPARTE", "2017", "2018", "VERAENDERUNG"]
leistungen.columns = ["VERSICHERUNGSSPARTE", "2017", "2018", "VERAENDERUNG"]
for df in [beitraege, leistungen]:
for jahr in ["2017", "2018"]:
df[jahr] = df[jahr].str.replace(".", "").astype(int)
df.VERAENDERUNG = (
df.VERAENDERUNG.str.replace(",", ".").str.replace("%", "").astype(float) / 100
)
df.set_index("VERSICHERUNGSSPARTE", inplace=True)
beitraege
leistungen
df = pd.concat([beitraege["2018"], leistungen["2018"]], axis=1)
df.columns = ["BEITRAEGE", "LEISTUNGEN"]
df["LEISTUNGSQUOTE"] = df.LEISTUNGEN / df.BEITRAEGE
df
# Other property insurance (sonstige Sachversicherungen), e.g. mobile phone insurance
leistungsquote_sonstige_sachversicherungen = (7420 - 5969 - 1274) / (
11319 - 7669 - 3142
)
# leistungsquote_sonstige_sachversicherungen = 0.7 # overly optimistic scenario
print("Erwartungswert je eingesetzten Euro", leistungsquote_sonstige_sachversicherungen)
faktor_sonstige_sachversicherungen = 1 / leistungsquote_sonstige_sachversicherungen
faktor_sonstige_sachversicherungen
pd.Series(gamma.rvs(7, 1, size=1_000_000) * 20).hist(bins=100, figsize=(20, 9))
leistungen_handyversicherung = pd.Series(
np.append(gamma.rvs(7, 1, size=1_000_000) * 20, [0] * 9_000_000)
)
leistungen_handyversicherung.hist(bins=50, figsize=(20, 9))
durchschnittliche_leistung = leistungen_handyversicherung.mean()
print("Durchschnittliche Leistung", durchschnittliche_leistung)
durchschnittlicher_beitrag = (
leistungen_handyversicherung.mean() * faktor_sonstige_sachversicherungen
)
print("Durchschnittlicher Beitrag", durchschnittlicher_beitrag)
print(
"Durchschnittlicher Verlust",
durchschnittlicher_beitrag - durchschnittliche_leistung,
)
print(
"Durchschnittlicher Verlust pro Euro Beitrag",
1 - leistungsquote_sonstige_sachversicherungen,
)
# Break-even threshold: policyholders whose claims stay below the premium lose money
grenzwert_sonstige_sachversicherungen = durchschnittlicher_beitrag
print(
    "Anteil Versicherungsnehmer die Verlust machen",
    (leistungen_handyversicherung < grenzwert_sonstige_sachversicherungen).mean(),
)
for quote_ohne_leistung in [0.5, 0.6, 0.7, 0.8, 0.9]:
anzahl_mit_leistung = int((1 - quote_ohne_leistung) * 10_000_000)
anzahl_ohne_leistung = int(quote_ohne_leistung * 10_000_000)
leistungen_handyversicherung = pd.Series(
np.append(
gamma.rvs(7, 1, size=anzahl_mit_leistung) * 50, [0] * anzahl_ohne_leistung
)
)
grenzwert_sonstige_sachversicherungen = (
leistungen_handyversicherung.mean() * faktor_sonstige_sachversicherungen
)
quote_verlierer = (
leistungen_handyversicherung < grenzwert_sonstige_sachversicherungen
).mean()
print(
"Quote ohne Leistung", quote_ohne_leistung, ", Quote Verlierer", quote_verlierer
)
betrag_faktor_allgemein = 200
for alpha in [1.5, 2.0, 2.5, 3.0]:
print(alpha)
betrag_faktor = betrag_faktor_allgemein * ((alpha - 1) / alpha)
pareto_numbers = pd.Series((pareto.rvs(alpha, size=1_000_000) - 1) * betrag_faktor)
pareto_max = pareto_numbers.quantile(0.995)
pareto_numbers.hist(bins=np.linspace(0, pareto_max, 101), figsize=(20, 9))
plt.axvline(pareto.mean(alpha) * betrag_faktor, color="blue")
plt.axvline(pareto.mean(alpha) * betrag_faktor * (3142 / 1274), color="green")
plt.show()
|
# ### Importing the usual suspects
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use("ggplot")
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
stop = set(stopwords.words("english"))
import re
import string
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
pwd
# ### Reading Data and other checks.
data = pd.read_csv("train.csv")
data.head()
test = pd.read_csv("test.csv")
print("Rows = {}, Colums = {}".format(data.shape[0], data.shape[1]))
shape1 = (data.shape[0], data.shape[1])
data = data[data["text"].notnull()]
if data.shape == shape1:
print("Data Consistent")
else:
print("Data Inconsistent")
sns.set_style("whitegrid")
x = data.target.value_counts()
sns.barplot(x.index, x)
plt.gca().set_ylabel("samples")
print("0: Not Disaster Tweets, 1: Disaster Tweets")
def createCorpus(df, target):
corpus = []
for i in df[df["target"] == target]["text"]:
text = i.split()
corpus.extend(text)
return corpus
# ### Cleaning
df = pd.concat([data, test], sort=False)
df.shape
def remove_URL(text):
url = re.compile(r"https?://\S+|www\.\S+")
return url.sub(r"", text)
def remove_html(text):
html = re.compile(r"<.*?>")
return html.sub(r"", text)
def remove_emoji(text):
emoji_pattern = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"]+",
flags=re.UNICODE,
)
return emoji_pattern.sub(r"", text)
def remove_punct(text):
table = str.maketrans("", "", string.punctuation)
return text.translate(table)
df["text"] = df["text"].apply(lambda x: remove_URL(x))
df["text"] = df["text"].apply(lambda x: remove_html(x))
df["text"] = df["text"].apply(lambda x: remove_emoji(x))
df["text"] = df["text"].apply(lambda x: remove_punct(x))
from nltk import ngrams
def ngramCreation(notes, num_ngram=2):
if len(notes) == 0:
return ""
ngramList = []
for nrange in range(1, num_ngram + 1):
ngramss = ngrams(notes, nrange)
for grams in ngramss:
ngramList = ngramList + ["_".join(list(grams))]
return ngramList
def createCorpusNGrams(tweetCorpus, n):
s = [i.lower() for i in tweetCorpus]
s = [re.sub(r"[^a-zA-Z0-9\s]", " ", i) for i in s]
output = list(ngrams(s, n))
return output
try:
tweetCorpus = createCorpus(df, 1)
print("Success: Corpus Created\nTotal Words = {}".format(len(tweetCorpus)))
except:
print("Error: Corpus Creation Failed!")
bigrams = createCorpusNGrams(tweetCorpus, 2)
trigrams = createCorpusNGrams(tweetCorpus, 3)
|
# # Tutorial
# Basically just wrapped the model from https://github.com/qubvel/efficientnet into a dataset:
# - https://www.kaggle.com/guesejustin/efficientnet100minimal
# Hope you will enjoy!
# For further questions shoot me a message or https://www.linkedin.com/in/justin-guese/
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import sys
package_path = "/kaggle/input/efficientnet100minimal/"
sys.path.append(package_path)
import efficientnet.keras as efn
model = efn.EfficientNetB0(
include_top=False, input_shape=(128, 128, 3), pooling="avg", weights="imagenet"
)
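# A minimal usage sketch for the model created above (the random batch is purely
# illustrative): with include_top=False and pooling="avg", EfficientNetB0 returns one
# 1280-dimensional feature vector per input image.
dummy_batch = np.random.rand(2, 128, 128, 3).astype("float32")
features = model.predict(dummy_batch)
print(features.shape)  # expected: (2, 1280)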
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LassoCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
import lightgbm
from sklearn.ensemble import AdaBoostRegressor
import tensorflow as tf
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Input, Dense, Activation, Dropout
from tensorflow.keras.models import Model
from sklearn.metrics import explained_variance_score, mean_absolute_error
df = pd.read_csv("../input/predict-test-scores-of-students/test_scores.csv")
print(df.head())
print(df.describe())
print(df.isnull().sum())
print(df.dtypes)
print(df.school.unique())
corr = df.corr()
print(corr)
sns.heatmap(corr, annot=True, linewidth=0.5, linecolor="red")
plt.show()
for x in ["school"]:
for val in df[x].unique():
count = df[x].value_counts()[val]
percent = df[x].value_counts(normalize=True)[val] * 100
print(f"{val} - Count: {count}, Percentage: {percent:.2f}%")
print()
for x in ["school_setting", "school_type", "teaching_method", "gender", "lunch"]:
for val in df[x].unique():
count = df[x].value_counts()[val]
percent = df[x].value_counts(normalize=True)[val] * 100
print(f"{val} - Count: {count}, Percentage: {percent:.2f}%")
sns.distplot(df["n_student"], color="red")
sns.distplot(df["pretest"], color="green")
sns.distplot(df["posttest"], color="blue")
sns.distplot(df[["pretest", "posttest"]], color="magenta")
sns.pairplot(
df, x_vars=["n_student"], y_vars=["posttest"], height=8, aspect=1.5, kind="reg"
)
sns.pairplot(
df, x_vars=["n_student"], y_vars=["pretest"], height=8, aspect=1.5, kind="reg"
)
sns.displot(df, x="pretest", hue="n_student", kind="kde", palette="Set2")
sns.displot(df, x="posttest", hue="n_student", kind="kde", palette="Set2")
sns.lmplot(
x="pretest", y="posttest", hue="n_student", col="school", data=df, palette="Set2"
)
sns.lmplot(
x="pretest",
y="posttest",
hue="n_student",
col="school_setting",
data=df,
palette="Set2",
)
sns.relplot(
x="pretest", y="posttest", hue="n_student", col="gender", data=df, palette="Set2"
)
sns.relplot(
x="pretest",
y="posttest",
hue="n_student",
col="teaching_method",
kind="scatter",
data=df,
palette="Set2",
)
sns.barplot(x="n_student", y="pretest", hue="lunch", data=df, palette="Set2")
sns.barplot(x="n_student", y="posttest", hue="lunch", data=df, palette="Set2")
sns.relplot(
x="n_student",
y="pretest",
hue="gender",
style="lunch",
col="teaching_method",
ci=None,
kind="line",
data=df,
palette="Set2",
)
sns.relplot(
x="n_student",
y="posttest",
hue="gender",
style="lunch",
col="teaching_method",
ci=None,
kind="line",
data=df,
palette="Set2",
)
sns.pairplot(
df[
[
"school_setting",
"school_type",
"teaching_method",
"n_student",
"gender",
"lunch",
"pretest",
"posttest",
]
]
)
sns.pairplot(
df[
[
"school_setting",
"school_type",
"teaching_method",
"n_student",
"gender",
"lunch",
"pretest",
"posttest",
]
],
kind="kde",
)
df2 = df.drop(["classroom", "student_id"], axis=1)
print(df2.head())
features = pd.get_dummies(df2)
features.rename(
columns={
"school_type_Non-public": "school_type_Non_public",
"lunch_Does not qualify": "lunch_Does_not_qualify",
"lunch_Qualifies for reduced/free lunch": "lunch_Qualifies_for_reduced/free_lunch",
},
inplace=True,
)
print(features.head())
X = features.drop("posttest", axis=1)
y = features["posttest"]
print(y.head())
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
print(X_train)
print(X_train)
print(y_train)
print(y_train)
def test_score(y_test, y_pred):
"""Helper function for evaluation metrics."""
accuracy = explained_variance_score(y_test, y_pred) * 100
mae = round(mean_absolute_error(y_test, y_pred), 2)
print(f"""accuracy: {accuracy:.2f}""")
print(f"""MAE: {mae:.2f}""")
return accuracy
accuracy_scores = np.zeros(11, dtype="float64")
# Linear Regression
reg = LinearRegression().fit(X_train, y_train)
y_pred = reg.predict(X_test)
accuracy_scores[0] = test_score(y_test, y_pred)
# Lasso Regression
reg1 = LassoCV().fit(X_train, y_train)
y_pred1 = reg1.predict(X_test)
accuracy_scores[1] = test_score(y_test, y_pred1)
# Decision Tree Regression
reg2 = DecisionTreeRegressor().fit(X_train, y_train)
y_pred2 = reg2.predict(X_test)
accuracy_scores[2] = test_score(y_test, y_pred2)
# Support Vector Regressor
reg3 = SVR().fit(X_train, y_train)
y_pred3 = reg3.predict(X_test)
accuracy_scores[3] = test_score(y_test, y_pred3)
# Random Forest Regressor
reg4 = RandomForestRegressor().fit(X_train, y_train)
y_pred4 = reg4.predict(X_test)
accuracy_scores[4] = test_score(y_test, y_pred4)
# Gradient Boosting Regressor
reg5 = GradientBoostingRegressor()
# n_estimators=100, random_state=42
reg5.fit(X_train, y_train)
y_pred5 = reg5.predict(X_test)
accuracy_scores[5] = test_score(y_test, y_pred5)
# XGBoost Regressor
xg_model = XGBRegressor()
xg_model.fit(X_train, y_train)
xg_pred = xg_model.predict(X_test)
accuracy_scores[6] = test_score(y_test, xg_pred)
# LightGBM Regressor
lgb_model = lightgbm.LGBMRegressor()
lgb_model.fit(X_train, y_train)
lgb_pred = lgb_model.predict(X_test)
accuracy_scores[7] = test_score(y_test, lgb_pred)
# AdaBoost Regressor
ABR_model = AdaBoostRegressor()
ABR_model.fit(X_train, y_train)
ABR_pred = ABR_model.predict(X_test)
accuracy_scores[8] = test_score(y_test, ABR_pred)
# Regression with Tensorflow
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
input_layer = Input(shape=(X.shape[1],))
dense_layer_1 = Dense(100, activation="relu")(input_layer)
dense_layer_2 = Dense(50, activation="relu")(dense_layer_1)
dense_layer_3 = Dense(25, activation="relu")(dense_layer_2)
output = Dense(1)(dense_layer_3)
model = Model(inputs=input_layer, outputs=output)
model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mae"])
print(model.summary())
history = model.fit(
X_train, y_train, batch_size=128, epochs=100, verbose=1, validation_split=0.2
)
model.evaluate(X_test, y_test)
tensor_pred = model.predict(X_test)
accuracy_scores[9] = test_score(y_test, tensor_pred)
test_predictions = model.predict(X_test).flatten()
a = plt.axes(aspect="equal")
plt.scatter(y_test, tensor_pred)
plt.xlabel("True Values [MPG]")
plt.ylabel("Predictions [MPG]")
lims = [0, 100]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
error = test_predictions - y_test.values  # residuals of the neural network predictions
plt.hist(error, bins=25, color="red")
plt.xlabel("Prediction Error [posttest]")
_ = plt.ylabel("Count")
lgb_model = lightgbm.LGBMRegressor()
kfold_validation = KFold(10)
results = cross_val_score(lgb_model, X, y, cv=kfold_validation)
print(results)
print(np.mean(results))
# StratifiedKFold needs discrete class labels, so for this continuous target we use a shuffled KFold instead
skfold = KFold(n_splits=5, shuffle=True, random_state=42)
lgb_model = lightgbm.LGBMRegressor()
scores = cross_val_score(lgb_model, X, y, cv=skfold)
print(np.mean(scores))
## We use these parameters for the LightGBM regressor
## Hyper Parameter Optimization
n_estimators = [100, 500, 900, 1100, 1500] # Number of Decision Trees
max_depth = [2, 3, 5, 10, 15]
base_score = [0.25, 0.5, 0.75, 1]
booster = [
    "gbtree",
    "gblinear",
]  # 'gbtree' is the default; 'gblinear' is included just to see how it performs
learning_rate = [0.05, 0.1, 0.15, 0.20]
min_child_weight = [1, 2, 3, 4]
# Define the grid of hyperparameters to search
hyperparameter_grid = {
"n_estimators": n_estimators,
"max_depth": max_depth,
"learning_rate": learning_rate,
"min_child_weight": min_child_weight,
"booster": booster,
"base_score": base_score,
}
random_cv = RandomizedSearchCV(
estimator=lgb_model,
param_distributions=hyperparameter_grid,
cv=5,
n_iter=50,
scoring="neg_mean_absolute_error",
n_jobs=4,
verbose=5,
return_train_score=True,
random_state=42,
)
random_cv.fit(X_train, y_train)
random_cv.best_estimator_
lgb_reg = lightgbm.LGBMRegressor(
base_score=1,
booster="gbtree",
learning_rate=0.2,
max_depth=2,
min_child_weight=3,
n_estimators=1500,
)
lgb_reg.fit(X_train, y_train)
lgb_pred1 = lgb_reg.predict(X_test)
accuracy_scores[10] = test_score(y_test, lgb_pred1)
sns.set_style("whitegrid")
models = [
"Linear Regression",
"Lasso Regressor",
"Decision Tree Regressor",
"Support Vector Regressor",
"Random Forest Regressor",
"Gradient boost Regressor",
"XGBoost Regressor",
"LightGBM REgressor",
"Ada Boost Regressor",
"Tensor Regressor",
"XG Boost Hyper",
]
plt.figure(figsize=(11, 11))
sns.barplot(x=accuracy_scores, y=models)
plt.xlabel("Model_Name")
plt.xticks(rotation=-90)
plt.ylabel("Accuracy")
plt.show()
sns.set_style("whitegrid")
models = [
"Linear Regression",
"Lasso Regressor",
"Decision Tree Regressor",
"Support Vector Regressor",
"Random Forest Regressor",
"Gradient boost Regressor",
"XGBoost Regressor",
"LightGBM REgressor",
"Tensor Regressor",
"AdaBoost Regressor",
]
mae = ["2.50", "2.61", "3.23", "3.38", "2.63", "2.48", "2.48", "2.48", "2.62", "2.82"]
plt.figure(figsize=(11, 11))
sns.relplot(x=models, y=mae)
plt.xlabel("Model_Name")
plt.xticks(rotation=-90)
plt.ylabel("Accuracy")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
training_df = pd.read_csv("/kaggle/input/widsdatathon2020/training_v2.csv")
training_df.head()
training_df.shape
training_df.info(verbose=True)
training_df.isnull().sum()
# percentage of missing values in columns greater than 50%
null_cols = training_df.columns[
round(training_df.isnull().sum() / len(training_df.index) * 100, 2) > 50
].tolist()
null_cols
# deleting cols having missing %age greater than 50%
print(training_df.shape)
training_df = training_df.drop(null_cols, axis=1)
training_df.shape
# ## Dividing dataframe based on numeric and non-numeric columns
df_numeric = training_df.select_dtypes(
include=["int16", "int32", "int64", "float16", "float32", "float64"]
)
df_numeric.head()
# percentage of missing values in columns
null_cols = df_numeric.columns[df_numeric.isnull().any()].tolist()
print(null_cols)
print(len(null_cols))
# #### there are 95 columns with missing values
# #### dividing them into 5 parts and visualizing them using box plots
# dividing the null columns in 5 part
null_cols_1 = null_cols[:19]
null_cols_2 = null_cols[19:38]
null_cols_3 = null_cols[38:57]
null_cols_4 = null_cols[57:76]
null_cols_5 = null_cols[76:]
# Visualizing first part of null_cols which is null_cols_1 using box_plot
df_numeric[null_cols_1].plot(kind="box")
plt.gcf().set_size_inches(20, 10)
plt.show()
|
import pandas as pd
df = pd.read_csv("../input/autompg-dataset/auto-mpg.csv", na_values="?")
df.describe()
df.isnull().any()
horse_med = df["horsepower"].median()
print(horse_med)
df["horsepower"].fillna(horse_med, inplace=True)
df.isnull().any()
df.boxplot(column=["horsepower"])
df.horsepower.quantile([0.25, 0.5, 0.75])
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
import keras
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dropout, Conv2D, Flatten, Dense, MaxPool2D
data = []
for i in range(1, 501):
data.append(
np.array(Image.open("../input/anokhaalphatest/Train/" + str(i) + ".jpg"))
)
data = np.array(data)
data.shape
data = data / 255
data = data.reshape(500, 256, 256, 1)
data.shape
train = pd.read_csv("../input/anokhaalphatest/train.csv")
train
y = train.iloc[:, -1].values
y = keras.utils.to_categorical(y)
y.shape
xtrain, xtest, ytrain, ytest = train_test_split(data, y, test_size=0.2)
xtrain.shape, xtest.shape, ytrain.shape, ytest.shape
model = Sequential()
model.add(
Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=xtrain.shape[1:])
)
model.add(Dropout(0.2))
model.add(MaxPool2D((2, 2), strides=(2, 2)))
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu"))
model.add(Dropout(0.2))
model.add(MaxPool2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(3, activation="softmax"))
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(xtrain, ytrain, epochs=20, validation_split=0.2, batch_size=32)
ypred = model.predict(xtest)
ypred1 = ypred.argmax(axis=1)
ytest1 = ytest.argmax(axis=1)
accuracy_score(ytest1, ypred1), f1_score(ytest1, ypred1, average="macro")
model.save("model1.h5")
testy = pd.read_csv("../input/anokhaalphatest/test.csv")
testy
testdata = []
for i in range(1, 2565):
testdata.append(
np.array(Image.open("../input/anokhaalphatest/Test/" + str(i) + ".jpg"))
)
testdata = np.array(testdata)
testdata.shape
testdata = testdata / 255
testdata = testdata.reshape(2564, 256, 256, 1)
testdata.shape
ytestpred = model.predict(testdata)
ytestpred = ytestpred.argmax(axis=1)
ytestpred.shape
sample = pd.read_csv("../input/anokhaalphatest/SampleSolution.csv")
sample
result = pd.concat([pd.DataFrame(np.arange(1, 2565)), pd.DataFrame(ytestpred)], axis=1)
result.columns = ["id", "label"]
result
result.to_csv("results.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Other necessary libraries
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from sklearn.metrics import f1_score
import pyarrow.parquet as pq
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
# from tensorflow.keras.applications.vit import ViT
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# detect and init the TPU
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
# instantiate a distribution strategy
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
train_img0 = pd.read_parquet("../input/bengaliai-cv19/train_image_data_0.parquet")
train_img1 = pd.read_parquet("../input/bengaliai-cv19/train_image_data_1.parquet")
train_img2 = pd.read_parquet("../input/bengaliai-cv19/train_image_data_2.parquet")
train_img3 = pd.read_parquet("../input/bengaliai-cv19/train_image_data_3.parquet")
train_img0.shape
train_data = pd.concat([train_img0, train_img1, train_img2, train_img3])
train_data.shape
train_data.head()
img2 = train_data.iloc[:, 1:].values.reshape((-1, 137, 236, 1))
row = 2
col = 3
plt.figure(figsize=(20, (row / col) * 12))
for x in range(row * col):
plt.subplot(row, col, x + 1)
plt.imshow(img2[x, :, :, 0])
plt.show()
# Sometimes we may want to resize the images for later analysis. We can use the cv2 package for this.
# DIM = 64
# img3 = np.zeros((img2.shape[0],DIM,DIM,1),dtype='float32')
# for j in range(img2.shape[0]):
# img3[j,:,:,0] = cv2.resize(img2[j,],(DIM,DIM),interpolation = cv2.INTER_AREA)
# row=2; col=3;
# plt.figure(figsize=(20,(row/col)*12))
# for x in range(row*col):
# plt.subplot(row,col,x+1)
# plt.imshow(img3[x,:,:,0])
# plt.show()
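# A small, side-effect-free version of the commented sketch above: resize a single image
# with cv2 (area interpolation) and check the result without touching img2 itself.
resized_example = cv2.resize(img2[0], (64, 64), interpolation=cv2.INTER_AREA)
print(resized_example.shape)  # cv2.resize drops the trailing single-channel axis: (64, 64)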
train = pd.read_csv("/kaggle/input/bengaliai-cv19/train.csv")
train.head()
num_samples = img2.shape[0]
num_classes_1 = len(train["grapheme_root"].unique())
num_classes_2 = len(train["vowel_diacritic"].unique())
num_classes_3 = len(train["consonant_diacritic"].unique())
# num_classes_4 = len(train['grapheme'].unique())
X = img2
y1 = train["grapheme_root"].to_numpy()
y2 = train["vowel_diacritic"].to_numpy()
y3 = train["consonant_diacritic"].to_numpy()
# y4 = train['grapheme'].to_numpy()
y1 = to_categorical(y1, num_classes=num_classes_1)
y2 = to_categorical(y2, num_classes=num_classes_2)
y3 = to_categorical(y3, num_classes=num_classes_3)
# y4 = to_categorical(y4, num_classes=num_classes_4)
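# Shape check after one-hot encoding (sketch): each label array should be
# (num_samples, num_classes) and line up row-for-row with the image tensor X.
print(X.shape, y1.shape, y2.shape, y3.shape)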
X_train, X_val, y1_train, y1_val, y2_train, y2_val, y3_train, y3_val = train_test_split(
X, y1, y2, y3
)
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 256
num_epochs = 100
input_shape = (img2.shape[1], img2.shape[2], img2.shape[3])
image_size = 72 # We'll resize input images to this size
patch_size = 6 # Size of the patches to be extracted from the input images
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [
projection_dim * 2,
projection_dim,
] # Size of the transformer layers
transformer_layers = 8
mlp_head_units = [2048, 1024] # Size of the dense layers of the final classifier
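# Derived quantities implied by the settings above (sketch): with 72x72 inputs and
# 6x6 patches, each image becomes (72 // 6) ** 2 = 144 tokens of dimension 64.
print("patches per image:", num_patches)  # 144
print("token dimension:", projection_dim)  # 64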
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.Resizing(image_size, image_size),
layers.RandomFlip("horizontal"),
layers.RandomRotation(factor=0.02),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(X_train)
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
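# The helper above is the standard Transformer MLP block: for hidden_units=[128, 64]
# it stacks Dense(128, gelu) -> Dropout -> Dense(64, gelu) -> Dropout. A tiny eager
# shape check (sketch):
print(mlp(tf.zeros((2, 10)), hidden_units=[128, 64], dropout_rate=0.1).shape)  # (2, 64)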
class Patches(layers.Layer):
def __init__(self, patch_size):
super().__init__()
self.patch_size = patch_size
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=[1, self.patch_size, self.patch_size, 1],
strides=[1, self.patch_size, self.patch_size, 1],
rates=[1, 1, 1, 1],
padding="VALID",
)
patch_dims = patches.shape[-1]
patches = tf.reshape(patches, [batch_size, -1, patch_dims])
return patches
plt.figure(figsize=(4, 4))
image = X_train[np.random.choice(range(X_train.shape[0]))]
plt.imshow(image[:, :, 0].astype("uint8"))  # drop the channel axis so imshow gets a 2-D grayscale array
plt.axis("off")
resized_image = tf.image.resize(
tf.convert_to_tensor([image]), size=(image_size, image_size)
)
patches = Patches(patch_size)(resized_image)
print(f"Image size: {image_size} X {image_size}")
print(f"Patch size: {patch_size} X {patch_size}")
print(f"Patches per image: {patches.shape[1]}")
print(f"Elements per patch: {patches.shape[-1]}")
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
for i, patch in enumerate(patches[0]):
ax = plt.subplot(n, n, i + 1)
patch_img = tf.reshape(patch, (patch_size, patch_size, img2.shape[-1]))
    plt.imshow(patch_img.numpy()[:, :, 0].astype("uint8"))  # drop the channel axis for display
plt.axis("off")
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
self.position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=projection_dim
)
def call(self, patch):
positions = tf.range(start=0, limit=self.num_patches, delta=1)
encoded = self.projection(patch) + self.position_embedding(positions)
return encoded
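# Quick shape check of the encoder (a sketch with a dummy batch): the output should be
# (batch, num_patches, projection_dim), i.e. one projected, position-encoded vector per
# patch. The last dummy dimension is the flattened 6x6x1 patch produced by Patches.
_dummy = tf.zeros((2, num_patches, patch_size * patch_size * img2.shape[-1]))
print(PatchEncoder(num_patches, projection_dim)(_dummy).shape)  # (2, 144, 64)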
def create_vit_classifier():
inputs = layers.Input(shape=input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Create patches.
patches = Patches(patch_size)(augmented)
# Encode patches.
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
# Create multiple layers of the Transformer block.
for _ in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
# MLP.
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
encoded_patches = layers.Add()([x3, x2])
# Create a [batch_size, projection_dim] tensor.
representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.5)(representation)
# Add MLP.
features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
# Classify outputs.
logits_1 = layers.Dense(num_classes_1)(features)
logits_2 = layers.Dense(num_classes_2)(features)
logits_3 = layers.Dense(num_classes_3)(features)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=[logits_1, logits_2, logits_3])
return model
def run_experiment(model):
optimizer = tfa.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
    model.compile(
        # The three classification heads output raw logits (no softmax), so the
        # cross-entropy losses must be built with from_logits=True.
        loss=[
            keras.losses.CategoricalCrossentropy(from_logits=True),
            keras.losses.CategoricalCrossentropy(from_logits=True),
            keras.losses.CategoricalCrossentropy(from_logits=True),
        ],
        optimizer=optimizer,
        metrics=["accuracy"],
    )
history = model.fit(
X_train,
[y1_train, y2_train, y3_train],
validation_data=(X_val, [y1_val, y2_val, y3_val]),
batch_size=batch_size,
epochs=num_epochs,
)
return history
with tpu_strategy.scope():
vit_classifier = create_vit_classifier()
history = run_experiment(vit_classifier)
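# Visualize the training curves (a sketch): "loss" and "val_loss" are always present;
# the per-head keys follow Keras' default multi-output naming (e.g. "dense_N_loss"),
# so inspect history.history.keys() if you want to plot them individually.
plt.figure(figsize=(8, 4))
plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="val loss")
plt.legend()
plt.show()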
|