from fastai.imports import *
from fastai.vision.all import *
from fastai.vision.widgets import *
from PIL import Image
import numpy
PATH = "/kaggle/input/testset/data/validation"
bs = 128
im = Image.open(PATH + "/no_pools/img957.jpg")
im
data = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=Resize(50),
)
dls = data.dataloaders(PATH, bs=bs)
learn = load_learner("/kaggle/input/loadmodel5/firstModel5.pkl", cpu=False)
new_learn = Learner(
dls, learn.model, loss_func=CrossEntropyLossFlat(), metrics=accuracy
)
new_learn.model.load_state_dict(learn.model.state_dict())
val_loss, val_acc = new_learn.validate()
print(f"Validation Loss: {val_loss}, Validation Accuracy: {val_acc}")
def generate_grad_cam(learn, image_path):
    img = PILImage.create(image_path)
    (x,) = first(learn.dls.test_dl([img]))  # preprocess the image using the dataloader
    # hook the activations and the gradients of the model body
    with hook_output(learn.model[0]) as hook_a:
        with hook_output(learn.model[0], grad=True) as hook_g:
            preds = learn.model.eval()(x.cuda())  # x is already a batch of one image
            preds[:, preds.argmax(dim=1).item()].backward()  # backprop the top-class score
    acts = hook_a.stored[0].cpu()  # feature-map activations
    grad = hook_g.stored[0][0].cpu()  # gradients w.r.t. the feature maps
    grad_chan = grad.mean(1).mean(1)  # per-channel mean gradient (channel weights)
    mult = (acts * grad_chan[..., None, None]).mean(0)  # weighted activation map
    _, ax = plt.subplots()
    img.show(ctx=ax)
    ax.imshow(
        mult,
        alpha=0.6,
        extent=(0, img.size[0], img.size[1], 0),
        interpolation="bilinear",
        cmap="inferno",
    )
    plt.show()
# Replace 'your_image_path.jpg' with the path to the image you want to visualize
generate_grad_cam(learn, PATH + "/no_pools/img957.jpg")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# **Starting the analysis**
# importing some libraries
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from pylab import rcParams
# reading the file
df = pd.read_csv(
"/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv"
)
# **Data exploration**
# checking the columns
list(df.columns)
# checking the dimensions of the dataframe
df.shape
# We have 7043 observations and 21 columns
# Initial look at the dataframe
df.head(10).T
# **Note:** First of all, let's drop customerID because it will not affect the churn probability of the customers.
df.drop(["customerID"], axis=1, inplace=True)
# dataframe without the customerID column
df.head(10).T
# **Checking the target variable we want to predict**
df.Churn.value_counts()
# Plotting the class distribution of the target variable
label = df["Churn"].value_counts(sort=True).index
sizes = df["Churn"].value_counts(sort=True)
colors = ["Whitesmoke", "red"]
explode = (0.1, 0)
rcParams["figure.figsize"] = 8, 8
plt.pie(
sizes,
explode=explode,
labels=label,
colors=colors,
autopct="%1.1f%%",
shadow=True,
startangle=270,
)
plt.title("Porcentagem de rotatividade entre os clientes")
plt.show()
# As we can see here, the classes of this variable are imbalanced, so we will need to address this before making predictions.
# **Missing values**
# Machine learning models usually do not handle missing values well, so let's check whether any exist.
df.info()
total = (
df.isnull().sum().sort_values(ascending=False)
) # total number of missing values per column
percent = df.isnull().sum() / df.isnull().count().sort_values(
ascending=False
) # dividing the number of missing values by the number of observations
missing_data = pd.concat(
[total, percent], axis=1, keys=["Total", "Porcentagem"]
) # concatenating the totals with the percentages
display(missing_data)
# As we can see, there are apparently no missing values; however, there are quite a few object-type columns, either because pandas did not recognize the actual data type or because the data is stored as text. Let's convert these columns to numeric, since machine learning models in general do not handle categorical data very well.
# **Preprocessing the data.**
# Churn: Let's start with the target variable, mapping it to 0 when the value is 'No' and 1 when the value is 'Yes'.
df.Churn = df.Churn.map(lambda s: 1 if s == "Yes" else 0)
# Contract: we will one-hot encode this variable.
# checking the possible values of the Contract variable
df.Contract.value_counts()
df = pd.get_dummies(data=df, columns=["Contract"])
# Dependents: map 'Yes' to 1 and 'No' to 0
df.Dependents.value_counts()
df.Dependents = df.Dependents.map(lambda s: 1 if s == "Yes" else 0)
# DeviceProtection: here we treat 'No internet service' as 'No' and assign 0 to the variable.
df.DeviceProtection.value_counts()
df.DeviceProtection = df.DeviceProtection.map(lambda s: 1 if s == "Yes" else 0)
# InternetService: from this column we will create three new ones, indicating whether the customer has internet at all and whether the connection is DSL or fiber optic
df.InternetService.value_counts()
df["TemInternet"] = df.InternetService.map(lambda s: 0 if s == "No" else 1)
df["DSL"] = df.InternetService.map(lambda s: 1 if s == "DSL" else 0)
df["FibraOptica"] = df.InternetService.map(lambda s: 1 if s == "Fiber optic" else 0)
# since the information from this column is now in other columns, it is no longer needed.
df.drop(["InternetService"], axis=1, inplace=True)
# MonthlyCharges: this column is already numeric, so in principle we do not need to touch it.
df.MonthlyCharges.head(5)
# MultipleLines: since we already have a column indicating whether the customer has phone service, we will only keep the information about whether they have multiple lines.
df.MultipleLines.value_counts()
df.MultipleLines = df.MultipleLines.map(lambda s: 1 if s == "Yes" else 0)
# OnlineBackup: assign 1 if the customer has the online backup service, otherwise 0
df.OnlineBackup.value_counts()
df.OnlineBackup = df.OnlineBackup.map(lambda s: 1 if s == "Yes" else 0)
# OnlineSecurity: assign 1 if the customer has the online security service, otherwise 0
df.OnlineSecurity.value_counts()
df.OnlineSecurity = df.OnlineSecurity.map(lambda s: 1 if s == "Yes" else 0)
# PaperlessBilling: 1 for 'Yes', 0 for 'No'
df.PaperlessBilling.value_counts()
df.PaperlessBilling = df.PaperlessBilling.map(lambda s: 1 if s == "Yes" else 0)
# Partner: 1 for 'Yes', 0 for 'No'
df.Partner.value_counts()
df.Partner = df.Partner.map(lambda s: 1 if s == "Yes" else 0)
# PaymentMethod: we will one-hot encode this variable.
df.PaymentMethod.value_counts()
df = pd.get_dummies(data=df, columns=["PaymentMethod"])
# PhoneService: 1 for 'Yes', 0 for 'No'
df.PhoneService.value_counts()
df.PhoneService = df.PhoneService.map(lambda s: 1 if s == "Yes" else 0)
# SeniorCitizen: 1 for 'Yes', 0 for 'No'
df.SeniorCitizen.value_counts()
df.SeniorCitizen = df.SeniorCitizen.map(lambda s: 1 if s == "Yes" else 0)
# StreamingMovies: 1 for 'Yes', otherwise 0
df.StreamingMovies.value_counts()
df.StreamingMovies = df.StreamingMovies.map(lambda s: 1 if s == "Yes" else 0)
# StreamingTV: 1 for 'Yes', otherwise 0
df.StreamingTV.value_counts()
df.StreamingTV = df.StreamingTV.map(lambda s: 1 if s == "Yes" else 0)
# TechSupport: 1 for 'Yes', otherwise 0
df.TechSupport.value_counts()
df.TechSupport = df.TechSupport.map(lambda s: 1 if s == "Yes" else 0)
# TotalCharges: this column should be numeric; however, looking closely, there are 11 observations that are not numbers, possibly blank spaces.
df.TotalCharges.value_counts()
(df.TotalCharges == " ").sum()
# There are 11 observations with this blank value; I chose to drop them.
df = df[df.TotalCharges != " "]
# converting the column to numeric
df.TotalCharges = pd.to_numeric(df.TotalCharges)
# gender: I will one-hot encode this variable.
df.gender.value_counts()
df = pd.get_dummies(data=df, columns=["gender"])
# Tenure:
df.tenure.head()
# Now let's check what our dataframe looks like
df.info()
df.head().T
df.shape
# Now that the data is properly prepared, let's start working on the model.
# Importing the libraries we will use for the model
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, GridSearchCV, train_test_split
from sklearn.metrics import classification_report
from imblearn.over_sampling import SMOTE
# from imblearn.under_sampling import NearMiss
# Separating the predictor variables from the target variable.
#
X = df.drop(["Churn"], axis=1)
y = df.Churn
# splitting the dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# We will build a simple model to serve as a baseline.
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
# predicting values with the trained model
rf_pred = rf.predict(X_test)
print(classification_report(y_test, rf_pred))
# As we can see, we achieved 78% accuracy, but recall on the positive class is quite low because of the class imbalance.
# For this reason it will be worth both balancing the classes and switching the evaluation method to cross-validation, for example, to avoid the model learning only one particular class.
# First, let's balance the classes
df.Churn.value_counts()
# I oversampled the minority class up to 80% of the majority class, to avoid overfitting
smt = SMOTE(sampling_strategy=0.80)
X, y = smt.fit_resample(X, y)  # fit_sample was renamed to fit_resample in imbalanced-learn
y.value_counts()
# Now let's find the best parameters to tune the model using GridSearchCV
print([{0: w, 1: z} for w, z in zip([1, 1.5, 1], [1.5, 1, 1])])  # candidate class_weight dictionaries
param_grid = {
"n_estimators": [300, 500, 1200],
"criterion": ("gini", "entropy"),
"max_features": ("log2", "sqrt"),
"class_weight": [{w: z} for w, z in zip([1, 1.5, 1], [1.5, 1, 1])],
}
grid_rf = GridSearchCV(RandomForestClassifier(random_state=42, n_jobs=-1), param_grid)
grid_rf.fit(X, y)
print("\nOs melhores parametros foram: \n" + str(grid_rf.best_params_))
rf = RandomForestClassifier(random_state=42, **grid_rf.best_params_)
rf.fit(X, y)
cv = cross_val_score(rf, X, y, cv=10)
print(cv)
print(cv.mean())
rf_pred = rf.predict(X)
print(classification_report(y, rf_pred))
print(pd.crosstab(y, rf_pred, rownames=["Real"], colnames=["Predito"], margins=True))
# Influence of each variable on the probability of customer churn.
rf_feat = pd.DataFrame(rf.feature_importances_)
rf_feat["Features"] = list(df.drop(labels=["Churn"], axis=1).columns)
rf_feat.sort_values(by=0, ascending=False).head()
g = sns.barplot(
    x=0,
    y="Features",
    data=rf_feat.sort_values(by=0, ascending=False)[0:5],
    palette="Pastel1",
    orient="h",
)
g.set_xlabel("Peso")
g = g.set_title("Random Forest")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
economic_freedom = pd.read_csv(
"../input/the-economic-freedom-index/economic_freedom_index2019_data.csv",
encoding="latin-1",
)
economic_freedom.head(10)
trade_freedom = []
for x in economic_freedom["Trade Freedom"].iteritems():
temp = x[1]
if np.isnan(temp):
continue
else:
trade_freedom.append(temp)
world_rank = []
for x in economic_freedom["World Rank"].iteritems():
temp = x[1]
if np.isnan(temp):
continue
else:
world_rank.append(temp)
# look at mismatched length
print(len(world_rank))
print(len(trade_freedom))
# Delete excess data
del trade_freedom[len(trade_freedom) - 2 : len(trade_freedom)]
from scipy import stats
pearson_coef, p_value = stats.pearsonr(world_rank, trade_freedom)
# Pearson coefficient / correlation coefficient - how much are the two columns correlated?
print(pearson_coef)
# P-value - how sure are we about this correlation?
print(p_value)
economic_freedom.corr(numeric_only=True)  # correlate only the numeric columns
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
ax = sns.regplot(
x=world_rank,
y=trade_freedom,
)
plt.ylabel("Trade Freedom", fontsize=10)
plt.xlabel("World Rank", fontsize=10)
plt.title("Trade Freedom vs. World Rank", fontsize=10)
# Slight negative correlation; interesting outlier at 0 trade freedom and world rank around 178: North Korea
economic_freedom.loc[economic_freedom["Trade Freedom"] < 20]
# libraries
import matplotlib.pyplot as plt
import numpy as np
# create data, aligning the three columns on rows where all of them are present
bubble = economic_freedom[["World Rank", "Trade Freedom", "GDP Growth Rate (%)"]].dropna()
x = bubble["World Rank"]
y = bubble["Trade Freedom"]
z = bubble["GDP Growth Rate (%)"]
# use the scatter function
plt.scatter(x, y, s=z * 1000, alpha=0.5)
# plt.show()
plt.ylabel("Trade Freedom", fontsize=10)
plt.xlabel("World Rank", fontsize=10)
plt.title("Trade Freedom vs. World Rank (with GDP Growth)", fontsize=10)
# Bubble Size represents GDP Growth Rate (%), Interesting that one of the higher world ranks has a large growth rate (outlier)
economic_freedom.loc[economic_freedom["GDP Growth Rate (%)"] > 70]
# Libya has the highest GDP Growth Rate, but is not represented on the graph because its world rank is NaN
import matplotlib.pyplot as plt
# library & dataset
import seaborn as sns
# Basic 2D density plot
sns.set_style("white")
sns.kdeplot(x=world_rank, y=trade_freedom)
plt.ylabel("Trade Freedom", fontsize=10)
plt.xlabel("World Rank", fontsize=10)
plt.title("Trade Freedom vs. World Rank", fontsize=10)
|
# ### Business Problem
# FLO, an online shoe store, wants to segment its customers and define marketing strategies according to these segments. To that end, customer behaviours will be described and
# groups will be formed based on the clusters in these behaviours.
# ### Dataset Story
# The dataset consists of information derived from the past shopping behaviour of customers who made their last purchases from FLO in 2020-2021 as OmniChannel shoppers (shopping both online and offline).
import pandas as pd
import datetime as dt
pd.set_option("display.max_columns", None)
pd.set_option("display.float_format", lambda x: "%.2f" % x)
pd.set_option("display.width", 1000)
df_ = pd.read_csv("/kaggle/input/flodata/flo_data_20k.csv")
df = df_.copy()
def check_df(dataframe, head=5):
print("########Shape########")
print(dataframe.shape)
print("########Types########")
print(dataframe.dtypes)
print("########Head#########")
print(dataframe.head(head))
print("##########NA#########")
print(dataframe.isnull().sum())
print("#######Describe#####")
print(
dataframe.describe(
[0.05, 0.1, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
).T
)
check_df(df)
df["total_order"] = (
df["order_num_total_ever_online"] + df["order_num_total_ever_offline"]
)
df["total_customer_value"] = (
df["customer_value_total_ever_offline"] + df["customer_value_total_ever_online"]
)
df.head()
# Let's examine the variable types and convert the variables that represent dates to the date type.
date_columns = df.columns[df.columns.str.contains("date")]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
df.info()
# Let's look at the distribution of the number of customers, the total number of products purchased and the total spending across the shopping channels.
df.groupby("order_channel").agg(
{"master_id": "count", "total_order": "sum", "total_customer_value": "sum"}
)
# #### Calculating the RFM Metrics
# Recency: time elapsed since the customer's last purchase
# Frequency: how often the customer makes purchases
# Monetary: how much revenue the customer has generated
# Let's calculate the Recency, Frequency and Monetary metrics per customer.
last_date = df["last_order_date"].max()
today_date = dt.datetime(2021, 6, 1)  # taken as 2 days after the last order date
df.groupby("master_id").agg(
{
"last_order_date": lambda x: (today_date - x.max()).days,
"total_order": lambda x: x.max(),
"total_customer_value": lambda x: x.max(),
}
)
# Let's assign the metrics we calculated to a variable named rfm.
rfm = df.groupby("master_id").agg(
{
"last_order_date": lambda x: (today_date - x.max()).days,
"total_order": lambda x: x.max(),
"total_customer_value": lambda x: x.max(),
}
)
rfm.reset_index(inplace=True)
rfm.head()
rfm.columns = ["master_id", "recency", "frequency", "monetary"]
rfm.head()
# #### Calculating the RF Score
# Let's convert the Recency, Frequency and Monetary metrics into scores between 1 and 5 with the help of qcut.
rfm["recency_score"] = pd.qcut(rfm["recency"], 5, labels=[5, 4, 3, 2, 1])
rfm["frequency_score"] = pd.qcut(
rfm["frequency"].rank(method="first"), 5, labels=[1, 2, 3, 4, 5]
)
rfm["monetary_score"] = pd.qcut(rfm["monetary"], 5, labels=[1, 2, 3, 4, 5])
# Let's express recency_score and frequency_score as a single variable and save it as RF_SCORE.
rfm["RF_SCORE"] = rfm["recency_score"].astype("str") + rfm["frequency_score"].astype(
"str"
)
# #### Mapping the RF Score to Segments
seg_map = {
r"[1-2][1-2]": "hibernating",
r"[1-2][3-4]": "at_risk",
r"[1-2]5": "cant_loose",
r"3[1-2]": "about_to_sleep",
r"33": "need_attention",
r"[3-4][4-5]": "loyal_customers",
r"41": "promising",
r"51": "new_customers",
r"[4-5][2-3]": "potential_loyalists",
r"5[4-5]": "champions",
}
rfm["SEGMENT"] = rfm["RF_SCORE"].replace(seg_map, regex=True)
rfm.head()
# Let's examine the recency, frequency and monetary averages of the segments.
rfm.groupby("SEGMENT").agg(
{
"recency": ["mean", "count"],
"frequency": ["mean", "count"],
"monetary": ["mean", "count"],
}
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.feature_selection import SelectKBest
# from sklearn.model_selection import cross_validation, metrics
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("../input/titanic/train.csv", dtype={"Age": np.float64})
test = pd.read_csv("../input/titanic/test.csv", dtype={"Age": np.float64})
PassengerId = test["PassengerId"]
all_data = pd.concat([train, test], ignore_index=True)
train.info()
# Data visualization
sns.barplot(x="Sex", y="Survived", data=train)
sns.barplot(x="Pclass", y="Survived", data=train, palette="Set3")
facet = sns.FacetGrid(train, hue="Survived", aspect=2)
facet.map(sns.kdeplot, "Age", shade=True)
facet.set(xlim=(0, train["Age"].max()))
facet.add_legend()
sns.barplot(x="SibSp", y="Survived", data=train, palette="Set3")
sns.barplot(x="Parch", y="Survived", data=train, palette="Set3")
train["Fare"].describe()
facet = sns.FacetGrid(train, hue="Survived", aspect=2)
facet.map(sns.kdeplot, "Fare", shade=True)
facet.set(xlim=(0, 200))
facet.add_legend()
train["fareclass"] = train["Fare"].apply(lambda x: 1 if x > 20 else 0)
train["fareclass"].value_counts()
sns.barplot(x="fareclass", y="Survived", data=train, palette="Set3")
sns.barplot(x="Embarked", y="Survived", data=train, palette="Set3")
train["hasCabin"] = train["Cabin"].apply(lambda x: 1 if pd.notnull(x) else 0)
sns.barplot(x="hasCabin", y="Survived", data=train, palette="Set3")
all_data["Cabin"] = all_data["Cabin"].fillna("Unknow")
all_data["Deck"] = all_data["Cabin"].str.get(0)
sns.barplot(x="Deck", y="Survived", data=all_data, palette="Set3")
all_data["Title"] = all_data["Name"].apply(
lambda x: x.split(",")[1].split(".")[0].strip()
)
Title_Dict = {}
Title_Dict.update(dict.fromkeys(["Capt", "Col", "Major", "Dr", "Rev"], "Officer"))
Title_Dict.update(
dict.fromkeys(["Don", "Sir", "the Countess", "Dona", "Lady"], "Royalty")
)
Title_Dict.update(dict.fromkeys(["Mme", "Ms", "Mrs"], "Mrs"))
Title_Dict.update(dict.fromkeys(["Mlle", "Miss"], "Miss"))
Title_Dict.update(dict.fromkeys(["Mr"], "Mr"))
Title_Dict.update(dict.fromkeys(["Master", "Jonkheer"], "Master"))
all_data["Title"] = all_data["Title"].map(Title_Dict)
sns.barplot(x="Title", y="Survived", data=all_data, palette="Set3")
# 2. Creating new features
all_data["FamilySize"] = all_data["SibSp"] + all_data["Parch"] + 1
sns.barplot(x="FamilySize", y="Survived", data=all_data, palette="Set3")
def Fam_label(s):
if (s >= 2) & (s <= 4):
return 2
elif ((s > 4) & (s <= 7)) | (s == 1):
return 1
elif s > 7:
return 0
all_data["FamilyLabel"] = all_data["FamilySize"].apply(Fam_label)
sns.barplot(x="FamilyLabel", y="Survived", data=all_data, palette="Set3")
Ticket_Count = dict(all_data["Ticket"].value_counts())
all_data["TicketGroup"] = all_data["Ticket"].apply(lambda x: Ticket_Count[x])
sns.barplot(x="TicketGroup", y="Survived", data=all_data, palette="Set3")
def ticket_label(s):
if (s >= 2) & (s <= 4):
return 2
elif ((s > 4) & (s <= 8)) | (s == 1):
return 1
elif s > 8:
return 0
all_data["TicketLabel"] = all_data["TicketGroup"].apply(ticket_label)
sns.barplot(x="TicketLabel", y="Survived", data=all_data, palette="Set3")
# Filling missing values
for col in all_data.columns:
if all_data[col].isnull().sum() > 0:
print("{} is lack of {}".format(col, all_data[col].isnull().sum()))
from sklearn.model_selection import train_test_split
train = all_data[all_data["Survived"].notnull()]
test = all_data[all_data["Survived"].isnull()]
# Split the data into training and CV sets at a 1:1 ratio
train_split_1, train_split_2 = train_test_split(train, test_size=0.5, random_state=0)
def predict_age_use_cross_validationg(df1, df2, dfTest):
age_df1 = df1[["Age", "Pclass", "Sex", "Title"]]
age_df1 = pd.get_dummies(age_df1)
age_df2 = df2[["Age", "Pclass", "Sex", "Title"]]
age_df2 = pd.get_dummies(age_df2)
    known_age = age_df1[age_df1.Age.notnull()].values
    unknow_age_df1 = age_df1[age_df1.Age.isnull()].values
    unknown_age = age_df2[age_df2.Age.isnull()].values
print(unknown_age.shape)
y = known_age[:, 0]
X = known_age[:, 1:]
rfr = RandomForestRegressor(random_state=0, n_estimators=100, n_jobs=-1)
rfr.fit(X, y)
predictedAges = rfr.predict(unknown_age[:, 1::])
df2.loc[(df2.Age.isnull()), "Age"] = predictedAges
predictedAges = rfr.predict(unknow_age_df1[:, 1::])
df1.loc[(df1.Age.isnull()), "Age"] = predictedAges
    # model fitted on df1 fills the missing ages in df1 and df2; a model refitted on df2 plus dfTest's known ages then fills dfTest
age_Test = dfTest[["Age", "Pclass", "Sex", "Title"]]
age_Test = pd.get_dummies(age_Test)
age_Tmp = df2[["Age", "Pclass", "Sex", "Title"]]
age_Tmp = pd.get_dummies(age_Tmp)
age_Tmp = pd.concat([age_Test[age_Test.Age.notnull()], age_Tmp])
    known_age1 = age_Tmp.values
    unknown_age1 = age_Test[age_Test.Age.isnull()].values
y = known_age1[:, 0]
x = known_age1[:, 1:]
rfr.fit(x, y)
predictedAges = rfr.predict(unknown_age1[:, 1:])
dfTest.loc[(dfTest.Age.isnull()), "Age"] = predictedAges
return dfTest
t1 = train_split_1.copy()
t2 = train_split_2.copy()
tmp1 = test.copy()
t5 = predict_age_use_cross_validationg(t1, t2, tmp1)
t1 = pd.concat([t1, t2])
t3 = train_split_1.copy()
t4 = train_split_2.copy()
tmp2 = test.copy()
t6 = predict_age_use_cross_validationg(t4, t3, tmp2)
t3 = pd.concat([t3, t4])
train["Age"] = (t1["Age"] + t3["Age"]) / 2
test["Age"] = (t5["Age"] + t6["Age"]) / 2
all_data = pd.concat([train, test])
print(train.describe())
print(test.describe())
all_data[all_data["Embarked"].isnull()]
all_data["Embarked"].value_counts()
all_data["Embarked"] = all_data["Embarked"].fillna("C")
fare = all_data[(all_data["Embarked"] == "S") & (all_data["Pclass"] == 3)].Fare.median()
all_data["Fare"] = all_data["Fare"].fillna(fare)
fare
# Identify passengers in the same group
all_data["Surname"] = all_data["Name"].apply(lambda x: x.split(",")[0].strip())
Surname_Count = dict(all_data["Surname"].value_counts())
all_data["FamilyGroup"] = all_data["Surname"].apply(lambda x: Surname_Count[x])
Female_Child_Group = all_data.loc[
(all_data["FamilyGroup"] >= 2)
& ((all_data["Age"] <= 12) | (all_data["Sex"] == "female"))
]
Male_Adult_Group = all_data.loc[
(all_data["FamilyGroup"] >= 2)
& (all_data["Age"] > 12)
& (all_data["Sex"] == "male")
]
Female_Child = pd.DataFrame(
Female_Child_Group.groupby("Surname")["Survived"].mean().value_counts()
)
Female_Child.columns = ["GroupCount"]
Female_Child
Female_Child_Group.groupby("Surname")["Survived"].mean().value_counts()
Male_Adult = pd.DataFrame(
Male_Adult_Group.groupby("Surname")["Survived"].mean().value_counts()
)
Male_Adult.columns = ["GroupCount"]
Male_Adult
Female_Child_Group = Female_Child_Group.groupby("Surname")["Survived"].mean()
Dead_List = set(Female_Child_Group[Female_Child_Group.apply(lambda x: x == 0)].index)
print(Dead_List)
Male_Adult_List = Male_Adult_Group.groupby("Surname")["Survived"].mean()
Survived_List = set(Male_Adult_List[Male_Adult_List.apply(lambda x: x == 1)].index)
print(Survived_List)
train = all_data.loc[all_data["Survived"].notnull()]
test = all_data.loc[all_data["Survived"].isnull()]
test.loc[(test["Surname"].apply(lambda x: x in Dead_List)), "Sex"] = "male"
test.loc[(test["Surname"].apply(lambda x: x in Dead_List)), "Age"] = 60
test.loc[(test["Surname"].apply(lambda x: x in Dead_List)), "Title"] = "Mr"
test.loc[(test["Surname"].apply(lambda x: x in Survived_List)), "Sex"] = "female"
test.loc[(test["Surname"].apply(lambda x: x in Survived_List)), "Age"] = 5
test.loc[(test["Surname"].apply(lambda x: x in Survived_List)), "Title"] = "Miss"
all_data.columns
all_data = pd.concat([train, test])
all_data = all_data[
[
"Survived",
"Pclass",
"Sex",
"Age",
"Fare",
"Embarked",
"Title",
"FamilyLabel",
"Deck",
"TicketLabel",
]
]
all_data = pd.get_dummies(all_data)
train = all_data[all_data["Survived"].notnull()]
test = all_data[all_data["Survived"].isnull()].drop("Survived", axis=1)
X = train.values[:, 1:]
y = train.values[:, 0]
train.shape
from sklearn.model_selection import GridSearchCV, StratifiedKFold
n_fold = StratifiedKFold(n_splits=3)
pipe = Pipeline(
[
("select", SelectKBest(k=20)),
(
"classify",
RandomForestClassifier(
random_state=10, max_features="sqrt", oob_score=True
),
),
]
)
param_test = {
"classify__n_estimators": [30],
"classify__max_depth": [6],
}
gsearch = GridSearchCV(
estimator=pipe,
param_grid=param_test,
scoring="roc_auc",
cv=n_fold,
n_jobs=10,
verbose=1,
)
gsearch.fit(X, y)
RFC_best = gsearch.best_estimator_
print(gsearch.best_params_, gsearch.best_score_)
RFC_best.fit(X, y)
RFC_best.score(X, y)
select = SelectKBest(k=20)
clf = RandomForestClassifier(
random_state=10,
warm_start=True,
n_estimators=30,
max_depth=6,
max_features="sqrt",
)
pipeline = make_pipeline(select, clf)
pipeline.fit(X, y)
predictions = pipeline.predict(test)
submission = pd.DataFrame(
{"PassengerId": PassengerId, "Survived": predictions.astype(np.int32)}
)
submission.to_csv("submission.csv", index=False)
|
#
# # **kNN Classifier Tutorial in Python**
# Hello friends,
# kNN or k-Nearest Neighbours Classifier is a very simple and easy to understand machine learning algorithm. In this kernel, I build a k Nearest Neighbours classifier to classify the patients suffering from Breast Cancer.
# So, let's get started.
# **I hope you find this kernel useful and some UPVOTES would be highly appreciated**.
# # **Table of Contents**
# 1. [Introduction to k Nearest Neighbours Algorithm](#1)
# 2. [k Nearest Neighbours intuition](#2)
# 3. [How to decide the number of neighbours in kNN](#3)
# 4. [Eager learners vs lazy learners](#4)
# 5. [Import libraries](#5)
# 6. [Import dataset](#6)
# 7. [Exploratory data analysis](#7)
# 8. [Data visualization](#8)
# 9. [Declare feature vector and target variable](#9)
# 10. [Split data into separate training and test set](#10)
# 11. [Feature engineering](#11)
# 12. [Feature scaling](#12)
# 13. [Fit Neighbours classifier to the training set](#13)
# 14. [Predict the test-set results](#14)
# 15. [Check the accuracy score](#15)
# 16. [Rebuild kNN classification model using different values of k](#16)
# 17. [Confusion matrix](#17)
# 18. [Classification metrics](#18)
# 19. [ROC - AUC](#19)
# 20. [k-Fold Cross Validation](#20)
# 21. [Results and conclusion](#21)
# 22. [References](#22)
# # **1. Introduction to k Nearest Neighbours algorithm**
# [Table of Contents](#0.1)
# In machine learning, k Nearest Neighbours or kNN is the simplest of all machine learning algorithms. It is a non-parametric algorithm used for classification and regression tasks. Non-parametric means there is no assumption required for data distribution. So, kNN does not require any underlying assumption to be made. In both classification and regression tasks, the input consists of the k closest training examples in the feature space. The output depends upon whether kNN is used for classification or regression purposes.
# - In kNN classification, the output is a class membership. The given data point is classified based on the majority class of its neighbours. The data point is assigned to the most frequent class among its k nearest neighbours. Usually k is a small positive integer. If k=1, then the data point is simply assigned to the class of that single nearest neighbour.
# - In kNN regression, the output is simply some property value for the object. This value is the average of the values of k nearest neighbours.
# kNN is a type of instance-based learning or lazy learning. Lazy learning means the algorithm does not build an explicit model during a training phase; all training data is used in the testing phase. This makes training faster and testing slower and costlier, so the testing phase requires more time and memory resources.
# In kNN, the neighbours are taken from a set of objects for which the class or the object property value is known. This can be thought of as the training set for the kNN algorithm, though no explicit training step is required. In both classification and regression, kNN can also weight the contributions of the neighbours, so that nearer neighbours contribute more to the result than the more distant ones.
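# A quick illustrative sketch of this idea: scikit-learn's KNeighborsClassifier exposes the
# weighting through the weights="distance" option, so closer neighbours count more in the vote.
# The toy data below is made up purely for illustration.
from sklearn.neighbors import KNeighborsClassifier

toy_X = [[0], [1], [2], [10]]  # hypothetical 1-D feature values
toy_y = [0, 0, 0, 1]           # hypothetical labels
knn_weighted = KNeighborsClassifier(n_neighbors=3, weights="distance")
knn_weighted.fit(toy_X, toy_y)
# the single nearby label-1 neighbour outweighs the two distant label-0 neighbours
print(knn_weighted.predict([[9]]))  # -> [1]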
# # **2. k Nearest Neighbours intuition**
# [Table of Contents](#0.1)
# The kNN algorithm intuition is very simple to understand. It simply calculates the distance between a sample data point and all the other training data points. The distance can be Euclidean distance or Manhattan distance. Then, it selects the k nearest data points where k can be any integer. Finally, it assigns the sample data point to the class to which the majority of the k data points belong.
# Now, we will see kNN algorithm in action. Suppose, we have a dataset with two variables which are classified as `Red` and `Blue`.
# In kNN algorithm, k is the number of nearest neighbours. Generally, k is an odd number because it helps to decide the majority of the class. When k=1, then the algorithm is known as the nearest neighbour algorithm.
# Now, we want to classify a new data point `X` into `Blue` class or `Red` class. Suppose the value of k is 3. The kNN algorithm starts by calculating the distance between `X` and all the other data points. It then finds the 3 nearest points with least distance to point `X`.
# In the final step of the kNN algorithm, we assign the new data point `X` to the majority class of the 3 nearest points. If 2 of the 3 nearest points belong to the class `Red` while 1 belongs to the class `Blue`, then we classify the new data point as `Red`.
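# A bare-bones sketch of the 3-nearest-neighbour vote described above, using Euclidean
# distance and a handful of made-up Red/Blue points purely for illustration.
import numpy as np
from collections import Counter

def knn_predict(train_points, train_labels, new_point, k=3):
    # Euclidean distance from the new point to every training point
    dists = np.linalg.norm(np.asarray(train_points) - np.asarray(new_point), axis=1)
    # labels of the k closest training points
    nearest_labels = [train_labels[i] for i in np.argsort(dists)[:k]]
    # majority vote among the k neighbours
    return Counter(nearest_labels).most_common(1)[0][0]

toy_points = [(1, 1), (2, 1), (1, 2), (6, 6), (7, 7), (6, 7)]
toy_labels = ["Red", "Red", "Red", "Blue", "Blue", "Blue"]
print(knn_predict(toy_points, toy_labels, (2, 2)))  # -> Red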
# # **3. How to decide the number of neighbours in kNN**
# [Table of Contents](#0.1)
# While building the kNN classifier model, one question that comes to my mind is what value of the number of nearest neighbours (k) yields the highest accuracy. This is a very important question because the classification accuracy depends upon our choice of k.
# The number of neighbours (k) in kNN is a parameter that we need to select at the time of model building. Selecting the optimal value of k in kNN is the most critical problem. A small value of k means that noise will have higher influence on the result. So, probability of overfitting is very high. A large value of k makes it computationally expensive in terms of time to build the kNN model. Also, a large value of k will have a smoother decision boundary which means lower variance but higher bias.
# Data scientists usually choose an odd value of k if the number of classes is even. We can apply the elbow method to select the value of k. To optimize the results, we can use the cross-validation technique: we test the kNN algorithm with different values of k, and the model which gives good accuracy can be considered an optimal choice. It depends on individual cases, and at times the best process is to run through each possible value of k and test the result.
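# A short sketch of the cross-validation idea described above. It uses scikit-learn's bundled
# breast-cancer dataset (not the Wisconsin file loaded later in this notebook) purely so the
# snippet can run on its own.
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

X_demo, y_demo = load_breast_cancer(return_X_y=True)
for k in range(1, 12, 2):  # try a few odd values of k
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k), X_demo, y_demo, cv=5)
    print("k =", k, "mean CV accuracy =", round(scores.mean(), 4))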
# # **4. Eager learners vs lazy learners**
# [Table of Contents](#0.1)
# Eager learners construct a generalized model from the training data points before performing prediction on new points to classify. We can think of such learners as being ready, active and eager to classify new data points.
# Lazy learning means there is no explicit training of a model and all of the data points are used at the time of prediction. Lazy learners wait until the last minute before classifying any data point. They merely store the training dataset and wait until classification needs to be performed. Lazy learners are also known as instance-based learners because they store the training points or instances, and all learning is based on those instances.
# Unlike eager learners, lazy learners do less work in the training phase and more work in the testing phase to make a classification.
# # **5. Import libraries**
# [Table of Contents](#0.1)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # for data visualization purposes
import seaborn as sns # for data visualization
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import warnings
warnings.filterwarnings("ignore")
# # **6. Import dataset**
# [Table of Contents](#0.1)
data = "/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt"
df = pd.read_csv(data, header=None)
# # **7. Exploratory data analysis**
# [Table of Contents](#0.1)
# Now, I will explore the data to gain insights about the data.
# view dimensions of dataset
df.shape
# We can see that there are 699 instances and 11 attributes in the data set.
# In the dataset description, it is given that there are 10 attributes and 1 `Class` which is the target variable. So, we have 10 attributes and 1 target variable.
# ### View top 5 rows of dataset
# preview the dataset
df.head()
# ### Rename column names
# We can see that the dataset does not have proper column names. The columns are merely labelled as 0,1,2.... and so on. We should give proper names to the columns. I will do it as follows:-
col_names = [
"Id",
"Clump_thickness",
"Uniformity_Cell_Size",
"Uniformity_Cell_Shape",
"Marginal_Adhesion",
"Single_Epithelial_Cell_Size",
"Bare_Nuclei",
"Bland_Chromatin",
"Normal_Nucleoli",
"Mitoses",
"Class",
]
df.columns = col_names
df.columns
# We can see that the column names are renamed. Now, the columns have meaningful names.
# let's again preview the dataset
df.head()
# ### Drop redundant columns
# We should drop any redundant columns from the dataset which do not have any predictive power. Here, `Id` is the redundant column. So, I will drop it first.
# drop Id column from dataset
df.drop("Id", axis=1, inplace=True)
# ### View summary of dataset
#
# view summary of dataset
df.info()
# We can see that the `Id` column has been removed from the dataset.
# We can see that there are 9 numerical variables and 1 categorical variable in the dataset. I will check the frequency distribution of values in the variables to confirm the same.
# ### Frequency distribution of values in variables
for var in df.columns:
print(df[var].value_counts())
# The distribution of values shows that the values of `Bare_Nuclei` look like integers. But the summary of the dataframe shows that its data type is object. So, I will explicitly convert it to a numeric type.
# ### Convert data type of Bare_Nuclei to integer
df["Bare_Nuclei"] = pd.to_numeric(df["Bare_Nuclei"], errors="coerce")
# ### Check data types of columns of dataframe
df.dtypes
# Now, we can see that all the columns of the dataframe are of type numeric.
# ### Summary of variables
# - There are 10 numerical variables in the dataset.
# - All of the variables are of discrete type.
# - Out of all the 10 variables, the first 9 variables are feature variables and last variable `Class` is the target variable.
# ### Explore problems within variables
# Now, I will explore problems within variables.
# ### Missing values in variables
# check missing values in variables
df.isnull().sum()
# We can see that the `Bare_Nuclei` column contains missing values. We need to dig deeper to find the frequency distribution of
# values of `Bare_Nuclei`.
# check `na` values in the dataframe
df.isna().sum()
# We can see that the `Bare_Nuclei` column contains 16 `nan` values.
# check frequency distribution of `Bare_Nuclei` column
df["Bare_Nuclei"].value_counts()
# check unique values in `Bare_Nuclei` column
df["Bare_Nuclei"].unique()
# We can see that there are `nan` values in the `Bare_Nuclei` column.
# check for nan values in `Bare_Nuclei` column
df["Bare_Nuclei"].isna().sum()
# We can see that there are 16 `nan` values in the dataset. I will impute missing values after dividing the dataset into training and test set.
# ### check frequency distribution of target variable `Class`
# view frequency distribution of values in `Class` variable
df["Class"].value_counts()
# ### check percentage of frequency distribution of `Class`
# view percentage of frequency distribution of values in `Class` variable
df["Class"].value_counts() / np.float(len(df))
# We can see that the `Class` variable contains 2 class labels - `2` and `4`. `2` stands for benign and `4` stands for malignant cancer.
# ### Outliers in numerical variables
# view summary statistics in numerical variables
print(round(df.describe(), 2))
# kNN algorithm is robust to outliers.
# # **8. Data Visualization**
# [Table of Contents](#0.1)
# Now, we have a basic understanding of our data. I will supplement it with some data visualization to get better understanding
# of our data.
# ### Univariate plots
# ### Check the distribution of variables
# Now, I will plot the histograms to check variable distributions to find out if they are normal or skewed.
# plot histograms of the variables
plt.rcParams["figure.figsize"] = (30, 25)
df.plot(kind="hist", bins=10, subplots=True, layout=(5, 2), sharex=False, sharey=False)
plt.show()
# We can see that all the variables in the dataset are positively skewed.
# ### Multivariate plots
# ### Estimating correlation coefficients
# Our dataset is very small. So, we can compute the standard correlation coefficient (also called Pearson's r) between every pair of attributes. We can compute it using the `df.corr()` method as follows:-
correlation = df.corr()
# Our target variable is `Class`. So, we should check how each attribute correlates with the `Class` variable. We can do it as follows:-
correlation["Class"].sort_values(ascending=False)
# ### Interpretation
# - The correlation coefficient ranges from -1 to +1.
# - When it is close to +1, this signifies that there is a strong positive correlation. So, we can see that there is a strong positive correlation between `Class` and `Bare_Nuclei`, `Class` and `Uniformity_Cell_Shape`, `Class` and `Uniformity_Cell_Size`.
# - When it is close to -1, it means that there is a strong negative correlation. When it is close to 0, it means that there is no correlation.
# - We can see that all the variables are positively correlated with the `Class` variable. Some variables are strongly positively correlated while others are only weakly correlated.
# ### Discover patterns and relationships
# An important step in EDA is to discover patterns and relationships between variables in the dataset. I will use the seaborn heatmap to explore the patterns and relationships in the dataset.
# ### Correlation Heat Map
plt.figure(figsize=(10, 8))
plt.title("Correlation of Attributes with Class variable")
a = sns.heatmap(correlation, square=True, annot=True, fmt=".2f", linecolor="white")
a.set_xticklabels(a.get_xticklabels(), rotation=90)
a.set_yticklabels(a.get_yticklabels(), rotation=30)
plt.show()
# ### Interpretation
# From the above correlation heat map, we can conclude that :-
# 1. `Class` is highly positively correlated with `Uniformity_Cell_Size`, `Uniformity_Cell_Shape` and `Bare_Nuclei` (correlation coefficient = 0.82).
# 2. `Class` is positively correlated with `Clump_thickness` (correlation coefficient = 0.72), `Marginal_Adhesion` (correlation coefficient = 0.70), `Single_Epithelial_Cell_Size` (correlation coefficient = 0.68) and `Normal_Nucleoli` (correlation coefficient = 0.71).
# 3. `Class` is weakly positively correlated with `Mitoses` (correlation coefficient = 0.42).
# 4. The `Mitoses` variable is weakly positively correlated with all the other variables (correlation coefficient < 0.50).
# # **9. Declare feature vector and target variable**
# [Table of Contents](#0.1)
X = df.drop(["Class"], axis=1)
y = df["Class"]
# # **10. Split data into separate training and test set**
# [Table of Contents](#0.1)
# split X and y into training and testing sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# check the shape of X_train and X_test
X_train.shape, X_test.shape
# # **11. Feature Engineering**
# [Table of Contents](#0.1)
# **Feature Engineering** is the process of transforming raw data into useful features that help us to understand our model better and increase its predictive power. I will carry out feature engineering on different types of variables.
#
# check data types in X_train
X_train.dtypes
# ### Engineering missing values in variables
#
# check missing values in numerical variables in X_train
X_train.isnull().sum()
# check missing values in numerical variables in X_test
X_test.isnull().sum()
# print percentage of missing values in the numerical variables in training set
for col in X_train.columns:
if X_train[col].isnull().mean() > 0:
print(col, round(X_train[col].isnull().mean(), 4))
# ### Assumption
# I assume that the data are missing completely at random (MCAR). There are two methods which can be used to impute missing values. One is mean or median imputation and other one is random sample imputation. When there are outliers in the dataset, we should use median imputation. So, I will use median imputation because median imputation is robust to outliers.
# I will impute missing values with the appropriate statistical measures of the data, in this case median. Imputation should be done over the training set, and then propagated to the test set. It means that the statistical measures to be used to fill missing values both in train and test set, should be extracted from the train set only. This is to avoid overfitting.
# impute missing values in X_train and X_test with respective column median in X_train
for df1 in [X_train, X_test]:
for col in X_train.columns:
col_median = X_train[col].median()
df1[col].fillna(col_median, inplace=True)
# check again missing values in numerical variables in X_train
X_train.isnull().sum()
# check missing values in numerical variables in X_test
X_test.isnull().sum()
# We can see that there are no missing values in X_train and X_test.
X_train.head()
X_test.head()
# We now have training and testing set ready for model building. Before that, we should map all the feature variables onto the same scale. It is called `feature scaling`. I will do it as follows.
# # **12. Feature Scaling**
# [Table of Contents](#0.1)
cols = X_train.columns
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=cols)
X_test = pd.DataFrame(X_test, columns=cols)
X_train.head()
# We now have `X_train` dataset ready to be fed into the Logistic Regression classifier. I will do it as follows.
# # **13. Fit K Neighbours Classifier to the training set**
# [Table of Contents](#0.1)
# import KNeighborsClassifier from sklearn
from sklearn.neighbors import KNeighborsClassifier
# instantiate the model
knn = KNeighborsClassifier(n_neighbors=3)
# fit the model to the training set
knn.fit(X_train, y_train)
# # **14. Predict test-set results**
# [Table of Contents](#0.1)
y_pred = knn.predict(X_test)
y_pred
# ### predict_proba method
# The **predict_proba** method gives the probabilities for the target variable (2 and 4 in this case), in array form.
# `2 is for probability of benign cancer` and `4 is for probability of malignant cancer.`
# probability of getting output as 2 - benign cancer
knn.predict_proba(X_test)[:, 0]
# probability of getting output as 4 - malignant cancer
knn.predict_proba(X_test)[:, 1]
# # **15. Check accuracy score**
# [Table of Contents](#0.1)
from sklearn.metrics import accuracy_score
print("Model accuracy score: {0:0.4f}".format(accuracy_score(y_test, y_pred)))
# Here, **y_test** are the true class labels and **y_pred** are the predicted class labels in the test-set.
# ### Compare the train-set and test-set accuracy
# Now, I will compare the train-set and test-set accuracy to check for overfitting.
y_pred_train = knn.predict(X_train)
print(
"Training-set accuracy score: {0:0.4f}".format(
accuracy_score(y_train, y_pred_train)
)
)
# ### Check for overfitting and underfitting
# print the scores on training and test set
print("Training set score: {:.4f}".format(knn.score(X_train, y_train)))
print("Test set score: {:.4f}".format(knn.score(X_test, y_test)))
# The training-set accuracy score is 0.9821 while the test-set accuracy is 0.9714. These two values are quite comparable. So, there is no sign of overfitting.
# ### Compare model accuracy with null accuracy
# So, the model accuracy is 0.9714. But, we cannot say that our model is very good based on the above accuracy. We must compare it with the **null accuracy**. Null accuracy is the accuracy that could be achieved by always predicting the most frequent class.
# So, we should first check the class distribution in the test set.
# check class distribution in test set
y_test.value_counts()
# We can see that the most frequent class occurs 85 times. So, we can calculate the null accuracy by dividing 85 by the total number of occurrences.
# check null accuracy score
null_accuracy = 85 / (85 + 55)
print("Null accuracy score: {0:0.4f}".format(null_accuracy))
# We can see that our model accuracy score is 0.9714 but null accuracy score is 0.6071. So, we can conclude that our K Nearest Neighbors model is doing a very good job in predicting the class labels.
# # **16. Rebuild kNN Classification model using different values of k**
# [Table of Contents](#0.1)
# I have built the kNN classification model using k=3. Now, I will increase the value of k and see its effect on accuracy.
# ### Rebuild kNN Classification model using k=5
# instantiate the model with k=5
knn_5 = KNeighborsClassifier(n_neighbors=5)
# fit the model to the training set
knn_5.fit(X_train, y_train)
# predict on the test-set
y_pred_5 = knn_5.predict(X_test)
print(
"Model accuracy score with k=5 : {0:0.4f}".format(accuracy_score(y_test, y_pred_5))
)
# ### Rebuild kNN Classification model using k=6
# instantiate the model with k=6
knn_6 = KNeighborsClassifier(n_neighbors=6)
# fit the model to the training set
knn_6.fit(X_train, y_train)
# predict on the test-set
y_pred_6 = knn_6.predict(X_test)
print(
"Model accuracy score with k=6 : {0:0.4f}".format(accuracy_score(y_test, y_pred_6))
)
# ### Rebuild kNN Classification model using k=7
# instantiate the model with k=7
knn_7 = KNeighborsClassifier(n_neighbors=7)
# fit the model to the training set
knn_7.fit(X_train, y_train)
# predict on the test-set
y_pred_7 = knn_7.predict(X_test)
print(
"Model accuracy score with k=7 : {0:0.4f}".format(accuracy_score(y_test, y_pred_7))
)
# ### Rebuild kNN Classification model using k=8
# instantiate the model with k=8
knn_8 = KNeighborsClassifier(n_neighbors=8)
# fit the model to the training set
knn_8.fit(X_train, y_train)
# predict on the test-set
y_pred_8 = knn_8.predict(X_test)
print(
"Model accuracy score with k=8 : {0:0.4f}".format(accuracy_score(y_test, y_pred_8))
)
# ### Rebuild kNN Classification model using k=9
# instantiate the model with k=9
knn_9 = KNeighborsClassifier(n_neighbors=9)
# fit the model to the training set
knn_9.fit(X_train, y_train)
# predict on the test-set
y_pred_9 = knn_9.predict(X_test)
print(
"Model accuracy score with k=9 : {0:0.4f}".format(accuracy_score(y_test, y_pred_9))
)
# ### Interpretation
# Our original model accuracy score with k=3 is 0.9714. Now, we can see that we get the same accuracy score of 0.9714 with k=5. But increasing the value of k further improves the accuracy.
# With k=6,7,8 we get accuracy score of 0.9786. So, it results in performance improvement.
# If we increase k to 9, then accuracy decreases again to 0.9714.
# Now, based on the above analysis we can conclude that our classification model accuracy is very good. Our model is doing a very good job in terms of predicting the class labels.
# But, it does not give the underlying distribution of values. Also, it does not tell us anything about the type of errors our classifier is making.
# We have another tool called `Confusion matrix` that comes to our rescue.
# # **17. Confusion matrix**
# [Table of Contents](#0.1)
# A confusion matrix is a tool for summarizing the performance of a classification algorithm. A confusion matrix will give us a clear picture of classification model performance and the types of errors produced by the model. It gives us a summary of correct and incorrect predictions broken down by each category. The summary is represented in a tabular form.
# Four types of outcomes are possible while evaluating a classification model performance. These four outcomes are described below:-
# **True Positives (TP)** – True Positives occur when we predict an observation belongs to a certain class and the observation actually belongs to that class.
# **True Negatives (TN)** – True Negatives occur when we predict an observation does not belong to a certain class and the observation actually does not belong to that class.
# **False Positives (FP)** – False Positives occur when we predict an observation belongs to a certain class but the observation actually does not belong to that class. This type of error is called **Type I error.**
# **False Negatives (FN)** – False Negatives occur when we predict an observation does not belong to a certain class but the observation actually belongs to that class. This is a very serious error and it is called **Type II error.**
# These four outcomes are summarized in a confusion matrix given below.
#
# Print the Confusion Matrix with k =3 and slice it into four pieces
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print("Confusion matrix\n\n", cm)
print("\nTrue Positives(TP) = ", cm[0, 0])
print("\nTrue Negatives(TN) = ", cm[1, 1])
print("\nFalse Positives(FP) = ", cm[0, 1])
print("\nFalse Negatives(FN) = ", cm[1, 0])
# The confusion matrix shows `83 + 53 = 136 correct predictions` and `2 + 2 = 4 incorrect predictions`.
# In this case, we have
# - `True Positives` (Actual Positive:1 and Predict Positive:1) - 83
# - `True Negatives` (Actual Negative:0 and Predict Negative:0) - 53
# - `False Positives` (Actual Negative:0 but Predict Positive:1) - 2 `(Type I error)`
# - `False Negatives` (Actual Positive:1 but Predict Negative:0) - 2 `(Type II error)`
# Print the Confusion Matrix with k =7 and slice it into four pieces
cm_7 = confusion_matrix(y_test, y_pred_7)
print("Confusion matrix\n\n", cm_7)
print("\nTrue Positives(TP) = ", cm_7[0, 0])
print("\nTrue Negatives(TN) = ", cm_7[1, 1])
print("\nFalse Positives(FP) = ", cm_7[0, 1])
print("\nFalse Negatives(FN) = ", cm_7[1, 0])
# The above confusion matrix shows `83 + 54 = 137 correct predictions` and `2 + 1 = 3 incorrect predictions`.
# In this case, we have
# - `True Positives` (Actual Positive:1 and Predict Positive:1) - 83
# - `True Negatives` (Actual Negative:0 and Predict Negative:0) - 54
# - `False Positives` (Actual Negative:0 but Predict Positive:1) - 2 `(Type I error)`
# - `False Negatives` (Actual Positive:1 but Predict Negative:0) - 1 `(Type II error)`
# ### Comment
# So, kNN Classification model with k=7 shows more accurate predictions and less number of errors than k=3 model. Hence, we got performance improvement with k=7.
# visualize confusion matrix with seaborn heatmap
plt.figure(figsize=(6, 4))
cm_matrix = pd.DataFrame(
data=cm_7,
columns=["Actual Positive:1", "Actual Negative:0"],
index=["Predict Positive:1", "Predict Negative:0"],
)
sns.heatmap(cm_matrix, annot=True, fmt="d", cmap="YlGnBu")
# # **18. Classification metrics**
# [Table of Contents](#0.1)
# ### Classification Report
# A **classification report** is another way to evaluate the classification model performance. It displays the **precision**, **recall**, **f1** and **support** scores for the model. I describe these terms later.
# We can print a classification report as follows:-
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred_7))
# ### Classification accuracy
TP = cm_7[0, 0]
TN = cm_7[1, 1]
FP = cm_7[0, 1]
FN = cm_7[1, 0]
# print classification accuracy
classification_accuracy = (TP + TN) / float(TP + TN + FP + FN)
print("Classification accuracy : {0:0.4f}".format(classification_accuracy))
# ### Classification error
# print classification error
classification_error = (FP + FN) / float(TP + TN + FP + FN)
print("Classification error : {0:0.4f}".format(classification_error))
# ### Precision
# **Precision** can be defined as the percentage of correctly predicted positive outcomes out of all the predicted positive outcomes. It can be given as the ratio of true positives (TP) to the sum of true and false positives (TP + FP).
# So, **Precision** identifies the proportion of correctly predicted positive outcomes. It is more concerned with the positive class than the negative class.
# Mathematically, `precision` can be defined as the ratio of `TP to (TP + FP)`.
#
# print precision score
precision = TP / float(TP + FP)
print("Precision : {0:0.4f}".format(precision))
# ### Recall
# Recall can be defined as the percentage of correctly predicted positive outcomes out of all the actual positive outcomes.
# It can be given as the ratio of true positives (TP) to the sum of true positives and false negatives (TP + FN). **Recall** is also called **Sensitivity**.
# **Recall** identifies the proportion of correctly predicted actual positives.
# Mathematically, `recall` can be given as the ratio of `TP to (TP + FN)`.
#
recall = TP / float(TP + FN)
print("Recall or Sensitivity : {0:0.4f}".format(recall))
# ### True Positive Rate
# **True Positive Rate** is synonymous with **Recall**.
#
true_positive_rate = TP / float(TP + FN)
print("True Positive Rate : {0:0.4f}".format(true_positive_rate))
# ### False Positive Rate
false_positive_rate = FP / float(FP + TN)
print("False Positive Rate : {0:0.4f}".format(false_positive_rate))
# ### Specificity
specificity = TN / (TN + FP)
print("Specificity : {0:0.4f}".format(specificity))
# ### f1-score
# **f1-score** is the harmonic mean of precision and recall. The best possible **f1-score** is 1.0 and the worst is 0.0.
# Because it embeds both precision and recall into its computation, the **f1-score** is typically lower than the accuracy measure.
# The weighted average of the `f1-score` should be used to compare classifier models, rather than global accuracy.
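# As a quick check, the f1-score can be reproduced from the precision and recall values computed above
# (a minimal sketch using the `precision` and `recall` variables defined earlier in this section).
f1 = 2 * precision * recall / (precision + recall)
print("f1-score (from precision and recall) : {0:0.4f}".format(f1))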
# ### Support
# **Support** is the actual number of occurrences of the class in our dataset.
# ### Adjusting the classification threshold level
# print the first 10 predicted probabilities of two classes- 2 and 4
y_pred_prob = knn.predict_proba(X_test)[0:10]
y_pred_prob
# ### Observations
# - In each row, the numbers sum to 1.
# - There are 2 columns which correspond to 2 classes - 2 and 4.
# - Class 2 - predicted probability that there is benign cancer.
#
# - Class 4 - predicted probability that there is malignant cancer.
#
#
# - Importance of predicted probabilities
# - We can rank the observations by probability of benign or malignant cancer.
# - predict_proba process
# - Predicts the probabilities
#
# - Choose the class with the highest probability
#
#
# - Classification threshold level
# - There is a classification threshold level of 0.5.
#
# - Class 4 - probability of malignant cancer is predicted if probability > 0.5.
#
# - Class 2 - probability of benign cancer is predicted if probability < 0.5.
#
#
# store the probabilities in dataframe
y_pred_prob_df = pd.DataFrame(
data=y_pred_prob,
columns=["Prob of - benign cancer (2)", "Prob of - malignant cancer (4)"],
)
y_pred_prob_df
# print the first 10 predicted probabilities for class 4 - Probability of malignant cancer
knn.predict_proba(X_test)[0:10, 1]
# store the predicted probabilities for class 4 - Probability of malignant cancer
y_pred_1 = knn.predict_proba(X_test)[:, 1]
# plot histogram of predicted probabilities
# adjust figure size
plt.figure(figsize=(6, 4))
# adjust the font size
plt.rcParams["font.size"] = 12
# plot histogram with 10 bins
plt.hist(y_pred_1, bins=10)
# set the title of predicted probabilities
plt.title("Histogram of predicted probabilities of malignant cancer")
# set the x-axis limit
plt.xlim(0, 1)
# set the title
plt.xlabel("Predicted probabilities of malignant cancer")
plt.ylabel("Frequency")
# ### Observations
# - We can see that the above histogram is positively skewed.
# - The first column tells us that there are approximately 80 observations with 0 probability of malignant cancer.
# - There are few observations with probability > 0.5.
# - So, these few observations predict that there will be malignant cancer.
# ### Comments
# - In binary problems, the threshold of 0.5 is used by default to convert predicted probabilities into class predictions.
# - Threshold can be adjusted to increase sensitivity or specificity.
# - Sensitivity and specificity have an inverse relationship: increasing one generally decreases the other, and vice versa.
# - Adjusting the threshold level should be one of the last steps you take in the model-building process.
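# As a minimal sketch, a custom threshold can be applied to the malignant-class probabilities
# computed above (y_pred_1) to trade sensitivity against specificity.
# The value 0.3 below is an arbitrary illustrative choice, not a tuned threshold.
custom_threshold = 0.3
y_pred_custom = [4 if p >= custom_threshold else 2 for p in y_pred_1]  # 4 = malignant, 2 = benign
print(confusion_matrix(y_test, y_pred_custom))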
# # **19. ROC-AUC**
# [Table of Contents](#0.1)
# ### ROC Curve
# Another tool to measure the classification model performance visually is **ROC Curve**. ROC Curve stands for **Receiver Operating Characteristic Curve**. An **ROC Curve** is a plot which shows the performance of a classification model at various
# classification threshold levels.
# The **ROC Curve** plots the **True Positive Rate (TPR)** against the **False Positive Rate (FPR)** at various threshold levels.
# **True Positive Rate (TPR)** is also called **Recall**. It is defined as the ratio of **TP to (TP + FN)**.
# **False Positive Rate (FPR)** is defined as the ratio of **FP to (FP + TN)**.
# Each point on the ROC Curve corresponds to the TPR (True Positive Rate) and FPR (False Positive Rate) obtained at one particular classification threshold, and the full curve shows how these two rates trade off as the threshold is varied. If we lower the threshold, more items are classified as positive, which increases both the True Positives (TP) and the False Positives (FP).
#
# plot ROC Curve
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred_1, pos_label=4)
plt.figure(figsize=(6, 4))
plt.plot(fpr, tpr, linewidth=2)
plt.plot([0, 1], [0, 1], "k--")
plt.rcParams["font.size"] = 12
plt.title("ROC curve for Breast Cancer kNN classifier")
plt.xlabel("False Positive Rate (1 - Specificity)")
plt.ylabel("True Positive Rate (Sensitivity)")
plt.show()
# ROC curve help us to choose a threshold level that balances sensitivity and specificity for a particular context.
# ### ROC AUC
# **ROC AUC** stands for **Receiver Operating Characteristic - Area Under Curve**. It is a technique to compare classifier performance. In this technique, we measure the `area under the curve (AUC)`. A perfect classifier will have a ROC AUC equal to 1, whereas a purely random classifier will have a ROC AUC equal to 0.5.
# So, **ROC AUC** is the percentage of the ROC plot that is underneath the curve.
# compute ROC AUC
from sklearn.metrics import roc_auc_score
ROC_AUC = roc_auc_score(y_test, y_pred_1)
print("ROC AUC : {:.4f}".format(ROC_AUC))
# ### Interpretation
# - ROC AUC is a single number summary of classifier performance. The higher the value, the better the classifier.
# - ROC AUC of our model approaches towards 1. So, we can conclude that our classifier does a good job in predicting whether it is benign or malignant cancer.
# calculate cross-validated ROC AUC
from sklearn.model_selection import cross_val_score
Cross_validated_ROC_AUC = cross_val_score(
knn_7, X_train, y_train, cv=5, scoring="roc_auc"
).mean()
print("Cross validated ROC AUC : {:.4f}".format(Cross_validated_ROC_AUC))
# ### Interpretation
# Our cross-validated ROC AUC is very close to 1. So, we can conclude that the kNN classifier is indeed a very good model.
# # **20. k-fold Cross Validation**
# [Table of Contents](#0.1)
# In this section, I will apply the k-fold Cross Validation technique to improve the model performance. Cross-validation is a statistical method of evaluating generalization performance. It is more stable and thorough than using a single train-test split to evaluate model performance.
# Applying 10-Fold Cross Validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(knn_7, X_train, y_train, cv=10, scoring="accuracy")
print("Cross-validation scores:{}".format(scores))
# We can summarize the cross-validation accuracy by calculating its mean.
# compute Average cross-validation score
print("Average cross-validation score: {:.4f}".format(scores.mean()))
|
# import chart_studio.plotly as py
# matplotlib
import matplotlib.pyplot as plt
# word cloud library
from wordcloud import WordCloud
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# plotly
# import plotly.plotly as py
from plotly.offline import init_notebook_mode, iplot, plot
import plotly as py
init_notebook_mode(connected=True)
import plotly.graph_objs as go
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# cwurData = pd.read_csv("../input/world-university-rankings/cwurData.csv")
# education_expenditure_supplementary_data = pd.read_csv("../input/world-university-rankings/education_expenditure_supplementary_data.csv")
# educational_attainment_supplementary_data = pd.read_csv("../input/world-university-rankings/educational_attainment_supplementary_data.csv")
# school_and_country_table = pd.read_csv("../input/world-university-rankings/school_and_country_table.csv")
# shanghaiData = pd.read_csv("../input/world-university-rankings/shanghaiData.csv")
timesData = pd.read_csv("../input/world-university-rankings/timesData.csv")
# # INTRODUCTION
# * In this kernel, we will learn how to use plotly library.
# * Plotly library: Plotly's Python graphing library makes interactive, publication-quality graphs online, including line plots, scatter plots, area charts, bar charts, error bars, box plots, histograms, heatmaps, subplots, multiple axes, polar charts, and bubble charts.
# ## Content
# 1. [Loading Data and Explanation of Features](#1)
# 2. [Line Charts](#2)
# 3. [Scatter Charts](#3)
# 4. [Bar Charts](#4)
# 5. [Pie Charts](#5)
# 6. [Bubble Charts](#6)
# 7. [Histogram](#7)
# 8. [Word Cloud](#8)
# 9. [Box Plot](#9)
# 10. [Scatter Plot Matrix](#10)
# 11. [Inset Plots](#11)
# 12. [3D Scatter Plot with Colorscaling](#12)
# 13. [Multiple Subplots](#13)
#
# # Loading Data and Explanation of Features - inspecting the dataset attributes:
#
# - World university rankings dataset
# - timesData includes 14 features:
#    - world_rank ==> (world ranking)
#    - university_name
#    - country ==> (country of the university)
#    - teaching ==> (teaching score)
#    - international ==> (international score)
#    - research ==> (research score)
#    - citations ==> (citation score)
#    - income ==> (university income)
#    - total_score ==> (total score)
#    - num_students ==> (number of students)
#    - student_staff_ratio ==> (ratio of students to staff)
#    - international_students ==> (share of international students at the university)
#    - female_male_ratio ==> (female to male ratio)
#    - year ==> (year)
#
timesData.info()
timesData.head()
# Inspecting the data, we can see that it contains both None values and "-" placeholder values.
#
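# A quick check of the missing values (a minimal sketch; note that the "-" placeholders are
# ordinary strings and are therefore not counted as NaN here).
timesData.isnull().sum()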
# # Line Charts
# **Line Charts Example: Citation and Teaching vs World Rank of Top 100 Universities **
# **We will visualise the citation and teaching scores of the top 100 universities in 2011.**
# Import graph_objs as go
# Creating traces
# x = x axis
# y = y axis
# mode = type of plot like markers, lines or lines + markers (which plot type?)
# name = name of the plots (plot name)
# marker = marker is used with a dictionary (controls the plot colour)
# color = color of lines. It takes RGB (red, green, blue) and opacity (alpha)
# text = the hover text (hover = cursor); information about the point, given as a dictionary
# data = is a list that we add traces into it
# layout = it is dictionary.
# title = title of layout
# x axis = it is dictionary
# title = label of x axis
# ticklen = length of x axis ticks
# zeroline = showing zero line or not
# fig = it includes data and layout
# iplot() = plots the figure(fig) that is created by data and layout
# ---------------------------------------------------------------------
# Import graph_objs.
# Create the traces (trace parameters):
# x = x axis
# y = y axis
# mode = plot type; can be markers, lines or lines+markers
# name = name of the plot
# marker => a dict; color can be given as RGB plus opacity (alpha). Opacity takes values between 0 and 1:
#           the closer to 0, the more transparent the trace (at 0 it is invisible). Each RGB channel has
#           256 possible values (2**8); changing them produces different colours.
# text = the information shown for each point while hovering over the plot
# layout = everything outside the plot area, given as a dict: title (heading),
# x axis = the name given to the x axis
# ticklen = the length of the x axis ticks
# import graph objects as "go"
import plotly.graph_objs as go
# Problem: visualise the citation and teaching scores
# of the top 100 universities in 2011
# ------------------------- Rough outline -----------------------------------
# Get the first 100 samples with all of their features.
# Create trace 1: plot the citations feature together with the university it belongs to and its rank.
# Create trace 2: plot the teaching feature together with the university it belongs to and its rank.
# Combine trace1 and trace2.
# Create the layout.
# Create the figure and embed the traces and the layout in it.
# Draw the figure with iplot.
# prepare data frame
df = timesData.iloc[
    :100, :
]  # the first 100 samples of timesData (with all of their features)
# Creating trace1
# Trace1 puts the world rank on the x axis and the citations scores on the y axis;
# its mode is "lines", its name is "citations", and the university_name of the first 100 samples is used as the hover text.
trace1 = go.Scatter(
x=df.world_rank,
y=df.citations,
mode="lines", # plotun şeklini belirtir (markers,lines or lines + markers)
name="citations", #
marker=dict(color="rgba(16, 112, 2, 0.8)"), # RGB + opacity(alpha)
text=df.university_name,
)
# Creating trace2
trace2 = go.Scatter(
x=df.world_rank,
y=df.teaching,
mode="lines+markers",
name="teaching",
marker=dict(color="rgba(80, 26, 80, 0.8)"), # blue
text=df.university_name,
)
data = [trace1, trace2]
layout = dict(
title="Citation and Teaching vs World Rank of Top 100 Universities", # plot dışında kalan herşey, y ekseni,başlaık bölgesi...
xaxis=dict(title="World Rank", ticklen=5, zeroline=False),
)
fig = dict(data=data, layout=layout)  # the figure is created
iplot(fig)
#
# # Scatter Charts
# Scatter Example: Citation vs world rank of top 100 universities with 2014, 2015 and 2016 years
# We will compare the citations of the world's top 100 universities for the years 2014, 2015 and 2016.
# * Import graph_objs as *go*
# * Creating traces
# * x = x axis
# * y = y axis
# * mode = type of plot like marker, line or line + markers
# * name = name of the plots
# * marker = marker is used with dictionary.
# * color = color of lines. It takes RGB (red, green, blue) and opacity (alpha)
# * text = The hover text (hover is curser)
# * data = is a list that we add traces into it
# * layout = it is dictionary.
# * title = title of layout
# * x axis = it is dictionary
# * title = label of x axis
# * ticklen = length of x axis ticks
# * zeroline = showing zero line or not
# * y axis = it is dictionary and same with x axis
# * fig = it includes data and layout
# * iplot() = plots the figure(fig) that is created by data and layout
# prepare data frames
# The top-100 university records for 2014, 2015 and 2016 are extracted into separate dataframes.
df2014 = timesData[timesData.year == 2014].iloc[:100, :]  # first 100 samples for 2014
df2015 = timesData[timesData.year == 2015].iloc[:100, :]  # first 100 samples for 2015
df2016 = timesData[timesData.year == 2016].iloc[:100, :]  # first 100 samples for 2016
# import graph objects as "go"
# import plotly.graph_objs as go  # already imported in the previous example
# First we build the structure of our scatter traces using the graph_objs library:
# creating trace1
trace1 = go.Scatter(
    x=df2014.world_rank,  # university rankings
    y=df2014.citations,  # university citations
    mode="markers",  # plot type
    name="2014",
    marker=dict(color="rgba(255, 128, 255, 0.8)"),  # (RGB, opacity)
text=df2014.university_name,
)
# creating trace2
trace2 = go.Scatter(
x=df2015.world_rank,
y=df2015.citations,
mode="markers",
name="2015",
marker=dict(color="rgba(255, 128, 2, 0.8)"),
text=df2015.university_name,
)
# creating trace3
trace3 = go.Scatter(
x=df2016.world_rank,
y=df2016.citations,
mode="markers",
name="2016",
marker=dict(color="rgba(0, 255, 200, 0.8)"),
text=df2016.university_name,
)
# collect the traces in a list called data
data = [trace1, trace2, trace3]
# create the layout
layout = dict(
title="Citation vs world rank of top 100 universities with 2014, 2015 and 2016 years",
xaxis=dict(title="World Rank", ticklen=5, zeroline=False),
yaxis=dict(title="Citation", ticklen=5, zeroline=False),
)
# create the figure as a dictionary and embed the data and the layout in it
fig = dict(data=data, layout=layout)
# draw it
iplot(fig)
#
# # Bar Charts
# - First Bar Charts Example: citations and teaching of top 3 universities in 2014 (style1)
# - We will compare the citation and teaching scores of the top 3 universities in 2014.
#
timesData.head()
# prepare data frames
# We filter timesData down to the records whose year equals 2014, take the first 3 of them
# with all of their features, and assign the resulting dataframe to the variable df2014.
# Note: embedding the filter "timesData.year == 2014" inside timesData selects the 2014 records,
# "iloc[:3, :]" keeps the rows at indexes 0-2, and ":" keeps all the features.
df2014 = timesData[timesData.year == 2014].iloc[:3, :]
# filter = timesData.year==2014
# df2014 = timesData[filter].iloc[:3,:]
df2014
# create trace1
trace1 = go.Bar(
x=df2014.university_name,
y=df2014.citations,
name="citations",
marker=dict(
color="rgba(255, 174, 255, 0.5)", line=dict(color="rgb(0,0,0)", width=1.5)
),
text=df2014.country,
)
# create trace2
trace2 = go.Bar(
x=df2014.university_name,
y=df2014.teaching,
name="teaching",
marker=dict(
color="rgba(255, 255, 128, 0.5)", line=dict(color="rgb(0,0,0)", width=1.5)
),
text=df2014.country,
)
data = [trace1, trace2]
# create the layout, grouping the bars with barmode
layout = go.Layout(barmode="group")
fig = go.Figure(data=data, layout=layout)
iplot(fig)
#
# - Second Bar Charts Example: citations and teaching of top 3 universities in 2014 (style2)
# - Actually, if you only change the barmode from *group* to *relative* in the previous example, you achieve the same result. However, for diversity, a different syntax is used here.
# - We again compare the citation and teaching scores of the top 3 universities in 2014.
# - The only difference is that the bars being compared are stacked on top of each other.
#
#
# * Import graph_objs as *go*
# * Creating traces
# * x = x axis
# * y = y axis
# * name = name of the plots
# * type = type of plot like bar plot
# * data = is a list that we add traces into it
# * layout = it is dictionary.
# * xaxis = label of x axis
# * barmode = bar mode of bars like grouped( previous example) or relative
# * title = title of layout
# * fig = it includes data and layout
# * iplot() = plots the figure(fig) that is created by data and layout
#
# This time the traces are built without calling a go method such as Bar or Scatter.
# Instead of calling the method, each trace is written as a plain dictionary and
# the "type" key tells Plotly which kind of plot it should become.
# Because the x values are shared, they are stored once in the variable x
# and that variable is passed as the "x" parameter of both trace dictionaries.
x = df2014.university_name
trace1 = {
    "x": x,  # the shared x variable defined above is passed in as input
    "y": df2014.citations,
    "name": "citation",
    "type": "bar",  # tells Plotly which type of plot this trace is
}
trace2 = {
    "x": x,  # the shared x variable defined above is passed in as input
    "y": df2014.teaching,
    "name": "teaching",
    "type": "bar",  # tells Plotly which type of plot this trace is
}
data = [trace1, trace2]
layout = {
"xaxis": {"title": "Top 3 universities"},
"barmode": "relative", # relative ile yan yana değilde üst üste barlar olmasını sağladık.
"title": "citations and teaching of top 3 universities in 2014",
}
fig = go.Figure(data=data, layout=layout)
iplot(fig)
#
# Third Bar Charts Example: Horizontal bar charts. (style3) Research score vs income for universities
# - Comparison of the research scores and incomes of the top 7 universities in 2016
# * Import graph_objs as *go* and importing tools
# * Tools: used for subplots
# * Creating trace1
# * bar: bar plot
# * x = x axis
# * y = y axis
# * marker
# * color: color of bars
# * line: bar line color and width
# * name: name of bar
# * orientation: orientation like horizontal
# * creating trace2
# * scatter: scatter plot
# * x = x axis
# * y = y axis
# * mode: scatter type line line + markers or only markers
# * line: properties of line
# * color: color of line
# * name: name of scatter plot
# * layout: axis, legend, margin, paper and plot properties
# *
df2016
# import graph objects as "go" and import tools
import plotly.graph_objs as go
from plotly import tools
import matplotlib.pyplot as plt
# prepare data frames
df2016 = timesData[timesData.year == 2016].iloc[
    :7, :
]  # take the first 7 records of 2016
y_saving = [each for each in df2016.research]  # research scores in 2016
y_net_worth = [
    float(each) for each in df2016.income
]  # 2016 income values converted to float and stored in y_net_worth
x_saving = [
    each for each in df2016.university_name
]  # names of the top 7 universities in 2016
x_net_worth = [each for each in df2016.university_name]
trace0 = go.Bar(
x=y_saving,
y=x_saving,
marker=dict(
color="rgba(171, 50, 96, 0.6)",
line=dict(color="rgba(171, 50, 96, 1.0)", width=1),
),
name="research",
orientation="h",
)
trace1 = go.Scatter(
x=y_net_worth,
y=x_net_worth,
mode="lines+markers",
line=dict(color="rgb(63, 72, 204)"),
name="income",
)
layout = dict(
title="Citations and income",
yaxis=dict(showticklabels=True, domain=[0, 0.85]),
yaxis2=dict(
showline=True,
showticklabels=False,
linecolor="rgba(102, 102, 102, 0.8)",
linewidth=2,
domain=[0, 0.85],
),
xaxis=dict(
zeroline=False,
showline=False,
showticklabels=True,
showgrid=True,
domain=[0, 0.42],
),
xaxis2=dict(
zeroline=False,
showline=False,
showticklabels=True,
showgrid=True,
domain=[0.47, 1],
side="top",
dtick=25,
),
legend=dict(x=0.029, y=1.038, font=dict(size=10)),
margin=dict(l=200, r=20, t=70, b=70),
paper_bgcolor="rgb(248, 248, 255)",
plot_bgcolor="rgb(248, 248, 255)",
)
annotations = []
y_s = np.round(y_saving, decimals=2)
y_nw = np.rint(y_net_worth)
# Adding labels
for ydn, yd, xd in zip(y_nw, y_s, x_saving):
# labeling the scatter savings
annotations.append(
dict(
xref="x2",
yref="y2",
y=xd,
x=ydn - 4,
text="{:,}".format(ydn),
font=dict(family="Arial", size=12, color="rgb(63, 72, 204)"),
showarrow=False,
)
)
# labeling the bar net worth
annotations.append(
dict(
xref="x1",
yref="y1",
y=xd,
x=yd + 3,
text=str(yd),
font=dict(family="Arial", size=12, color="rgb(171, 50, 96)"),
showarrow=False,
)
)
layout["annotations"] = annotations
# Creating two subplots
fig = tools.make_subplots(
rows=1,
cols=2,
specs=[[{}, {}]],
shared_xaxes=True,
shared_yaxes=False,
vertical_spacing=0.001,
)
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 2)
fig["layout"].update(layout)
iplot(fig)
#
# # Pie Charts
# - Pie Charts Example: Students rate of top 7 universities in 2016
# - The share of students among the top 7 universities in 2016
#
# * fig: create figures
# * data: plot type
# * values: values of plot
# * labels: labels of plot
# * name: name of plots
# * hoverinfo: information in hover
# * hole: hole width
# * type: plot type like pie
# * layout: layout of plot
# * title: title of layout
# * annotations: font, showarrow, text, x, y
#
timesData[timesData.year == 2016].iloc[:7, :]
# data preparation
df2016 = timesData[timesData.year == 2016].iloc[
    :7, :
]  # filtered to 2016; the first 7 universities are taken
pie1 = df2016.num_students  # student counts assigned to pie1
# The values in num_students use "," as the decimal separator and are stored as object dtype; they should be floats.
# We convert them to "."-based decimals, e.g. 2,243 => 2.243.
# Below, replace turns each "," into "." and every num_students value is cast from object to float.
pie1_list = [
    float(each.replace(",", ".")) for each in df2016.num_students
]  # str "2,4" => str "2.4" => float 2.4
# the labels are the names of the top 7 universities in 2016
labels = df2016.university_name
# In this approach the figure is built with the data and the layout embedded directly inside it
# values = the values to be displayed
# figure
fig = {
"data": [
        {  # the trace is created
            "values": pie1_list,
            "labels": labels,
            "domain": {"x": [0, 0.5]},
            "name": "Number Of Students Rates",
            "hoverinfo": "label+percent+name",  # label, percentage and name shown on hover
            "hole": 0.2,  # size of the hole in the middle of the pie chart
            "type": "pie",  # type of the trace
        },
    ],
    "layout": {  # the layout is created
"title": "Universities Number of Students rates",
"annotations": [
{
"font": {"size": 20},
"showarrow": False,
"text": "Number of Students",
"x": 0.20,
"y": 1,
},
],
},
}
iplot(fig)
# Note: the pie chart performs the percentage calculation itself. The logic:
# the student counts of the top 7 universities are summed ==> 95.724...
# For example, Harvard's share of the pie is computed as:
# Harvard student count = 20.152 ==> 20.152 * 100 / 95.724 = roughly 21.05 percent.
# Clicking a university in the legend excludes it and shows how the shares would be redistributed without it.
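# A minimal sketch that reproduces the shares the pie chart computes internally
# (using the pie1_list and labels prepared above; the values are in thousands of students).
total_students = sum(pie1_list)
for name, value in zip(labels, pie1_list):
    print("{}: {:.2f}%".format(name, value * 100 / total_students))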
#
# # Bubble Charts
# - Bubble Charts Example: University world rank (first 20) vs teaching score with number of students(size) and international score (color) in 2016
# - Bubble charts usually attach meaning to both colour and size; that is what distinguishes them from a plain scatter or pie chart.
# - We will compare the teaching scores of the top 20 universities in 2016; the number of students will determine the marker size and the international score will determine the colour.
# * x = x axis
# * y = y axis
# * mode = markers(scatter)
# * marker = marker properties
#     * color = third dimension of plot. International score
# * size = fourth dimension of plot. Number of students
# * text: university names
df2016.head(20)
# Bubble Charts Example: University world rank (first 20) vs
# teaching score with number of students(size) and international score (color) in 2016
# data preparation
df2016 = timesData[timesData.year == 2016].iloc[:20, :]
# num_students: decimal commas are converted to ".", the values are cast to float and stored in a list.
num_students_size = [float(each.replace(",", ".")) for each in df2016.num_students]
# the international scores are stored in a list to be used as the colour values.
international_color = [float(each) for each in df2016.international]
# Build the data list with the trace embedded inside it.
data = [
{
"y": df2016.teaching,
"x": df2016.world_rank,
"mode": "markers",
"marker": { # color ve size'a anlam katıyoruz.
"color": international_color, # uluslararası skora göre renk yoğunlaşır
"size": num_students_size, # üniversiteki öğrenci sayısına göre size büyür.
"showscale": True,
},
"text": df2016.university_name,
}
]
iplot(data)
# The larger a bubble, the more students the corresponding university has.
# The bubble colour encodes the international score: the colour scale on the right shows which universities score higher internationally.
#
# # Histogram
# Lets look at histogram of students-staff ratio in 2011 and 2012 years.
# We examine the student-staff ratios for the years 2011 and 2012.
#
# * trace1 = first histogram
# * x = x axis
# * y = y axis
# * opacity = opacity of histogram
# * name = name of legend
# * marker = color of histogram
# * trace2 = second histogram
# * layout = layout
# * barmode = mode of histogram like overlay. Also you can change it with *stack*
#
# prepare data
x2011 = timesData.student_staff_ratio[
    timesData.year == 2011
]  # filtered to 2011; the student_staff_ratio values are extracted
x2012 = timesData.student_staff_ratio[
    timesData.year == 2012
]  # filtered to 2012; the student_staff_ratio values are extracted
# A histogram counts how many observations fall at each x value, i.e. the density of the data.
trace1 = go.Histogram(
    x=x2011,
    opacity=0.75,  # transparency
name="2011",
marker=dict(color="rgba(171, 50, 96, 0.6)"),
)
trace2 = go.Histogram(
x=x2012, opacity=0.75, name="2012", marker=dict(color="rgba(12, 50, 196, 0.6)")
)
data = [trace1, trace2]
layout = go.Layout(
barmode="overlay", # barmode modu overlay: içiçe gelecek şekilde birleştir.
title=" students-staff ratio in 2011 and 2012",
xaxis=dict(title="students-staff ratio"),
yaxis=dict(title="Count"),
)
fig = go.Figure(data=data, layout=layout)
iplot(fig)
#
# # Word Cloud
# This is a separate library, not part of Plotly!
# #from wordcloud import WordCloud
# It is not a plotly tool, but it is worth learning for visualization. Let's look at which country is mentioned most in 2011.
# Visualise the country names for 2011.
# The more often a country name appears, the larger its font size in the cloud.
# * WordCloud = word cloud library that I import at the beginning of kernel
# * background_color = color of back ground
# * generate = generates the country name list(x2011) a word cloud
# data preparation
# The most frequent words are drawn large.
# The rarest words are drawn small.
x2011 = timesData.country[timesData.year == 2011]
plt.subplots(figsize=(8, 8))
wordcloud = WordCloud(background_color="white", width=512, height=384).generate(
" ".join(x2011)
)  # join the country names into one string; the most frequently used words are scaled up proportionally
plt.imshow(wordcloud)
plt.axis("off")  # hide the x and y axes
plt.show()
#
# # Box Plots
# * Box Plots
#     * Median (50th percentile) = the middle value of the data set: sort the data and take the value in the middle. It is called the 50th percentile because 50% of the data are less than it.
#     * 25th percentile = quartile 1 (Q1), the lower quartile
#     * 75th percentile = quartile 3 (Q3), the upper quartile
#     * height of box = IQR = interquartile range = Q3 - Q1
#     * Whiskers = extend 1.5 * IQR from Q1 and Q3
#     * Outliers = points lying more than 1.5 * IQR below Q1 or above Q3, i.e. beyond the whiskers (see the short numpy sketch after this list)
#
#
# * trace = box
# * y = data we want to visualize with box plot
# * marker = color
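# A minimal numeric sketch of these box-plot quantities, computed with numpy on the 2015
# total_score values (pd.to_numeric guards against the "-" placeholders noted earlier).
scores_2015 = pd.to_numeric(
    timesData[timesData.year == 2015].total_score, errors="coerce"
).dropna()
q1, median, q3 = np.percentile(scores_2015, [25, 50, 75])
iqr = q3 - q1
print("Q1:", q1, "Median:", median, "Q3:", q3, "IQR:", iqr)
print("Whisker range:", q1 - 1.5 * iqr, "to", q3 + 1.5 * iqr)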
# data preparation
# Visualise the total score and the research score of the universities in 2015 in more detail.
x2015 = timesData[timesData.year == 2015]
trace0 = go.Box(
y=x2015.total_score,
name="total score of universities in 2015",
marker=dict(
color="rgb(12, 12, 140)",
),
)
trace1 = go.Box(
y=x2015.research,
name="research of universities in 2015",
marker=dict(
color="rgb(12, 128, 128)",
),
)
data = [trace0, trace1]
iplot(data)
# With the plotly library the box plot becomes a little more vivid and interactive in this way:
# hovering shows which value each point has, as well as the lower and upper quartiles and the outliers.
#
# # Scatter Matrix Plots
# - Scatter Matrix = it helps us to see covariance and relation between more than 2 features
# - It is used to show the covariance (the unscaled relationship) between pairs of features.
# * import figure factory as ff
# * create_scatterplotmatrix = creates scatter plot
# * data2015 = prepared data. It includes research, international and total scores with index from 1 to 401
# * colormap = color map of scatter plot
# * colormap_type = color type of scatter plot
#     * height and width
# import figure factory
import plotly.figure_factory as ff
# prepare data
dataframe = timesData[timesData.year == 2015]
data2015 = dataframe.loc[:, ["research", "international", "total_score"]]
data2015["index"] = np.arange(1, len(data2015) + 1)
# scatter matrix
# diag (column and row) = boxplot; "Portland" is a colour palette whose dark shade is red and light shade is blue
fig = ff.create_scatterplotmatrix(
data2015,
diag="box",
index="index",
colormap="Portland",
colormap_type="cat",
height=700,
width=700,
)
iplot(fig)
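# The same pairwise relationships can also be inspected numerically; a minimal sketch with pandas
# (pd.to_numeric with errors="coerce" guards against the "-" placeholders noted earlier).
numeric_2015 = data2015[["research", "international", "total_score"]].apply(
    pd.to_numeric, errors="coerce"
)
print(numeric_2015.cov())  # covariance matrix
print(numeric_2015.corr())  # correlation matrix (covariance scaled to [-1, 1])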
#
# # Inset Plots
# - Inset Matrix = 2 plots are in one frame
# - An inset plot is used to show 2 plots inside one frame.
# - The part to be emphasised is drawn as the large plot, and the supporting plot is placed in the small inset.
#
dataframe.head()
# We will compare the teaching and income features of the universities.
# dataframe = universities from the year 2015
# first line plot
trace1 = go.Scatter(
x=dataframe.world_rank,
y=dataframe.teaching,
name="teaching", # label
marker=dict(color="rgba(16, 112, 2, 0.8)"), # RGB + Opacity(alpha)
)
# second line plot
# The second plot is the helper (inset) plot; its x and y axes are given as x2 and y2.
trace2 = go.Scatter(
x=dataframe.world_rank,
y=dataframe.income,
xaxis="x2",
yaxis="y2",
name="income", # label
marker=dict(color="rgba(160, 112, 20, 0.8)"), # RGB + Opacity(alpha)
)
data = [trace1, trace2]
layout = go.Layout(  # because the two plots are nested, domain and anchor are used: domain gives the position/coordinates, anchor is the parameter used to place the second (inset) plot.
    xaxis2=dict(
        domain=[
            0.6,
            0.95,
        ],  # the difference between the two values is the width of the inset: 0.6 is where it starts and 0.95 is where it ends within the figure
        anchor="y2",  # the inset's x axis is tied to its y2 axis
    ),
    yaxis2=dict(
        domain=[0.6, 0.95],
        anchor="x2",  # the inset's y axis is tied to its x2 axis
    ),
title="Income and Teaching vs World Rank of Universities",
)
fig = go.Figure(data=data, layout=layout)
iplot(fig)
#
# # 3D Scatter Plot with Colorscaling
# - 3D Scatter: sometimes 2D is not enough to understand the data, so adding one more dimension increases its intelligibility. We will even add colour, which acts as a 4th dimension.
# - By also varying the marker size a 5th dimension is possible; in this example the size is kept constant.
# * go.Scatter3d: create 3d scatter plot
# * x,y,z: axis of plots
#     * mode: markers, i.e. a scatter of points
# * size: marker size
# * color: axis of colorscale
# * colorscale: actually it is 4th dimension
dataframe.head()
# create trace 1 that is 3d scatter
trace1 = go.Scatter3d(
    x=dataframe.world_rank,  # dimension 1: world rank
    y=dataframe.research,  # dimension 2: research score
    z=dataframe.citations,  # dimension 3: citation score
    mode="markers",  # markers mode draws each observation as an individual point
marker=dict(size=10, color="rgb(255,0,0)"),
)
data = [trace1]
layout = go.Layout(
    margin=dict(  # margins: the space left around the edges of the plot
        l=0,  # left
        r=0,  # right
        b=0,  # bottom
        t=0,  # top
)
)
fig = go.Figure(data=data, layout=layout)
iplot(fig)
#
# # Multiple Subplots
# - Multiple Subplots: While comparing more than one features, multiple subplots can be useful.
# - This is the approach to use when we want to visualise more than one feature within the same figure.
#
#
trace1 = go.Scatter(x=dataframe.world_rank, y=dataframe.research, name="research")
trace2 = go.Scatter(
x=dataframe.world_rank,
y=dataframe.citations,
# domain1
xaxis="x2",
yaxis="y2",
name="citations",
)
trace3 = go.Scatter(
x=dataframe.world_rank,
y=dataframe.income,
# domain2
xaxis="x3",
yaxis="y3",
name="income",
)
trace4 = go.Scatter(
x=dataframe.world_rank,
y=dataframe.total_score,
# domain3
xaxis="x4",
yaxis="y4",
name="total_score",
)
data = [trace1, trace2, trace3, trace4]
layout = go.Layout(
xaxis=dict(domain=[0, 0.45]),
xaxis2=dict(domain=[0.55, 1]),
xaxis3=dict(domain=[0, 0.45], anchor="y3"),
    xaxis4=dict(
        domain=[0.55, 1],
    ),
yaxis=dict(domain=[0, 0.45]),
yaxis2=dict(domain=[0.55, 1], anchor="x2"),
yaxis3=dict(domain=[0.55, 1]),
yaxis4=dict(domain=[0, 0.45], anchor="x4"),
title="Research, citation, income and total score VS World Rank of Universities",
)
fig = go.Figure(data=data, layout=layout)
iplot(fig)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# imports; if any of these fails, run '!pip3 install [package name]'
from sklearn.metrics import f1_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df_train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
df_test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
df_train
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words="english")
X = tfidf_vectorizer.fit_transform(df_train["text"].values)
y = df_train["target"].values
tresholds = []
models = []
n_splits = 5
fold = 0
for train_index, test_index in StratifiedKFold(n_splits=n_splits).split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_hat = clf.predict_proba(X_test)
f_tracker = 0
treshold = 0
for i in range(100):
i = i / 100
        f1 = f1_score(y_test, np.where(y_hat[:, 1] >= i, 1, 0))  # f1_score expects (y_true, y_pred)
if f1 > f_tracker:
f_tracker = f1
treshold = i
print(i, treshold, f1, f_tracker)
tresholds.append(treshold)
models.append(clf)
fold += 1
print(fold)
X_test = tfidf_vectorizer.transform(df_test["text"].values)  # note: the name X_test now refers to the competition test set
y_hat = clf.predict(X_test)  # single prediction from the last fold's model (the final submission uses the ensemble below)
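# Ensemble: each fold's model votes using its own tuned threshold, the votes are averaged,
# and a simple majority (>= 0.5) determines the final predicted class.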
final = np.zeros((X_test.shape[0]))
for i in range(n_splits):
clf = models[i]
preds = clf.predict_proba(X_test)
preds = np.where(preds[:, 1] >= tresholds[i], 1, 0)
final += preds / n_splits
final = np.where(final >= 0.5, 1, 0)
import seaborn as sns
sns.distplot(final)
submission = pd.read_csv(
    "/kaggle/input/nlp-getting-started/sample_submission.csv"
)
submission["target"] = final
submission.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
from gensim.models import Doc2Vec
from sklearn import utils
from sklearn.model_selection import train_test_split
import gensim
from sklearn.linear_model import LogisticRegression
from gensim.models.doc2vec import TaggedDocument
import re
import seaborn as sns
import matplotlib.pyplot as plt
import string
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
def remove_punctuation(text):
"""a function for removing punctuation"""
import string
# replacing the punctuations with no space,
# which in effect deletes the punctuation marks
translator = str.maketrans("", "", string.punctuation)
# return the text stripped of punctuation marks
return text.translate(translator)
# extracting the stopwords from nltk library
sw = stopwords.words("english")
# displaying the stopwords
np.array(sw)
def stopwords(text):
"""a function for removing the stopword"""
# removing the stop words and lowercasing the selected words
text = [word.lower() for word in text.split() if word.lower() not in sw]
# joining the list of words with space separator
return " ".join(text)
stemmer = SnowballStemmer("english")
def stemming(text):
"""a function which stems each word in the given text"""
text = [stemmer.stem(word) for word in text.split()]
return " ".join(text)
def clean_loc(x):
if x == "None":
return "None"
elif x == "Earth" or x == "Worldwide" or x == "Everywhere":
return "World"
elif "New York" in x or "NYC" in x:
return "New York"
elif "London" in x:
return "London"
elif "Mumbai" in x:
return "Mumbai"
elif "Washington" in x and "D" in x and "C" in x:
return "Washington DC"
elif "San Francisco" in x:
return "San Francisco"
elif "Los Angeles" in x:
return "Los Angeles"
elif "Seattle" in x:
return "Seattle"
elif "Chicago" in x:
return "Chicago"
elif "Toronto" in x:
return "Toronto"
elif "Sacramento" in x:
return "Sacramento"
elif "Atlanta" in x:
return "Atlanta"
elif "California" in x:
return "California"
elif "Florida" in x:
return "Florida"
elif "Texas" in x:
return "Texas"
elif "United States" in x or "USA" in x:
return "USA"
elif "United Kingdom" in x or "UK" in x or "Britain" in x:
return "UK"
elif "Canada" in x:
return "Canada"
elif "India" in x:
return "India"
elif "Kenya" in x:
return "Kenya"
elif "Nigeria" in x:
return "Nigeria"
elif "Australia" in x:
return "Australia"
elif "Indonesia" in x:
return "Indonesia"
elif x in top_loc:
return x
else:
return "Others"
def remove_URL(text):
url = re.compile(r"https?://\S+|www\.\S+")
return url.sub(r"", text)
def remove_html(text):
html = re.compile(r"<.*?>")
return html.sub(r"", text)
def remove_emoji(text):
emoji_pattern = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"]+",
flags=re.UNICODE,
)
return emoji_pattern.sub(r"", text)
def remove_punct(text):
table = str.maketrans("", "", string.punctuation)
return text.translate(table)
# importing data
new_df = pd.read_csv("../input/nlp-getting-started/train.csv")
final_test = pd.read_csv("../input/nlp-getting-started/test.csv")
new_df["keyword"] = new_df["keyword"].fillna("unknown")
new_df["location"] = new_df["location"].fillna("unknown")
new_df = new_df[["target", "location", "text", "keyword"]]
final_test = final_test[["location", "text", "keyword"]]
new_df["text"] = new_df["text"].apply(remove_punctuation)
new_df["text"] = new_df["text"].apply(stopwords)
new_df["text"] = new_df["text"].apply(stemming)
new_df["text"] = new_df["text"].apply(remove_URL)
new_df["text"] = new_df["text"].apply(remove_html)
new_df["text"] = new_df["text"].apply(remove_emoji)
new_df["text"] = new_df["text"].apply(remove_punct)
# new_df['keyword'] = new_df['keyword'].apply(remove_punctuation)
# new_df['keyword'] = new_df['keyword'].apply(stopwords)
# new_df['keyword'] = new_df['keyword'].apply(stemming)
# new_df['keyword'] = new_df['keyword'].apply(remove_URL)
# new_df['keyword'] = new_df['keyword'].apply(remove_html)
# new_df['keyword'] = new_df['keyword'].apply(remove_emoji)
# new_df['keyword'] = new_df['keyword'].apply(remove_punct)
final_test["text"] = final_test["text"].apply(remove_punctuation)
final_test["text"] = final_test["text"].apply(stopwords)
final_test["text"] = final_test["text"].apply(stemming)
final_test["text"] = final_test["text"].apply(remove_URL)
final_test["text"] = final_test["text"].apply(remove_html)
final_test["text"] = final_test["text"].apply(remove_emoji)
final_test["text"] = final_test["text"].apply(remove_punct)
# final_test['keyword'] = final_test['keyword'].apply(remove_punctuation)
# final_test['keyword'] = final_test['keyword'].apply(stopwords)
# final_test['keyword'] = final_test['keyword'].apply(stemming)
# final_test['keyword'] = final_test['keyword'].apply(remove_URL)
# final_test['keyword'] = final_test['keyword'].apply(remove_html)
# final_test['keyword'] = final_test['keyword'].apply(remove_emoji)
# final_test['keyword'] = final_test['keyword'].apply(remove_punct)
raw_loc = new_df.location.value_counts()
top_loc = list(raw_loc[raw_loc >= 10].index)
new_df["location_clean"] = new_df["location"].apply(lambda x: clean_loc(str(x)))
final_test["location_clean"] = final_test["location"].apply(lambda x: clean_loc(str(x)))
from bs4 import BeautifulSoup
def cleanText(text):
text = BeautifulSoup(text, "lxml").text
text = re.sub(r"\|\|\|", r" ", text)
text = re.sub(r"http\S+", r"<URL>", text)
text = text.lower()
    text = text.replace("x", "")  # note: this removes every literal "x" character from the text
return text
new_df["text"] = new_df["text"].apply(cleanText)
new_df["keyword"] = new_df["keyword"].apply(cleanText)
new_df["location_clean"] = new_df["location_clean"].apply(cleanText)
final_test["text"] = final_test["text"].apply(cleanText)
final_test["keyword"] = final_test["keyword"].fillna("unknown")
final_test["keyword"] = final_test["keyword"].apply(cleanText)
final_test["location_clean"] = final_test["location_clean"].apply(cleanText)
keyword_df = new_df.groupby(["keyword"]).count().reset_index()
keyword_test = final_test.groupby(["keyword"]).count().reset_index()
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec
path = get_tmpfile("word2vec.model")
model = Word2Vec(common_texts, size=100, window=1, min_count=1, workers=4)
model = Word2Vec(
[list(keyword_df["keyword"]) + list(keyword_test["keyword"])], min_count=1
)
# traing and test split
train, test = train_test_split(new_df, test_size=0.2, random_state=42)
# another encoding technique for location and keyword, with the event rate
keyword_val = train.groupby("keyword").agg({"target": "mean"})
location_val = train.groupby("location_clean").agg({"target": "mean"})
new_train = pd.merge(train, keyword_val, how="left", on="keyword")
new_train = pd.merge(new_train, location_val, how="left", on="location_clean")
new_test = pd.merge(test, keyword_val, how="left", on="keyword")
new_test = pd.merge(new_test, location_val, how="left", on="location_clean")
new_train["target_y"].fillna(new_train["target_y"].mean(), inplace=True)
new_train["target"].fillna(new_train["target"].mean(), inplace=True)
new_test["target_y"].fillna(new_train["target_y"].mean(), inplace=True)
new_test["target"].fillna(new_train["target"].mean(), inplace=True)
# now back to creating word embeddings vector for keywords
words = list(model.wv.vocab)
train_w2v = []
test_w2v = []
final_test_w2v = []
for elem in train["keyword"]:
train_w2v.append(model.wv[elem])
for elem in test["keyword"]:
test_w2v.append(model.wv[elem])
for elem in final_test["keyword"]:
final_test_w2v.append(model.wv[elem])
# below code to create doc2vec vector for text variable
import multiprocessing
cores = multiprocessing.cpu_count()
import nltk
from nltk.corpus import stopwords
def tokenize_text(text):
tokens = []
for sent in nltk.sent_tokenize(text):
for word in nltk.word_tokenize(sent):
if len(word) < 2:
continue
tokens.append(word.lower())
return tokens
train_tagged = train.apply(
lambda r: TaggedDocument(words=tokenize_text(r["text"]), tags=[r.target]), axis=1
)
test_tagged = test.apply(
lambda r: TaggedDocument(words=tokenize_text(r["text"]), tags=[r.target]), axis=1
)
model_dbow = Doc2Vec(
dm=0, vector_size=300, negative=5, hs=0, min_count=2, sample=0, workers=cores
)
model_dbow.build_vocab([x for x in tqdm(train_tagged.values)])
for epoch in range(30):
model_dbow.train(
utils.shuffle([x for x in tqdm(train_tagged.values)]),
total_examples=len(train_tagged.values),
epochs=1,
)
model_dbow.alpha -= 0.002
model_dbow.min_alpha = model_dbow.alpha
def vec_for_learning(model, tagged_docs):
sents = tagged_docs.values
targets, regressors = zip(
*[(doc.tags[0], model.infer_vector(doc.words, steps=20)) for doc in sents]
)
return targets, regressors
y_train, X_train = vec_for_learning(model_dbow, train_tagged)
y_test, X_test = vec_for_learning(model_dbow, test_tagged)
# now combining the doc2vec vector, with word2vec vector and keyword and location encoding
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
vectorizer = CountVectorizer(analyzer="word", binary=True)
vectorizer.fit(new_df["text"])
X_train_cvec = vectorizer.transform(train["text"]).todense()
X_test_cvec = vectorizer.transform(test["text"]).todense()
# y = tweets['target'].values
# X.shape, y.shape
X_train_cvec.shape, X_test_cvec.shape
tfidf = TfidfVectorizer(analyzer="word", binary=True)
tfidf.fit(new_df["text"])
X_train_tfidf = tfidf.transform(train["text"]).todense()
X_test_tfidf = tfidf.transform(test["text"]).todense()
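# Each row's Doc2Vec text vector is extended, column by column, with the keyword target encoding
# (target_y), the location target encoding (target), the Word2Vec keyword vector, and then the
# CountVectorizer / TF-IDF representations of the text.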
new_X_train = list(map(lambda x, y: np.append(x, y), X_train, new_train["target_y"]))
new_X_train_2 = list(
map(lambda x, y: np.append(x, y), new_X_train, new_train["target"])
)
new_X_train_3 = list(map(lambda x, y: np.append(x, y), new_X_train_2, train_w2v))
new_X_test = list(map(lambda x, y: np.append(x, y), X_test, new_test["target_y"]))
new_X_test_2 = list(map(lambda x, y: np.append(x, y), new_X_test, new_test["target"]))
new_X_test_3 = list(map(lambda x, y: np.append(x, y), new_X_test_2, test_w2v))
# CountVectorizer
new_X_train_4 = list(map(lambda x, y: np.append(x, y), new_X_train_3, X_train_cvec))
new_X_test_4 = list(map(lambda x, y: np.append(x, y), new_X_test_3, X_test_cvec))
# TFIDF
new_X_train_5 = list(map(lambda x, y: np.append(x, y), new_X_train_4, X_train_tfidf))
new_X_test_5 = list(map(lambda x, y: np.append(x, y), new_X_test_4, X_test_tfidf))
new_X_test_5[0].dtype
# Simple logistic regression
from sklearn.metrics import accuracy_score, f1_score
logreg = LogisticRegression(n_jobs=1, C=1e5)
logreg.fit(new_X_train_5, y_train)
y_pred = logreg.predict(new_X_test_5)
print("Testing accuracy : {}".format(accuracy_score(y_test, y_pred)))
print("Testing F1 score : {}".format(f1_score(y_test, y_pred, average="weighted")))
var_lst = ["var_" + str(i) for i in range(len(new_X_train_4[0]))]
import xgboost as xgb
xgb_params = {
"eta": 0.05,
"max_depth": 5,
"subsample": 0.7,
"colsample_bytree": 0.7,
"objective": "binary:logistic",
"silent": 1,
"seed": 0,
"n_estimators": 200,
"eval_metric": "logloss",
}
dtrain = xgb.DMatrix(new_X_train_4, y_train, feature_names=var_lst)
xgb_model = xgb.train(dict(xgb_params, silent=0), dtrain, num_boost_round=50)
dtest = xgb.DMatrix(new_X_test_4, feature_names=var_lst)
y_pred = xgb_model.predict(dtest)
# print ('Testing accuracy : {}'.format(accuracy_score(y_test, y_pred)))
print(
"Testing F1 score : {}".format(f1_score(y_test, y_pred.round(), average="weighted"))
)
final_test["target"] = 0
final_test = pd.merge(final_test, keyword_val, how="left", on="keyword")
final_test = pd.merge(final_test, location_val, how="left", on="location_clean")
final_test["target_y"].fillna(new_train["target_y"].mean(), inplace=True)
final_test["target"].fillna(new_train["target"].mean(), inplace=True)
final_test_tagged = final_test.apply(
lambda r: TaggedDocument(words=tokenize_text(r["text"]), tags=[r.target]), axis=1
)
f_y_test, f_X_test = vec_for_learning(model_dbow, final_test_tagged)
final_X_test = list(map(lambda x, y: np.append(x, y), f_X_test, final_test["target_y"]))
final_X_test_2 = list(
map(lambda x, y: np.append(x, y), final_X_test, final_test["target"])
)
final_X_test_3 = list(map(lambda x, y: np.append(x, y), final_X_test_2, final_test_w2v))
X_f_test_cvec = vectorizer.transform(final_test["text"]).todense()
X_f_test_tfidf = tfidf.transform(final_test["text"]).todense()
final_X_test_4 = list(map(lambda x, y: np.append(x, y), final_X_test_3, X_f_test_cvec))
# TF-IDF features are appended so the feature set matches new_X_train_5, which logreg was trained on
final_X_test_5 = list(map(lambda x, y: np.append(x, y), final_X_test_4, X_f_test_tfidf))
dtest = xgb.DMatrix(final_X_test_4, feature_names=var_lst)
y_pred_xgb = xgb_model.predict(dtest)
new_y_pred = np.where(y_pred_xgb > 0.45, 1, 0)
unique_elements, counts_elements = np.unique(new_y_pred, return_counts=True)
unique_elements, counts_elements
new_y_pred
y_pred = logreg.predict(final_X_test_5)
# creating the submissions file
sub_sample = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
submit = sub_sample.copy()
submit.target = y_pred
submit.to_csv("submit_lr_convec_tfidf.csv", index=False)
submit = sub_sample.copy()
submit.target = new_y_pred
submit.to_csv("submit_xgb_convec_tfidf.csv", index=False)
|
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# Function to retrieve stock info
def get_stock_info(ticker_list, start_date, end_date, interval="1mo"):
"""
Extract stock price data from Yahoo.Finance using a list of tickers
:param ticker_list: A series/list of Tickets
:param start_date: time period to start retrieving the data, e.g.'1900-01-01'
:param end_date: time period to end retrieving the data, e.g.'1900-01-01'
:param interval: time interval to retrieve the data. Valid formats: {number}m/h/d/wk/mo
    :return: a df with stock open, high, low, and close prices, as well as trading volume and additional info
"""
ticker_list = list(ticker_list)
index_count = 0
for stock in ticker_list:
if index_count == 0:
print(f"[i] Collecting data for {stock} has been started")
ticker = yf.Ticker(f"{stock}")
data = ticker.history(interval=interval, start=start_date, end=end_date)
data["Company"] = f"{stock}"
print(f"[+] data for {stock} has been collected")
index_count += 1
elif index_count != 0:
print(f"[i] Collecting data for {stock} has been started")
ticker = yf.Ticker(f"{stock}")
data_2 = ticker.history(interval=interval, start=start_date, end=end_date)
data_2["Company"] = f"{stock}"
data = pd.concat([data, data_2])
print(f"[+] data for {stock} has been collected")
if index_count == len(ticker_list):
print("Info Data for all tickers has been collected")
return data[
[
"Company",
"Open",
"High",
"Low",
"Close",
"Volume",
"Dividends",
"Stock Splits",
]
]
# Function to calculate the sharpe ratio
def sharpe_ratio(df):
"""
    Calculate the Sharpe ratio for each company and year in a given df:
    the yearly excess return (stock return minus the 10-year US Treasury rate)
    divided by the standard deviation of the company's excess returns.
    :param df: a df for which the calculation is needed
    :return: a df with a calculated Sharpe ratio per company and year
"""
ust_10 = pd.read_csv(
r"/kaggle/input/s-and-p-500-companies-price-dynamics/ust10.csv"
)
company_list = []
year_list = []
return_y_array = []
risk_free = []
excess_return = []
sector_list = []
industry_list = []
stdev_list = []
for company in df["Company"].unique():
for year in df["Year"].unique():
# Calculate yearly return
tmp_df = df.query(
f'Company == "{company}" & Year == {year} & Month == 1'
).reset_index()
try:
p1 = tmp_df["Close"][0]
tmp_df = df.query(
f'Company == "{company}" & Year == {year} & Month == 12'
).reset_index()
except:
return_y = np.nan
try:
p2 = tmp_df["Close"][0]
return_y = (p2 - p1) / p1
except:
return_y = np.nan
# Get risk-free rate
tmp_df = ust_10.query(f"Year == {year} & Month == 1").reset_index()
r = tmp_df["Close"][0] / 100
# Excess return
er = return_y - r
            # Calculate StDev
try:
tmp_df = df.query(
f'Company == "{company}" & Year == {year}'
).reset_index()
sd = tmp_df["mo_return"].std()
except:
sd = np.nan
# Get Sector and Industry
tmp_df = df.query(f'Company == "{company}"').reset_index()
sector = tmp_df["GICS Sector"][0]
industry = tmp_df["GICS Sub-Industry"][0]
# Append values
company_list.append(f"{company}")
year_list.append(year)
return_y_array.append(return_y)
risk_free.append(r)
sector_list.append(sector)
industry_list.append(industry)
excess_return.append(er)
stdev_list.append(sd)
df_f = pd.DataFrame(
{
"Company": company_list,
"year": year_list,
"yearly_return": return_y_array,
"risk_free_r": risk_free,
"excess_return": excess_return,
"StDev_return": stdev_list,
"Sector": sector_list,
"Industry": industry_list,
}
)
df_f["StDev_excess_return"] = np.nan
for company in df_f["Company"].unique():
df_f["StDev_excess_return"] = np.where(
df_f["Company"] == company,
df_f.query(f'Company == "{company}"').loc[:, "excess_return"].std(),
df_f["StDev_excess_return"],
)
df_f["sharpe_ratio"] = df_f["excess_return"] / df_f["StDev_excess_return"]
return df_f
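# A minimal toy sketch of the Sharpe ratio formula used above (illustrative numbers only,
# not taken from the dataset): excess returns divided by their standard deviation.
toy_returns = pd.Series([0.12, -0.05, 0.20, 0.08])  # hypothetical yearly returns
toy_risk_free = 0.03  # hypothetical risk-free rate
toy_excess = toy_returns - toy_risk_free
print("Toy Sharpe ratios:", (toy_excess / toy_excess.std()).round(3).tolist())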
# =============================================================================
# Data Extraction
# =============================================================================
# Selecting Dates to use
date_begin = input(
    "Enter the beginning date for the analysis in the format YYYY-MM-DD (without quotes):\n"
)
date_end = input(
    "Enter the ending date for the analysis in the format YYYY-MM-DD (without quotes):\n"
)
date_begin_n = int(date_begin.split("-")[0])
date_end_n = int(date_end.split("-")[0])
# Collecting a table from the Wikipedia Page about the S&P 500
wiki_url = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
wiki_list = pd.read_html(wiki_url, header=0)
wiki_list = wiki_list[0]
# Creating a data set with companies from Wiki
sp500_companies = wiki_list[
["Symbol", "Security", "GICS Sector", "GICS Sub-Industry", "Founded"]
].rename(columns={"Symbol": "Company"})
# Using a parsing function to retrieve the stock data
raw_sp500_stock = get_stock_info(sp500_companies["Company"], date_begin, date_end)
raw_ust_10 = get_stock_info(["^TNX"], date_begin, date_end)
raw_sp500_index = get_stock_info(["^GSPC"], date_begin, date_end)
# Adding Year, Month and Day columns from the DateTime index
df_process_list = [raw_sp500_stock, raw_ust_10, raw_sp500_index]
for df in df_process_list:
# Add the Year, day and Month columns
df["Year"] = pd.DatetimeIndex(df.index).year
df["Month"] = pd.DatetimeIndex(df.index).month
df["Day"] = pd.DatetimeIndex(df.index).day
# Removing DateTime Index
raw_sp500_stock = (
raw_sp500_stock[raw_sp500_stock["Day"] == 1].reset_index().drop("Date", axis=1)
)
raw_ust_10 = raw_ust_10[raw_ust_10["Day"] == 1].reset_index().drop("Date", axis=1)
raw_sp500_index = (
raw_sp500_index[raw_sp500_index["Day"] == 1].reset_index().drop("Date", axis=1)
)
# =============================================================================
# Data Processing
# =============================================================================
# Add Sector and Industry columns
clean_sp500_stock = pd.merge(raw_sp500_stock, sp500_companies, on="Company", how="left")
clean_sp500_index = raw_sp500_index.copy()
# Calculate monthly returns
clean_sp500_stock["mo_return"] = np.nan
clean_sp500_index["mo_return"] = np.nan
df_process_list = [clean_sp500_stock, clean_sp500_index]
for df in df_process_list:
for company in df["Company"].unique():
for year in range(date_begin_n, date_end_n + 1):
for month in range(1, 13):
value_1 = df.query(
f'Company == "{company}" & Year == {year} & Month == {month}'
).reset_index()
value_2 = df.query(
f'Company == "{company}" & Year == {year} & Month == {month-1}'
).reset_index()
try:
value_1, value_2 = value_1["Close"][0], value_2["Close"][0]
except:
value_1 = df.query(
f'Company == "{company}" & Year == {year} & Month == {month}'
).reset_index()
value_2 = df.query(
f'Company == "{company}" & Year == {year-1} & Month == 12'
).reset_index()
try:
value_1, value_2 = value_1["Close"][0], value_2["Close"][0]
except:
value_1, value_2 = np.nan, np.nan
value_f = (value_1 - value_2) / value_2
df["mo_return"] = np.where(
(df["Company"] == company)
& (df["Year"] == year)
& (df["Month"] == month),
value_f,
(df["mo_return"]),
)
# Add Sector and Industry to use the sharpe function
clean_sp500_index["GICS Sector"] = "S&P500 Index"
clean_sp500_index["GICS Sub-Industry"] = "S&P500 Index"
# Apply the function
yearly_returns_stocks = sharpe_ratio(clean_sp500_stock)
yearly_returns_index = sharpe_ratio(clean_sp500_index)
# Compare each Stock Performance with S&P index
clean_sp500_stock["perform_vs_sp"] = np.nan
for company in clean_sp500_stock["Company"].unique():
for year in range(date_begin_n, date_end_n + 1):
for month in range(1, 13):
value = clean_sp500_stock.query(
f'Company == "{company}" & Year == {year} & Month == {month}'
).reset_index()
try:
value = value["mo_return"][0]
except:
value = np.nan
sp_value = clean_sp500_index.query(
f"Year == {year} & Month == {month}"
).reset_index()
try:
sp_value = sp_value["mo_return"][0]
except:
sp_value = np.nan
clean_sp500_stock["perform_vs_sp"] = np.where(
(clean_sp500_stock["Company"] == company)
& (clean_sp500_stock["Year"] == year)
& (clean_sp500_stock["Month"] == month),
value - sp_value,
(clean_sp500_stock["perform_vs_sp"]),
)
# Calculate % of time
clean_sp500_stock["perform_flag"] = np.where(
clean_sp500_stock["perform_vs_sp"] > 0, 1, 0
)
clean_sp500_stock["time_higher_sp"] = np.nan
for company in clean_sp500_stock["Company"].unique():
for year in range(date_begin_n, date_end_n + 1):
tmp_df = clean_sp500_stock.query(
f'Company == "{company}" & Year == {year}'
).reset_index()
clean_sp500_stock["time_higher_sp"] = np.where(
(clean_sp500_stock["Company"] == company)
& (clean_sp500_stock["Year"] == year),
tmp_df["perform_flag"].sum() / len(tmp_df),
clean_sp500_stock["time_higher_sp"],
)
# Calculating monthly returns
clean_sp500_stock["mo_return"] = np.nan
clean_sp500_index["mo_return"] = np.nan
df_process_list = [clean_sp500_stock, clean_sp500_index]
# Loop to calculate the values
# Take each df in the processing list
for df in df_process_list:
# Take each unique company
for company in df["Company"].unique():
# For each unique company in each year
for year in range(date_begin_n, date_end_n + 1):
# For each unique company in each year in each month
for month in range(1, 13):
# Get dfs with those company, year, and month filters
value_1 = df.query(
f'Company == "{company}" & Year == {year} & Month == {month}'
).reset_index()
value_2 = df.query(
f'Company == "{company}" & Year == {year} & Month == {month-1}'
).reset_index()
# Try to get the Close Price
try:
value_1, value_2 = value_1["Close"][0], value_2["Close"][0]
# If that does not work
except:
                    # Fall back to month 12 of the previous year for the second value
value_1 = df.query(
f'Company == "{company}" & Year == {year} & Month == {month}'
).reset_index()
value_2 = df.query(
f'Company == "{company}" & Year == {year-1} & Month == 12'
).reset_index()
# Try to retrieve the Close Price
try:
value_1, value_2 = value_1["Close"][0], value_2["Close"][0]
# If that does not work, assign nans
except:
value_1, value_2 = np.nan, np.nan
# Calculate the return
value_f = (value_1 - value_2) / value_2
# Put the return in the correct cell
df["mo_return"] = np.where(
(df["Company"] == company)
& (df["Year"] == year)
& (df["Month"] == month),
value_f,
(df["mo_return"]),
)
# =============================================================================
# EDA
# =============================================================================
# 1. Central Tendency & Dispersion analysis
# Create a summary table with sector agg
sum_table_sectors = (
yearly_returns_stocks.loc[
:, ["Sector", "Industry", "year", "yearly_return", "sharpe_ratio"]
]
.groupby(by=["Sector", "Industry", "year"], as_index=False)
.mean()
)
# Rank Sectors by mean Sharpe ratio within each year, best to worst (a top-5 selection sketch follows below)
sum_table_sectors.loc[:, ["year", "Sector", "sharpe_ratio"]].groupby(
by=["year", "Sector"]
).mean().sort_values(by=["year", "sharpe_ratio"], ascending=[1, 0])
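# A short illustrative sketch (an addition, not part of the original workflow) of how the
# actual top 5 sectors per year could be kept from the ranked table above; the name
# top5_sectors_by_year is introduced here purely for illustration.
top5_sectors_by_year = (
    sum_table_sectors.loc[:, ["year", "Sector", "sharpe_ratio"]]
    .groupby(by=["year", "Sector"], as_index=False)
    .mean()
    .sort_values(by=["year", "sharpe_ratio"], ascending=[True, False])
    .groupby("year")
    .head(5)
)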
# Rank Industries by mean Sharpe ratio within each year, best to worst
sum_table_sectors.loc[:, ["year", "Industry", "sharpe_ratio"]].groupby(
by=["year", "Industry"]
).mean().sort_values(by=["year", "sharpe_ratio"], ascending=[1, 0])
# 2. Outliers Analysis
# IQR setting up
yearly_returns_stocks["outlier_flag"] = 0
yearly_returns_stocks["iqr_low"] = 0
yearly_returns_stocks["iqr_high"] = 0
# IQR calculation
for year in range(date_begin_n, date_end_n + 1):
tmp_df = yearly_returns_stocks[yearly_returns_stocks["year"] == year]
IQR = tmp_df["sharpe_ratio"].quantile(0.75) - tmp_df["sharpe_ratio"].quantile(0.25)
IQR_low = tmp_df["sharpe_ratio"].quantile(0.25) - 1.5 * IQR
IQR_high = tmp_df["sharpe_ratio"].quantile(0.75) + 1.5 * IQR
yearly_returns_stocks["iqr_low"] = np.where(
yearly_returns_stocks["year"] == year, IQR_low, yearly_returns_stocks["iqr_low"]
)
yearly_returns_stocks["iqr_high"] = np.where(
yearly_returns_stocks["year"] == year,
IQR_high,
yearly_returns_stocks["iqr_high"],
)
# IQR status flag
yearly_returns_stocks["outlier_flag"] = np.where(
(yearly_returns_stocks["sharpe_ratio"] < yearly_returns_stocks["iqr_low"])
| (yearly_returns_stocks["sharpe_ratio"] > yearly_returns_stocks["iqr_high"]),
1,
0,
)
# Specific Outliers
print(
yearly_returns_stocks[(yearly_returns_stocks["outlier_flag"] == 1)]
.loc[:, ["Company", "yearly_return"]]
.sort_values(by="yearly_return", ascending=False)
)
# 3. Time performance vs S&P500 analysis
sum_table_time = (
clean_sp500_stock.loc[:, ["Company", "time_higher_sp", "Year", "GICS Sector"]]
.groupby(by=["Company", "Year", "GICS Sector"])
.mean()
.sort_values(by=["Year"], ascending=[0])
)
# =============================================================================
# Data Visualization
# =============================================================================
# Sharpe Ratio Boxplots for each year
sns.set_style("darkgrid")
plt.figure(figsize=(15, 8))
plt.axhline(y=0, color="black", linestyle="--", ms=5)
plt.title("Sharpe Ratio for S&P500 companies per year")
plt.rcParams.update({"font.size": 10})
sns.boxplot(x="year", y="sharpe_ratio", data=yearly_returns_stocks)
# Bar charts for each year by sector
for year in range(date_begin_n, date_end_n + 1):
tmp_df = sum_table_sectors[sum_table_sectors["year"] == year]
sns.set_style("darkgrid")
plt.figure(figsize=(25, 8))
plt.axhline(y=0, color="black", linestyle="--", ms=5)
plt.title(f"{year} year Sharpe Ratio Results")
plt.rcParams.update({"font.size": 11})
sns.barplot(x="Sector", y="sharpe_ratio", data=tmp_df)
# Bar charts for each year by Industry
for year in range(date_begin_n, date_end_n + 1):
for sector in sum_table_sectors["Sector"].unique():
tmp_df = sum_table_sectors[
(sum_table_sectors["year"] == year)
& (sum_table_sectors["Sector"] == sector)
]
sns.set_style("darkgrid")
plt.figure(figsize=(40, 8))
plt.axhline(y=0, color="black", linestyle="--", ms=5)
plt.title(f"{year} year {sector} Sharpe Ratio Results")
plt.rcParams.update({"font.size": 6})
sns.barplot(x="Industry", y="sharpe_ratio", data=tmp_df)
# Hists & KDEs
for year in range(date_begin_n, date_end_n + 1):
tmp_df = yearly_returns_stocks[yearly_returns_stocks["year"] == year]
sns.set_style("darkgrid")
plt.figure(figsize=(15, 8))
plt.title(f"Sharpe Ratio Hist for S&P500 companies in {year}")
plt.rcParams.update({"font.size": 10})
sns.histplot(x="sharpe_ratio", data=tmp_df, kde=True)
# Scatters
for year in range(date_begin_n, date_end_n + 1):
tmp_df = yearly_returns_stocks[yearly_returns_stocks["year"] == year]
sns.set_style("darkgrid")
plt.figure(figsize=(15, 8))
plt.title(f"Yearly Return - SdDev for S&P500 companies in {year}")
plt.rcParams.update({"font.size": 10})
sns.scatterplot(x="StDev_return", y="sharpe_ratio", data=tmp_df, hue="outlier_flag")
# Scatter
sns.set_style("darkgrid")
plt.figure(figsize=(15, 8))
plt.title("% of time each stock was higher vs S&P500")
plt.rcParams.update({"font.size": 10})
sns.scatterplot(x="Year", y="time_higher_sp", data=sum_table_time, hue="GICS Sector")
# ABCD Quadrant
for year in range(date_begin_n, date_end_n + 1):
tmp_df = yearly_returns_index.query(f"year == {year}").reset_index()
value = tmp_df["yearly_return"][0]
value_2 = tmp_df["StDev_return"][0]
tmp_df = yearly_returns_stocks.query(f"year == {year}").reset_index()
sns.set_style("darkgrid")
plt.figure(figsize=(15, 8))
plt.title(f"Yearly Return vs StDev in {year}")
plt.rcParams.update({"font.size": 10})
plt.axhline(y=value, color="black", linestyle="--", ms=5)
plt.axvline(x=value_2, color="black", linestyle="--", ms=5)
sns.scatterplot(x="StDev_return", y="yearly_return", data=tmp_df, hue="Sector")
|
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("seaborn")
data = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
data.replace("China", "Mainland China", inplace=True)
data.Date = pd.to_datetime(data.Date)
metrics = ["Confirmed", "Deaths", "Recovered"]
data["date"] = data.Date.apply(lambda x: x.strftime("%Y-%m-%d"))
tots = data.groupby("date")[metrics].sum()
tots.plot(style="o-")
plt.title("Covid-19: Totals")
tots = data.groupby("date")[metrics].sum()
recovered_rate = tots["Recovered"] / tots["Confirmed"]
mortality_rate = tots["Deaths"] / tots["Confirmed"]
plt.figure()
plt.plot(recovered_rate, "o-", label="Recovery Rate")
plt.plot(mortality_rate, "o-", label="Mortality Rate")
plt.legend()
plt.xticks(rotation=90)
plt.show()
tots.pct_change().rolling(5).mean().plot(style="o-")
plt.title("Daily Growth Rate, Smoothed")
tbl = data.pivot_table(
index="date", columns="Country", values="Confirmed", aggfunc="sum"
)
tbl = tbl.fillna(0).T.sort_values("2020-02-17", ascending=False)
tbl["2020-02-17"].plot(kind="bar", logy=False)
plt.title("Total Confirmed (Log Scale)")
plt.show()
tbl["2020-02-17"][:20].plot(kind="bar", logy=True)
plt.title("Total Confirmed (Log Scale)")
plt.show()
china = data[data.Country == "Mainland China"]
japan = data[data.Country == "Japan"]
china_table = china.pivot_table(
index="date", columns="Province/State", values="Confirmed", aggfunc="sum"
).T.sort_values("2020-02-17", ascending=False)
china_table[:10].T.plot(style="o-", logy=False)
plt.title("Top 10 Chinese Provinces by Number of Confirmeds")
china_table[1:11].T.plot(style="o-", logy=False)
plt.title("Top 10 Chinese Provinces by Number of Confirmeds, Excluding Hubei Province")
china_table[:10].T.pct_change().rolling(5).mean().plot(style="o-", logy=False)
plt.title("Growth Rates")
plt.ylim(-0.2, 1)
japan.groupby("date")[metrics].sum().plot(style="o-")
plt.title("Japan: Num Confirmed, Deaths, and Recovereds")
japan.set_index("date")[metrics].pct_change().rolling(5).mean().plot(style="o-")
|
# # Load Library and Data
import numpy as np
import pandas as pd
import pandas_profiling
import seaborn as sns
import matplotlib.pyplot as plt
import plotly as py
import plotly.graph_objs as go
import plotly.express as px
import statsmodels.api as sm
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler, MaxAbsScaler
# Pipelines allow you to create a single object that includes all steps from data preprocessing & classification.
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
from sklearn.metrics import accuracy_score, recall_score
from IPython.display import display_html, display, Markdown
import warnings
warnings.filterwarnings("ignore")
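# A minimal, self-contained sketch of the pipeline idea mentioned above: chaining a scaler
# and a classifier into a single object. The toy data below is an illustrative assumption
# and is not used anywhere else in this notebook.
_toy_X = [[0.0, 1.0], [1.0, 0.0], [0.4, 0.6], [0.9, 0.1]]
_toy_y = [0, 1, 0, 1]
_demo_pipeline = make_pipeline(MinMaxScaler(), LogisticRegression(solver="liblinear"))
_demo_pipeline.fit(_toy_X, _toy_y)  # fits the scaler and the classifier in one call
print(_demo_pipeline.predict([[0.2, 0.8]]))  # transform + predict in one call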
# custom function to display dataframes
def displayoutput(*args):
html_str = ""
for df in args:
html_str += df.to_html()
display_html(
html_str.replace(
"table",
'table style="display:inline;margin-left:50px !important;margin-right: 40px !important"',
),
raw=True,
)
def printoutput(string, color=None):
colorstr = "<span style='color:{}'>{}</span>".format(color, string)
display(Markdown(colorstr))
originalData = pd.read_csv(
"../input/portuguese-marketing-campaigns-dataset/bank-full.csv"
)
# # EDA
# **Bank client data**
# 1 - age
# 2 - job : type of job
# 3 - marital : marital status
# 4 - education
# 5 - default: has credit in default?
# 6 - housing: has housing loan?
# 7 - loan: has personal loan?
# 8 - balance in account
# **Related to previous contact**
# 8 - contact: contact communication type
# 9 - month: last contact month of year
# 10 - day_of_week: last contact day of the week
# 11 - duration: last contact duration, in seconds
# **Other attributes**
# 12 - campaign: number of contacts performed during this campaign and for this client
# 13 - pdays: number of days that passed by after the client was last contacted from a previous campaign
# 14 - previous: number of contacts performed before this campaign and for this client
# 15 - poutcome: outcome of the previous marketing campaign
# # Data Profiling
pandas_profiling.ProfileReport(originalData)
originalData.shape
# Descriptive Statistics for Categorical Variables
#
originalData.describe(include=["object"])
CatCloums = originalData.select_dtypes(include="object")
print("Categorical columns:")
print("___________________________________________________")
sno = 1
for i in CatCloums.columns:
print(sno, ".", i)
sno += 1
# Categorical data by Count plot
for catcol in CatCloums:
    plt.figure(figsize=(20, 20))
    sns.countplot(
        data=CatCloums, y=catcol, order=CatCloums[catcol].value_counts().index
    )
    plt.show()
for i in CatCloums:
print("Cloumn Name :", i)
print(CatCloums[i].value_counts())
for i in CatCloums:
f, axes = plt.subplots(figsize=(7, 5))
print(i)
sns.countplot(originalData[i])
plt.xticks(rotation=50)
plt.show()
# Descriptive Statistics for Continuous(Numerical) Variables.
NumColums = originalData.select_dtypes(exclude="object")
sno = 1
print("Numerical columns:")
print("___________________________________________________")
for i in NumColums.columns:
print(sno, ".", i)
sno += 1
# Visualizing Distribution of Continuous Variables
_ = originalData.hist(
column=NumColums.columns,
figsize=(15, 15),
grid=False,
color="#86bf91",
zorder=2,
rwidth=1.0,
)
# Distribution plot-Skewness
for i in NumColums:
print("Column :", i)
sns.distplot(originalData[i])
plt.show()
# Five Point Summary
originalData.describe(include=["object"])
originalData.describe(exclude=["object"])
# Correlation of Features
originalData.corr()
# # DATA MINING
duplicates = originalData.duplicated()
sum(duplicates)
originalData.isna().sum()
originalData.isnull().sum()
# **Drop the Columns based on Corr.**
originalData.drop(["duration"], axis=1, inplace=True)
originalData.head(10)
# Converting selected columns to the categorical type (dummy variables are created later, before modeling)
originalData["pdays"] = originalData["pdays"].astype("category")
originalData["Target"] = originalData["Target"].astype("category")
originalData
# Group numerical variables by mean for the classes of Target variable
np.round(originalData.groupby(["Target"]).mean(), 1)
# # Model Building
# Data Preparation for models
from sklearn.model_selection import train_test_split
X = originalData.loc[:, originalData.columns != "Target"]
y = originalData["Target"]
X = pd.get_dummies(X, drop_first=True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.40, random_state=101
)
# # Logistic Regression
logregmodel = LogisticRegression(solver="liblinear")
# Fit the model on train
logregmodel.fit(X_train, y_train)
# predict on test
y_predict = logregmodel.predict(X_test)
y_predict_df = pd.DataFrame(y_predict)
# Check whether the model is overfitting by comparing train and test scores
y_pred = logregmodel.predict(X_test)
print(logregmodel.score(X_train, y_train))
print(logregmodel.score(X_test, y_test))
# Confusion Matrix
cm = metrics.confusion_matrix(y_test, y_predict)
plt.clf()
plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Wistia)
clsnames = ["Not_Subscribed", "Subscribed"]
plt.title("Confusion Matrix for Test Data")
plt.ylabel("Actual")
plt.xlabel("Predicted")
tick_marks = np.arange(len(clsnames))
plt.xticks(tick_marks, clsnames, rotation=45)
plt.yticks(tick_marks, clsnames)
s = [["TN", "FP"], ["FN", "TP"]]
for i in range(2):
for j in range(2):
plt.text(j, i, str(s[i][j]) + " = " + str(cm[i][j]))
plt.show()
# Model Score
logisticscore = logregmodel.score(X_test, y_test)
print(logisticscore)
# Classification accuracy
logisticaccuracy = metrics.accuracy_score(y_test, y_predict)
print(logisticaccuracy)
# # KNN Classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# instantiate learning model
knn = KNeighborsClassifier()
# fitting the model
knn.fit(X_train, y_train)
# predict the response
y_pred = knn.predict(X_test)
# evaluate accuracy
accuracy_score(y_test, y_pred)
# instantiate learning model (k = 3)
knn = KNeighborsClassifier(n_neighbors=3)
# fitting the model
knn.fit(X_train, y_train)
# predict the response with k = 3
y_pred = knn.predict(X_test)
# evaluate accuracy
accuracy_score(y_test, y_pred)
# instantiate learning model (k = 9)
knn = KNeighborsClassifier(n_neighbors=9)
# fitting the model
knn.fit(X_train, y_train)
# predict the response with k = 9
y_pred = knn.predict(X_test)
# evaluate accuracy
accuracy_score(y_test, y_pred)
# Find the optimal number of neighbours
# * Small value of K will lead to over-fitting
# * Large value of K will lead to under-fitting.
# creating odd list of K for KNN
myList = list(range(1, 20))
# subsetting just the odd ones
neighbors = list(filter(lambda x: x % 2 != 0, myList))
# empty list that will hold accuracy scores
ac_scores = []
# perform accuracy metrics for values from 1,3,5....19
for k in neighbors:
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
# predict the response
y_pred = knn.predict(X_test)
# evaluate accuracy
scores = accuracy_score(y_test, y_pred)
ac_scores.append(scores)
# changing to misclassification error
MSE = [1 - x for x in ac_scores]
# determining best k
optimal_k = neighbors[MSE.index(min(MSE))]
print("The optimal number of neighbors is %d" % optimal_k)
# Misclassification Error vs K
plt.plot(neighbors, MSE)
plt.xlabel("Number of Neighbors K")
plt.ylabel("Misclassification Error")
plt.show()
# Accuracy v/s Neighbours
lstaccuracy = []
for k in range(25):
K_value = k + 1
neigh = KNeighborsClassifier(n_neighbors=K_value)
neigh.fit(X_train, y_train)
y_pred = neigh.predict(X_test)
lstaccuracy.append(accuracy_score(y_test, y_pred) * 100)
print(
"Accuracy is ", accuracy_score(y_test, y_pred) * 100, "% for K-Value:", K_value
)
plt.plot(lstaccuracy)
plt.ylabel("Accuracy")
plt.xlabel("Number of neighbors")
plt.title("Accuracy vs # Neighbors")
count_misclassified = (y_test != y_pred).sum()
count_misclassified
# k-Fold Cross-Validation
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
# create a new KNN model
knn_cv = KNeighborsClassifier(n_neighbors=3)
# train model with cv of 5
cv_scores = cross_val_score(knn_cv, X_test, y_test, cv=5)
# print each cv score (accuracy) and average them
print(cv_scores)
print(format(np.mean(cv_scores)))
# # DecisionTree Classifier
from sklearn.tree import DecisionTreeClassifier
dTree = DecisionTreeClassifier(criterion="entropy", random_state=10)
dTree.fit(X_train, y_train)
# Accuracy
print(dTree.score(X_train, y_train))
print(dTree.score(X_test, y_test))
print(recall_score(y_test, dTree.predict(X_test), average="binary", pos_label="yes"))
# ***The recall score is relatively low and this has to be improved in the model***
# Confusion Matrix
predict_dTree = dTree.predict(X_test)
cm = metrics.confusion_matrix(y_test, predict_dTree)
cm_df = pd.DataFrame(cm)
plt.figure(figsize=(5, 5))
sns.heatmap(cm_df, annot=True, fmt="g")
# Gini Importance
# print (pd.DataFrame(dTree.feature_importances_, columns = ["Importance"], index = X_train.columns))
feat_importance = dTree.tree_.compute_feature_importances(normalize=False)
feat_imp_dict = dict(zip(X_train.columns, dTree.feature_importances_))
feat_imp = pd.DataFrame.from_dict(feat_imp_dict, orient="index")
feat_imp.sort_values(by=0, ascending=False)
# Regularize/Prune
dTree_Pruning = DecisionTreeClassifier(
criterion="entropy", random_state=100, max_depth=10, min_samples_leaf=3
)
dTree_Pruning.fit(X_train, y_train)
# Accuracy with Pruning
# preds_pruned = dTree_Pruning.predict(X_test)
# preds_pruned_train = dTree_Pruning.predict(X_train)
print(dTree_Pruning.score(X_train, y_train))
print(dTree_Pruning.score(X_test, y_test))
# Confusion Matrix with pruning
predict_dTree_purning = dTree_Pruning.predict(X_test)
cm_purning = metrics.confusion_matrix(y_test, predict_dTree_purning)
cm_df_purning = pd.DataFrame(cm_purning)
plt.figure(figsize=(5, 5))
sns.heatmap(cm_df_purning, annot=True, fmt="g")
# Gini Importance - After Pruning
## Calculating feature importance
feat_importance = dTree_Pruning.tree_.compute_feature_importances(normalize=False)
feat_imp_dict = dict(zip(X_train.columns, dTree_Pruning.feature_importances_))
feat_imp = pd.DataFrame.from_dict(feat_imp_dict, orient="index")
feat_imp.sort_values(by=0, ascending=False)
acc_DT = accuracy_score(y_test, predict_dTree_purning)
recall_DT = recall_score(
y_test, predict_dTree_purning, average="binary", pos_label="yes"
)
# Store the accuracy results for each model in a dataframe for final comparison
resultsDf = pd.DataFrame(
{"Method": ["Decision Tree"], "accuracy": acc_DT, "recall": recall_DT}
)
resultsDf = resultsDf[["Method", "accuracy", "recall"]]
resultsDf
# # Random Forest Model
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=50)
rfc = rfc.fit(X_train, y_train)
rfc
predict_rfc = rfc.predict(X_test)
accuracy_rfc = accuracy_score(y_test, predict_rfc)
recall_rfc = recall_score(y_test, predict_rfc, average="binary", pos_label="yes")
tempResultsDf = pd.DataFrame({"Method": ["Random Forest"], "accuracy": [accuracy_rfc]})
tempResultsDf
tempResultsDf = pd.DataFrame(
{"Method": ["Random Forest"], "accuracy": [accuracy_rfc], "recall": [recall_rfc]}
)
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[["Method", "accuracy", "recall"]]
resultsDf
# Observation:
# * Compared to the decision tree, we can see that the accuracy has slightly improved for the Random forest model
# * Overfitting is reduced after pruning, but recall has slightly reduced
# # Adaboost for the same data
abc1 = AdaBoostClassifier(n_estimators=10, learning_rate=0.1, random_state=25)
abc1 = abc1.fit(X_train, y_train)
accuracy_AdaBoost = abc1.score(X_test, y_test)
print(accuracy_AdaBoost)
pred_AB = abc1.predict(X_test)
acc_AB = accuracy_score(y_test, pred_AB)
recall_AB = recall_score(y_test, pred_AB, pos_label="yes")
tempResultsDf = pd.DataFrame(
{"Method": ["Adaboost"], "accuracy": [acc_AB], "recall": [recall_AB]}
)
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[["Method", "accuracy", "recall"]]
resultsDf
predict_AdaBoost = abc1.predict(X_test)
cm = metrics.confusion_matrix(y_test, pred_AB)
cm_df = pd.DataFrame(cm)
plt.figure(figsize=(5, 5))
sns.heatmap(
cm_df,
annot=True,
fmt="g",
)
# # Bagging for the same data
bgcl = BaggingClassifier(
n_estimators=100, max_samples=0.7, bootstrap=True, oob_score=True, random_state=22
)
bgcl = bgcl.fit(X_train, y_train)
pred_BG = bgcl.predict(X_test)
acc_BG = accuracy_score(y_test, pred_BG)
recall_BG = recall_score(y_test, pred_BG, pos_label="yes")
tempResultsDf = pd.DataFrame(
{"Method": ["Bagging"], "accuracy": [acc_BG], "recall": [recall_BG]}
)
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[["Method", "accuracy", "recall"]]
resultsDf
# # Gradient Boosting for same data
gb_model = GradientBoostingClassifier(
n_estimators=200, learning_rate=0.1, random_state=22
)
gb_model = gb_model.fit(X_train, y_train)
predict_GB = gb_model.predict(X_test)
accuracy_GB = accuracy_score(y_test, predict_GB)
recall_GB = recall_score(y_test, predict_GB, pos_label="yes")
tempResultsDf = pd.DataFrame(
{"Method": ["Gradient Boost"], "accuracy": [accuracy_GB], "recall": [recall_GB]}
)
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[["Method", "accuracy", "recall"]]
resultsDf
|
import keras
from keras.models import Sequential
from keras.layers import (
LSTM,
Embedding,
BatchNormalization,
Dense,
TimeDistributed,
Dropout,
Bidirectional,
Flatten,
GlobalMaxPool1D,
)
from nltk.tokenize import word_tokenize
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
import pandas as pd
import numpy as np
from sklearn.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
accuracy_score,
)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
def plot(history, arr):
fig, ax = plt.subplots(1, 2, figsize=(20, 5))
for idx in range(2):
ax[idx].plot(history.history[arr[idx][0]])
ax[idx].plot(history.history[arr[idx][1]])
ax[idx].legend([arr[idx][0], arr[idx][1]], fontsize=18)
ax[idx].set_xlabel("A ", fontsize=16)
ax[idx].set_ylabel("B", fontsize=16)
ax[idx].set_title(arr[idx][0] + " X " + arr[idx][1], fontsize=16)
dataset = pd.read_csv("../input/nlp-getting-started/train.csv")
test = pd.read_csv("../input/nlp-getting-started/test.csv")
submission = pd.read_csv("../input/nlp-getting-started/sample_submission.csv")
train = dataset.text.values
test = test.text.values
sentiments = dataset.target.values
word_tokenizer = Tokenizer()
word_tokenizer.fit_on_texts(train)
vocab_length = len(word_tokenizer.word_index) + 1
def embed(corpus):
return word_tokenizer.texts_to_sequences(corpus)
longest_train = max(train, key=lambda sentence: len(word_tokenize(sentence)))
length_long_sentence = len(word_tokenize(longest_train))
padded_sentences = pad_sequences(embed(train), length_long_sentence, padding="post")
test_sentences = pad_sequences(embed(test), length_long_sentence, padding="post")
# # Twitter GloVe Embeddings
embeddings_dictionary = dict()
embedding_dim = 200
glove_file = open(
"../input/glove-twitter/glove.twitter.27B." + str(embedding_dim) + "d.txt",
encoding="utf8",
)
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype="float32")
embeddings_dictionary[word] = vector_dimensions
glove_file.close()
# embeddings_dictionary = dict()
# embedding_dim = 200
# glove_file = open('../input/glove-global-vectors-for-word-representation/glove.6B.' + str(embedding_dim) + 'd.txt', encoding="utf8")
# for line in glove_file:
# records = line.split()
# word = records[0]
# vector_dimensions = np.asarray(records[1:], dtype='float32')
# embeddings_dictionary [word] = vector_dimensions
# glove_file.close()
embedding_matrix = np.zeros((vocab_length, embedding_dim))
for word, index in word_tokenizer.word_index.items():
embedding_vector = embeddings_dictionary.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
X_train, X_test, y_train, y_test = train_test_split(
padded_sentences, sentiments, test_size=0.2, random_state=0
)
model = Sequential()
model.add(
Embedding(
vocab_length,
embedding_dim,
weights=[embedding_matrix],
input_length=length_long_sentence,
trainable=False,
)
)
model.add(Bidirectional(LSTM(16)))
# https://machinelearningmastery.com/how-to-accelerate-learning-of-deep-neural-networks-with-batch-normalization/
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()
checkpoint = ModelCheckpoint(
"model.h5", monitor="val_loss", verbose=1, save_best_only=True
)
reduce_lr = ReduceLROnPlateau(
monitor="val_loss", factor=0.2, verbose=1, patience=1, min_lr=0.001
)
history = model.fit(
X_train,
y_train,
epochs=5,
batch_size=32,
validation_data=[X_test, y_test],
verbose=1,
callbacks=[reduce_lr, checkpoint],
)
loss, accuracy = model.evaluate(X_test, y_test)
print("Loss:", loss)
print("Accuracy:", accuracy)
# **Glove 25**
# * Loss: 0.4995410907879051
# * Accuracy: 0.7925148010253906
# **Glove 50**
# * Loss: 0.5164433578793657
# * Accuracy: 0.8187787532806396
# **Glove 100**
# * Loss: 0.4953955445568263
# * Accuracy: 0.8030203580856323
# **Glove 200**
# * Loss: 0.521741591897828
# * Accuracy: 0.7682206034660339
plot(history, [["loss", "val_loss"], ["accuracy", "val_accuracy"]])
preds = model.predict_classes(X_test)
def metrics(pred_tag, y_test):
print("F1-score: ", f1_score(pred_tag, y_test))
print("Precision: ", precision_score(pred_tag, y_test))
print("Recall: ", recall_score(pred_tag, y_test))
print("Acuracy: ", accuracy_score(pred_tag, y_test))
print("-" * 50)
print(classification_report(pred_tag, y_test))
metrics(preds, y_test)
model.load_weights("model.h5")
preds = model.predict_classes(X_test)
metrics(preds, y_test)
submission.target = model.predict_classes(test_sentences)
submission.to_csv("submission.csv", index=False)
submission.target.value_counts().plot.bar()
submission
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Question - 01
import numpy as np
import pandas as pd
odi = pd.read_csv("/kaggle/input/trainings/odi-batting.csv")
# odi.head()
print(
"Sachin has played",
odi[(odi["Player"] == "Sachin R Tendulkar")].shape[0],
"matches.",
)
print("Rahul has played", odi[(odi["Player"] == "Rahul Dravid")].shape[0], "matches.")
print(
"Together they have played",
odi[(odi["Player"] == "Sachin R Tendulkar") | (odi["Player"] == "Rahul Dravid")][
"URL"
].nunique(),
"unique matches in total.",
)
odi_s = odi[(odi["Player"] == "Sachin R Tendulkar")]["URL"]
odi_r = odi[(odi["Player"] == "Rahul Dravid")]["URL"]
odi_common = pd.merge(odi_s, odi_r, how="inner")
print(
"They have played",
odi_common.shape[0],
"unique matches in common (i.e. both were in same match).",
)
# Question - 02
import numpy as np
import pandas as pd
def f_InactiveYears(pODI, pName):
pODI = pODI[(pODI["Player"] == pName)]
if len(pODI) == 0:
return -99 # -99 is a result when player is not found.
pODI = pODI["MatchDate"].str[-4:]
    pODI = pODI.astype(int)
return pODI.max() - pODI.min() + 1 - len(np.unique(pODI))
odi_data = pd.read_csv("/kaggle/input/trainings/odi-batting.csv")
print(f_InactiveYears(odi_data, "Sachin R Tendulkar"))
print(f_InactiveYears(odi_data, "Rahul Dravid"))
print(f_InactiveYears(odi_data, "Rahul Ganguly"))
odi_data_players = odi_data[["Player"]]
odi_data_players = odi_data_players.drop_duplicates()
odi_data_players_inactiveyears = pd.DataFrame(columns=["Player", "InactiveYears"])
for x in odi_data_players["Player"]:
# print(x, f_InactiveYears(odi_data, x))
odi_data_players_inactiveyears = odi_data_players_inactiveyears.append(
{"Player": x, "InactiveYears": f_InactiveYears(odi_data, x)}, ignore_index=True
)
odi_data_players_inactiveyears.sort_values(by=["InactiveYears"], ascending=False).head(
10
)
# Question - 03
import numpy as np
import pandas as pd
from datetime import datetime
def f_YearsFor2000Runs(pODI, pName):
pODI = pODI[(pODI["Player"] == pName)]
if len(pODI) == 0:
return -99 # -99 is a result when player is not found.
pODI = pODI[["Player", "MatchDate", "Runs"]]
pODI["MatchDate"] = pd.to_datetime(pODI["MatchDate"])
pODI = pODI.sort_values(by=["Player", "MatchDate"])
runs = 0
matches = 0
for idx, idxrow in pODI[(pODI["Player"] == pName)].iterrows():
runs = runs + idxrow["Runs"]
matches = matches + 1
if runs > 1999:
return matches
return -98 # -98 is a result when player has not scored 2000 runs in his career.
odi_data = pd.read_csv("/kaggle/input/trainings/odi-batting.csv")
odi_data_players = odi_data[["Player"]]
odi_data_players = odi_data_players.drop_duplicates()
# print(odi_data_players.shape)
# BELOW CAN BE ADDED TO MAKE SET SMALLER BY TAKING ONLY THOSE PLAYERS WHO HAVE SCORED 2000+ RUNS
# odi_data_players = odi_data.groupby(['Player'])['Runs'].agg('sum').reset_index()
# odi_data_players = odi_data_players[(odi_data_players['Runs'] > 1999)]
odi_data_players_years2000 = pd.DataFrame(columns=["Player", "Years2000"])
for x in odi_data_players["Player"]:
y = f_YearsFor2000Runs(odi_data, x)
print(x, y)
if y > 0:
odi_data_players_years2000 = odi_data_players_years2000.append(
{"Player": x, "Years2000": y}, ignore_index=True
)
odi_data_players_years2000.sort_values(by=["Years2000"], ascending=True).head(10)
# Question - 04
import numpy as np
import pandas as pd
from datetime import datetime
def f_MatchesFor10Hundreds(pODI, pName):
pODI = pODI[(pODI["Player"] == pName)]
if len(pODI) == 0:
return -99 # -99 is a result when player is not found.
pODI = pODI[["Player", "MatchDate", "Runs"]]
pODI["MatchDate"] = pd.to_datetime(pODI["MatchDate"])
pODI = pODI.sort_values(by=["Player", "MatchDate"])
hundreds = 0
matches = 0
for idx, idxrow in pODI[(pODI["Player"] == pName)].iterrows():
matches = matches + 1
if idxrow["Runs"] > 99:
hundreds = hundreds + 1
if hundreds > 9:
return matches
return -98 # -98 if the player has not scored more than 10 hundreds
odi_data = pd.read_csv("/kaggle/input/trainings/odi-batting.csv")
# print(f_MatchesFor10Hundreds(odi_data,'Sachin R Tendulkar'))
odi_data_players_hundreds = pd.DataFrame(columns=["Player", "MatchesForHundres"])
odi_data_players = odi_data[["Player"]]
odi_data_players = odi_data_players.drop_duplicates()
print(odi_data_players.shape)
for x in odi_data_players["Player"]:
y = f_MatchesFor10Hundreds(odi_data, x)
if y > 0:
odi_data_players_hundreds = odi_data_players_hundreds.append(
{"Player": x, "MatchesForHundres": y}, ignore_index=True
)
odi_data_players_hundreds.sort_values(by=["MatchesForHundres"], ascending=True).head(10)
odi_data_players_hundreds.sort_values(by=["MatchesForHundres"], ascending=True).head(
10
).plot.bar(x="Player", y="MatchesForHundres")
# Question - 06
import numpy as np
import pandas as pd
myDF = pd.DataFrame(
[100, 104, 99, 100, 100, 100, 98, 105, 105, 100, 110, 110, 110, 110, 100],
columns=["BaseData"],
)
myDF["AbsoluteDiff"] = myDF.BaseData.diff()
myDF["PercentDiff"] = myDF.BaseData.pct_change() * 100
constantpatches = 0
prevdiff = 10
for x in myDF["AbsoluteDiff"]:
if x == 0:
if prevdiff != 0:
constantpatches = constantpatches + 1
prevdiff = x
# print(prevdiff)
print("Number of Constant patches :", constantpatches)
print("Number of 5% Positive changes :", myDF[(myDF["PercentDiff"] >= 5)].shape[0])
|
# # COVID-19 data with SIR model
# Using a mathematical epidemic model, this notebook will predict the number of COVID-19 (Novel Coronavirus 2019) cases.
# * Arrange training dataset
# * Calculate parameters of SIR model
# * Predict the number of cases
# * Remarks and To-do list
# Note: "Infected" = "Confirmed" - "Deaths" - "Recovered"
# ## Data cleaning
# ### Packages
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import functools
from pprint import pprint
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.ticker import ScalarFormatter
import seaborn as sns
# matplotlib is used here to create static figures rather than plotly express.
import optuna
import scipy
from scipy.integrate import solve_ivp
plt.style.use("seaborn-ticks")
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["font.size"] = 12
def line_plot(df, title, ylabel="Cases", h=None, v=None):
    ax = df.plot(figsize=(30, 20))
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
ax.set_title(title)
ax.set_xlabel(None)
ax.set_ylabel(ylabel)
ax.legend(bbox_to_anchor=(1.02, 0), loc="lower left", borderaxespad=0)
if h is not None:
ax.axhline(y=h, color="black", linestyle="--")
if v is not None:
ax.axvline(x=v, color="black", linestyle="--")
plt.show()
# ### Raw data
raw = pd.read_csv("//kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
datetime.now().strftime("%Y/%m/%d %H:%M:%S")
raw.tail()
raw.info()
raw.describe()
pd.DataFrame(raw.isnull().sum()).T
pprint(raw["Country"].unique().tolist(), compact=True)
raw.loc[raw["Country"] == "Others", :]
# ### Data Cleaning
# Note: "Infected" = "Confirmed" - "Deaths" - "Recovered"
data_cols = ["Infected", "Deaths", "Recovered"]
rate_cols = ["Mortality rate", "Recovery rate"]
ncov_df = raw.rename({"Date": "Time", "Province/State": "Province"}, axis=1)
ncov_df["Time"] = pd.to_datetime(ncov_df["Time"])
ncov_df["Country"] = ncov_df["Country"].replace({"Mainland China": "China"})
ncov_df["Province"] = (
ncov_df["Province"]
.fillna("-")
.replace({"Cruise Ship": "Diamond Princess cruise ship "})
)
ncov_df["Infected"] = ncov_df["Confirmed"] - ncov_df["Deaths"] - ncov_df["Recovered"]
ncov_df[data_cols] = ncov_df[data_cols].astype(int)
ncov_df = ncov_df.loc[:, ["Time", "Country", "Province", *data_cols]]
ncov_df.tail()
ncov_df.info()
ncov_df.describe()
pd.DataFrame(ncov_df.isnull().sum()).T
pprint(ncov_df["Country"].unique().tolist(), compact=True)
# ### Visualize total values of all countries
total_df = ncov_df.groupby("Time").sum()
total_df["Mortality rate"] = total_df["Deaths"] / total_df[data_cols].sum(axis=1)
total_df["Recovery rate"] = total_df["Recovered"] / total_df[data_cols].sum(axis=1)
total_df.tail()
line_plot(total_df[data_cols], "The number of cases over time (all countries)")
line_plot(
total_df[rate_cols], "The number of cases over time (all countries)", ylabel=""
)
total_df["Recovery rate"].plot.kde()
total_df["Mortality rate"].plot.kde()
pd.DataFrame(total_df[rate_cols].describe()).T
# The recovery rate rose sharply after 01 Feb 2020, while the mortality rate stayed stable at around 0.02.
# ### The number of infected cases at country level
country_df = (
ncov_df.pivot_table(values="Infected", index="Time", columns="Country", aggfunc=sum)
.fillna(0)
.astype(int)
)
country_df.head()
country_df["Total except China"] = country_df.sum(axis=1) - country_df.loc[:, "China"]
line_plot(
country_df[["China", "Total except China"]], title="Infected cases at country level"
)
# Most of patients are confirmed in China.
# ### Infected cases in Hubei
hubei_series = (
ncov_df.loc[
(ncov_df["Country"] == "China") & (ncov_df["Province"] == "Hubei"),
["Time", "Infected"],
]
.groupby("Time")
.mean()["Infected"]
)
hubei_others_df = pd.DataFrame(
{
"Hubei": hubei_series,
"Total except Hubei": total_df.loc[:, "Infected"] - hubei_series,
}
)
hubei_others_df.head()
line_plot(hubei_others_df, title="Infected cases: Hubei and others")
# Total population
hubei_population = 59020000 # 2017
china_population = 1421000000 # 2017
except_name = "Total except Hubei"
df = pd.DataFrame(
{
"Hubei": hubei_others_df["Hubei"] / hubei_population,
except_name: hubei_others_df[except_name]
/ (china_population - hubei_population),
}
)
line_plot(df, title="Infected cases per population: Hubei")
# ### Training dataset
# The figures show that the absolute number of cases in the other provinces of China is large. However, considering the population of China, I decided to use only the Hubei data as the training dataset.
# Note: The unit of "elapsed time from the start time" is [hour].
start_time = ncov_df.iloc[0, 0]
start_time
train_df = ncov_df.loc[
(ncov_df["Country"] == "China") & (ncov_df["Province"] == "Hubei"),
["Time", *data_cols],
]
train_df = train_df.groupby("Time").mean().reset_index()
train_df["Elapsed"] = (
(train_df["Time"] - start_time).dt.total_seconds() / 3600
).astype(int)
train_df = train_df.loc[:, ["Elapsed", *data_cols]]
train_df.head()
line_plot(train_df.set_index("Elapsed"), title="Training dataset: data in Hubei")
hubei_population
# ## Calculate parameters of SIR model
# ### What is SIR model
# The SIR model is a simple mathematical model for understanding outbreaks of infectious diseases.
# [The SIR epidemic model - Learning Scientific Programming with Python](https://scipython.com/book/chapter-8-scipy/additional-examples/the-sir-epidemic-model/)
# * S: Susceptible (=All - Confirmed)
# * I: Infected (=Confirmed - Deaths - Recovered)
# * R: Recovered or Died (=Recovered + Deaths)
# Note: THIS IS NOT THE GENERAL MODEL!
# Though R in the SIR model usually means "Recovered and immune", here R is defined as "Recovered or Died", because the mortality rate cannot be ignored in the real COVID-19 data.
# Model:
# S + I $\overset{\beta}{\longrightarrow}$ 2I
# I $\overset{\gamma}{\longrightarrow}$ R
# $\beta$: effective contact rate
# $\gamma$: Recovery(+Mortality) rate
# Ordinary Differential Equation (ODE):
# $\frac{\mathrm{d}S}{\mathrm{dT}}= - N^{-1}\beta S I$
# $\frac{\mathrm{d}I}{\mathrm{dT}}= N^{-1}\beta S I - \gamma I$
# $\frac{\mathrm{d}R}{\mathrm{dT}}= \gamma I$
# Where $N=S+I+R$ is the total population, T is the elapsed time from the start date.
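# As a rough, illustrative sketch of the ODE above (an addition, not part of the original
# analysis): a forward-Euler integration of the dimensional SIR equations. The parameter
# values (beta, gamma, population, i0) below are made-up assumptions for illustration only.
def _euler_sir_sketch(beta=0.3, gamma=0.1, population=1_000_000, i0=100, steps=200, dt=1.0):
    s, i, r = float(population - i0), float(i0), 0.0
    history = []
    for _ in range(steps):
        new_infections = beta * s * i / population * dt  # S + I -> 2I
        new_removals = gamma * i * dt  # I -> R (recovered or died)
        s, i, r = s - new_infections, i + new_infections - new_removals, r + new_removals
        history.append((s, i, r))
    return history
# _euler_sir_sketch()[-1]  # final (S, I, R) after `steps` time units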
N = hubei_population
N
train_df["T"] = train_df.loc[:, "Elapsed"]
train_df["S"] = (
N
- train_df.loc[:, "Infected"]
- train_df.loc[:, "Deaths"]
- train_df.loc[:, "Recovered"]
)
train_df["I"] = train_df.loc[:, "Infected"]
train_df["R"] = train_df.loc[:, "Deaths"] + train_df.loc[:, "Recovered"]
train_df.head()
line_plot(train_df.set_index("T")[["I", "R"]], "Dataset: I, R", ylabel="")
# ### Non-dimensionalization of the SIR model
# To simplify the model, remove the units from the ODE.
# Set $(S, I, R) = N \times (x, y, z)$ and $(T, \beta, \gamma) = (\tau t, \tau^{-1} \rho, \tau^{-1} \sigma)$.
# This results in the ODE
# $\frac{\mathrm{d}x}{\mathrm{dt}}= - \rho x y$
# $\frac{\mathrm{d}y}{\mathrm{dt}}= \rho x y - \sigma y$
# $\frac{\mathrm{d}z}{\mathrm{dt}}= \sigma y$
# Where $N$ is the total population and $\tau$ is a constant ([hours]).
# The range of variables:
# $0 < (x, y, z, \rho, \sigma) < 1$
# Basic reproduction number, Non-dimentional parameter, is defined as
# $R_0 = \rho \sigma^{-1} = \beta \gamma^{-1}$
# [Customary values of R0 of well-known infectious diseases: (Wikipedia, CDC and WHO)](https://en.wikipedia.org/wiki/Basic_reproduction_number#cite_note-Smallpox-1)
# This link (Wikipedia) shows the following $R_0$ values, but I could not get the source documents.
# SARS: 2-5
# COVID-19: 1.4-6.6 (Check the Wikipedia page on 13Feb2020)
# Influenza: 2-3
# Ebola: 1.5-2.5
train_df[["x", "y", "z"]] = train_df[["S", "I", "R"]] / N
train_df.head()
# Variable t will be determined by $\tau$.
# We need to determine the three parameters ($\rho, \sigma$ and $\tau$) by model fitting.
# ### Describe model using Python
class SIR(object):
def __init__(self, rho, sigma, **kwargs):
self.rho = float(rho)
self.sigma = float(sigma)
def __call__(self, t, X):
x, y, z = [X[i] for i in range(3)]
dxdt = -self.rho * x * y
dydt = self.rho * x * y - self.sigma * y
dzdt = self.sigma * y
# tf.stack
return np.array([dxdt, dydt, dzdt])
def numerical_sir(initials, rho, sigma, tend, tstart=0, dt=1):
sol_sir = scipy.integrate.solve_ivp(
fun=SIR(rho=rho, sigma=sigma),
t_span=[tstart, tend],
y0=np.array(initials, dtype=np.float64),
t_eval=np.arange(tstart, tend + dt, dt),
)
sim_df = pd.DataFrame(
{
"t": sol_sir["t"],
"x": sol_sir["y"][0],
"y": sol_sir["y"][1],
"z": sol_sir["y"][2],
}
)
return sim_df
initials = train_df.loc[train_df.index[0], ["x", "y", "z"]].values
initials
# ## Example of SIR model
# For example, set $R_0 = 2.5, \rho=0.2, \tau=2$.
eg_r0, eg_rho, eg_tau = (2.5, 0.2, 2)
eg_sigma = eg_rho / eg_r0
eg_tend = train_df.loc[train_df.index[-1], "T"] / eg_tau
(eg_rho, eg_sigma, eg_rho, eg_tau, eg_tend)
eg_df = numerical_sir(initials, eg_rho, eg_sigma, eg_tend)
eg_df.tail()
line_plot(
eg_df.set_index("t"),
title=f"For exapmle: {eg_rho}, {eg_sigma}, {eg_rho}, {eg_tau}, {eg_tend}",
ylabel="",
)
# ### Hyperparameter optimization
# Using the Optuna package, the three parameters ($\rho, \sigma$ and $\tau$) were estimated by model fitting.
def apply_tau(train_df, tau):
df = train_df.copy()
df["t"] = (df["T"] / tau).astype(int)
df = df.loc[:, ["t", "x", "y", "z"]]
return df
def error_f(train_df_divided, sim_df):
    # We need to minimize the difference between the observed and estimated values
df = pd.merge(
train_df_divided, sim_df, on="t", suffixes=("_observed", "_estimated")
)
diffs = [abs(df[f"{v}_observed"] - df[f"{v}_estimated"]).sum() for v in ("y", "z")]
diff = sum(diffs)
return diff
def target_f(train_df, initials, rho, sigma, tau):
train_df_divided = apply_tau(train_df, tau)
tend = train_df_divided.loc[train_df_divided.index[-1], "t"]
sim_df = numerical_sir(initials, rho, sigma, tend, tstart=0, dt=1)
return error_f(train_df_divided, sim_df)
def objective_with_param(trial, train_df, initials):
rho = trial.suggest_uniform("rho", 0, 1)
sigma = trial.suggest_uniform("sigma", 0, 1)
tau = trial.suggest_int("tau", 1, 24)
return target_f(train_df, initials, rho, sigma, tau)
objective = functools.partial(
objective_with_param, train_df=train_df, initials=initials
)
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=300)
optimize_df = study.trials_dataframe()
optimize_df["time"] = optimize_df["datetime_complete"] - optimize_df["datetime_start"]
optimize_df = optimize_df.drop(["datetime_complete", "datetime_start"], axis=1)
optimize_df.tail()
sns.pairplot(optimize_df[["params_rho", "params_sigma", "params_tau"]])
params = study.best_params.copy()
params
rho, sigma, tau = params["rho"], params["sigma"], params["tau"]
r0 = round(rho / sigma, 2)
r0
train_df_divided = apply_tau(train_df, tau)
tend = train_df_divided.loc[train_df_divided.index[-1], "t"]
sim_df = numerical_sir(initials, rho, sigma, tend, tstart=0, dt=1)
merged = pd.merge(
train_df_divided, sim_df, on="t", suffixes=("_observed", "_estimated")
)
merged = merged.set_index("t")
merged.tail()
# ### Compare observed/estimated values
line_plot(
merged[["y_observed", "y_estimated"]],
f"Comparison of observed/estimated x(t): R0 = {r0}",
ylabel="",
)
line_plot(
merged[["z_observed", "z_estimated"]],
title=f"Comparison of observed/estimated z(t): R0 = {r0}",
ylabel="",
)
merged["y_diff"] = merged["y_estimated"] - merged["y_observed"]
merged["z_diff"] = merged["z_estimated"] - merged["z_observed"]
line_plot(merged[["y_diff", "z_diff"]], "y(t): estimated - observed", ylabel="", h=0)
# z(t) was successfully estimated, but error of y(t) cannot be ignored. This needs further investigation.
# ### Predicted dynamics
# Predict the future using the estimated parameters.
(rho, sigma, tau, r0)
sim_df.tail()
how_many = 15
row_n = len(sim_df)
initials = sim_df.set_index("t").iloc[0, :].values
tend = sim_df.loc[sim_df.index[-1], "t"] * how_many
xyz_df = numerical_sir(initials, rho, sigma, tend, tstart=0, dt=1)
line_plot(xyz_df.set_index("t"), "Predicted dynamics", ylabel="", h=1.0)
# If no action is taken, almost all people may be infected by this disease...
# ## Predict the number of cases in Hubei
# Predict the number of cases in Hubei using the estimated parameters.
start_time
N = hubei_population
(N, tau)
# #### SIR model
sir_df = pd.DataFrame(
{
"Time": xyz_df["t"] * tau,
"Susceptible": xyz_df["x"] * N,
"Infected": xyz_df["y"] * N,
"Recovered/Deaths": xyz_df["z"] * N,
}
)
sir_df["Time"] = start_time + sir_df["Time"].apply(lambda x: timedelta(hours=x))
sir_df = sir_df.set_index("Time").astype(int)
sir_df.tail()
line_plot(sir_df, "SIR in Hubei: If no action is taken", v=datetime.now())
# #### Measurable variables
crd_df = sir_df.copy()
crd_df["Confirmed"] = crd_df["Infected"] + crd_df["Recovered/Deaths"]
crd_df = crd_df.loc[:, ["Confirmed", "Recovered/Deaths"]]
crd_df.tail()
line_plot(crd_df, "Values in Hubei: If no actions will be taken", v=datetime.now())
line_plot(
crd_df.iloc[: row_n * 3, :],
"Values in Hubei: If no actions will be taken",
v=datetime.now(),
)
|
from __future__ import absolute_import
from __future__ import print_function
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
import pandas as pd
import numpy as np
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras import optimizers
from keras import losses
from keras.models import load_model
from keras import regularizers
import time
from keras import initializers
# Load the training dataset ~87K states
all_train = pd.read_csv("../input/applied-ai-assignment-2/Assignment_2_train.csv")
all_train.loc[(all_train.state == 4), "state"] = 0
all_train.loc[(all_train.state == 5), "state"] = 1
len(all_train)
all_train[1:5]
# Create a train/validation split
data_to_use = 1
train = all_train[: int(len(all_train) * data_to_use)]
split = 0.9
Train = train[: int(len(train) * split)]
Valid = train[int(len(train) * split) :]
# Remove the first and last column from the data, as it is the board name and the label
X_train = Train.iloc[:, 1:-1].values
X_valid = Valid.iloc[:, 1:-1].values
# Remove everything except the last column from the data, as it is the label and put it in y
y_train = Train.iloc[:, -1:].values
y_valid = Valid.iloc[:, -1:].values
len(X_train)
len(y_valid)
sample_train = X_train[50].reshape(-1, 6, 7)[0]
sample_train
import matplotlib.pyplot as plt
# plot one sample board from the training data as an image
plt.imshow(sample_train)
# set input to the shape of one X value
dimof_input = X_train.shape[1]
# Set y categorical
dimof_output = int(np.max(y_train) + 1)
y_train = np_utils.to_categorical(y_train, dimof_output)
y_valid = np_utils.to_categorical(y_valid, dimof_output)
y_valid
def initialize_model(
layer1=16,
layer2=0,
layer=0,
layer3=0,
layer4=0,
dropout1=0,
dropout2=0,
dropout3=0,
dropout4=0,
activation1="relu",
activation2="relu",
activation3="relu",
activation4="relu",
Optimizer=optimizers.Adam(learning_rate=0.001),
lossfunct=losses.categorical_crossentropy,
):
layer_1 = layer1
layer_2 = layer2
layer_3 = layer3
layer_4 = layer4
dropout_1 = dropout1
dropout_2 = dropout2
dropout_3 = dropout3
dropout_4 = dropout4
activation_1 = activation1
activation_2 = activation2
activation_3 = activation3
activation_4 = activation4
optimizer = Optimizer
loss_function = lossfunct
glorot = initializers.glorot_normal(seed=None)
mlp_model = Sequential()
mlp_model.add(
Dense(
layer_1,
input_dim=dimof_input,
kernel_initializer=glorot,
activation=activation_1,
)
)
if dropout_1 > 0:
mlp_model.add(Dropout(dropout_1))
if layer_2 > 0:
mlp_model.add(
Dense(
layer_2,
input_dim=dimof_input,
kernel_initializer=glorot,
activation=activation_2,
)
)
if dropout_2 > 0:
mlp_model.add(Dropout(dropout_2))
if layer_3 > 0:
mlp_model.add(
Dense(
layer_3,
input_dim=dimof_input,
kernel_initializer=glorot,
activation=activation_3,
)
)
if dropout_3 > 0:
mlp_model.add(Dropout(dropout_3))
if layer_4 > 0:
mlp_model.add(
Dense(
layer_4,
input_dim=dimof_input,
kernel_initializer=glorot,
activation=activation_4,
)
)
if dropout_4 > 0:
mlp_model.add(Dropout(dropout_4))
mlp_model.add(
Dense(dimof_output, kernel_initializer=glorot, activation="softmax")
) # do not change
mlp_model.compile(
loss=loss_function, # **** pick any suggested loss functions
optimizer=optimizer, # **** pick any suggested optimizers
metrics=["accuracy"],
) # do not change
return mlp_model
def save_output(
layer1,
dropout1,
activation1,
layer2,
dropout2,
activation2,
layer3,
dropout3,
activation3,
layer4,
dropout4,
activation4,
Optimizer,
pat,
lossfunct,
cur_output,
score,
time,
):
if len(cur_output) == 0:
columns = [
"Lay1",
"DO1",
"Act1",
"Lay2",
"DO2",
"Act2",
"Lay3",
"DO3",
"Act3",
"Lay4",
"DO4",
"Act4",
"Opt",
"Loss",
"Pat",
"Score",
"Time",
]
cur_output = pd.DataFrame(columns=columns)
cur_output.loc[len(cur_output)] = [
layer1,
dropout1,
activation1,
layer2,
dropout2,
activation2,
layer3,
dropout3,
activation3,
layer4,
dropout4,
activation4,
Optimizer,
pat,
lossfunct,
score,
time,
]
return cur_output
saved_output = []
sgd = optimizers.SGD(learning_rate=0.01) # default lr = 0.01
adagrad = optimizers.Adagrad(learning_rate=0.01) # default lr = 0.01
adadelta = optimizers.Adadelta(learning_rate=1.0) # default lr = 1.0
adam = optimizers.Adam(learning_rate=0.001) # default lr = 0.001
adamax = optimizers.Adamax(learning_rate=0.002) # default lr = 0.002
nadam = optimizers.Nadam(learning_rate=0.002) # default lr = 0.002
# Suggested loss functions
cat_cross = losses.categorical_crossentropy
mse = losses.mean_squared_error
binary = losses.binary_crossentropy
pat = 1
lay1 = 4096
DO1 = 0.5
lay2 = 0
DO2 = 0
lay3 = 0
DO3 = 0
lay4 = 0
DO4 = 0
lossf = mse
"""
for pat in [25,50,100]:
for DO1 in [0.0,0.25,0.5,0.75]:
for lay4 in [0]:
for lay3 in [0]:
for lay2 in [0]:
for lay1 in [8192]:
"""
for lay1 in [20]:
my_model = initialize_model(
layer1=lay1,
dropout1=DO1,
activation1="relu",
layer2=lay2,
dropout2=DO2,
activation2="relu",
layer3=lay3,
dropout3=DO3,
activation3="relu",
layer4=lay4,
dropout4=DO4,
activation4="relu",
Optimizer=adam,
lossfunct=lossf,
)
start = time.time()
es = EarlyStopping(
monitor="val_loss", # do not change
mode="min", # do not change
verbose=1, # allows you to see more info per epoch
patience=pat,
) # **** patience is how many validations to wait with nothing learned (patience * validation_freq)
mc = ModelCheckpoint(
"best_model.h5", monitor="val_loss", mode="min", verbose=0, save_best_only=True
) # do not change
history = my_model.fit(
x=X_train,
y=y_train,
batch_size=7900, # **** set from 1 to length of training data
epochs=10000, # do not change
verbose=0, # allows you to see more info per epoch
callbacks=[es, mc],
validation_data=(X_valid, y_valid),
validation_freq=1,
shuffle=True,
)
# load the best model
saved_model = load_model("best_model.h5")
# evaluate the model
_, train_acc = saved_model.evaluate(X_train, y_train, verbose=0)
_, valid_acc = saved_model.evaluate(X_valid, y_valid, verbose=0)
print("Train: %.3f, Valid: %.3f" % (train_acc, valid_acc))
runtime = time.time() - start
print(runtime)
if lossf == mse:
Lossf = "mse"
if lossf == cat_cross:
Lossf = "cat_cross"
if lossf == binary:
Lossf = "binary"
saved_output = save_output(
lay1,
DO1,
"relu",
lay2,
DO2,
"relu",
lay3,
DO3,
"relu",
lay4,
DO4,
"relu",
"adam",
Lossf,
pat,
saved_output,
valid_acc,
runtime,
)
saved_output.sort_values("Score", ascending=False)
pat = 1
lay1 = 32
DO1 = 0.5
lay2 = 0
DO2 = 0
lay3 = 0
DO3 = 0
lay4 = 0
DO4 = 0
lossf = mse
my_model = initialize_model(
layer1=lay1,
dropout1=DO1,
activation1="relu",
layer2=lay2,
dropout2=DO2,
activation2="relu",
layer3=lay3,
dropout3=DO3,
activation3="relu",
layer4=lay4,
dropout4=DO4,
activation4="relu",
Optimizer=adam,
lossfunct=mse,
)
start = time.time()
es = EarlyStopping(
monitor="val_loss", # do not change
mode="min", # do not change
verbose=1, # allows you to see more info per epoch
patience=1,
) # **** patience is how many validations to wait with nothing learned (patience * validation_freq)
mc = ModelCheckpoint(
"best_model.h5", monitor="val_loss", mode="min", verbose=0, save_best_only=True
) # do not change
history = my_model.fit(
x=X_train,
y=y_train,
batch_size=7900, # **** set from 1 to length of training data
epochs=10000, # do not change
verbose=0, # allows you to see more info per epoch
callbacks=[es, mc],
validation_data=(X_valid, y_valid),
validation_freq=1,
shuffle=True,
)
# load the best model
saved_model = load_model("best_model.h5")
# evaluate the model
_, train_acc = saved_model.evaluate(X_train, y_train, verbose=0)
_, valid_acc = saved_model.evaluate(X_valid, y_valid, verbose=0)
print("Train: %.3f, Valid: %.3f" % (train_acc, valid_acc))
runtime = time.time() - start
print(runtime)
# summarize history for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# Test for standard deviation of a model
pat = 1
lay1 = 32
DO1 = 0.5
lay2 = 0
DO2 = 0
lay3 = 0
DO3 = 0
lay4 = 0
DO4 = 0
lossf = mse
valid_list = []
for i in range(10):
my_model = initialize_model(
layer1=lay1,
dropout1=DO1,
activation1="relu",
layer2=lay2,
dropout2=DO2,
activation2="relu",
layer3=lay3,
dropout3=DO3,
activation3="relu",
layer4=lay4,
dropout4=DO4,
activation4="relu",
Optimizer=adam,
lossfunct=mse,
)
start = time.time()
es = EarlyStopping(
monitor="val_loss", # do not change
mode="min", # do not change
verbose=1, # allows you to see more info per epoch
patience=pat,
) # **** patience is how many validations to wait with nothing learned (patience * validation_freq)
mc = ModelCheckpoint(
"best_model.h5", monitor="val_loss", mode="min", verbose=0, save_best_only=True
) # do not change
history = my_model.fit(
x=X_train,
y=y_train,
batch_size=7900, # **** set from 1 to length of training data
epochs=10000, # do not change
verbose=0, # allows you to see more info per epoch
callbacks=[es, mc],
validation_data=(X_valid, y_valid),
validation_freq=1,
shuffle=True,
)
# load the best model
saved_model = load_model("best_model.h5")
# evaluate the model
_, train_acc = saved_model.evaluate(X_train, y_train, verbose=0)
_, valid_acc = saved_model.evaluate(X_valid, y_valid, verbose=0)
valid_list.append(valid_acc)
print("Train: %.3f, Valid: %.3f" % (train_acc, valid_acc))
print(time.time() - start)
print("Average Score- " + str(np.mean(valid_list)))
print("Standard deviation - " + str(np.std(valid_list)))
# summarize history for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "valid"], loc="upper left")
plt.show()
# Go here when your saved model is ready
test = pd.read_csv("../input/applied-ai-assignment-2/Assignment_2_test.csv")
test.loc[(test.state == 4), "state"] = 0
test.loc[(test.state == 5), "state"] = 1
X_test = test.iloc[:, 1:-1].values
y_test = test.iloc[:, -1:].values
# creates the final output
list_of_boards = [i for i in list(test["file_names"])]
result = saved_model.predict(X_test)
test_results = []
for i in result:
test_results.append(np.argmax(i))
# Creates a dataframe that can be saved as a csv for submission
submission_data = pd.DataFrame({"BoardId": list_of_boards, "Label": test_results})
submission_data[1:9]
submission_data.to_csv("submission.csv", sep=",", index=False)
saved_output = pd.read_csv("../input/sampleoutput/output.csv")
saved_output
|
# # **Introduction and uploading data**
# **Hi, everyone! That's my analysis and classification for Heart Disease UCI.** Here you can find general analysis, comparison between different variables and investigation of features importance. If you find my notebook interesting and helpful, please **UPVOTE.** Enjoy the analysis :)
# **Import packages**
# data analysis and wrangling
import pandas as pd
import numpy as np
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks", rc={"figure.figsize": (15, 10)})
# machine learning
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
ExtraTreesClassifier,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve
# **Acquire data**
data = pd.read_csv("../input/heart-disease-uci/heart.csv")
# # **Exploratory Data Analysis (EDA)**
# **Let's learn some info about our data.** For that I create a function which can show us missing ratio, distincts, skewness, etc.
data.head()
def detailed_analysis(df):
obs = df.shape[0]
types = df.dtypes
counts = df.apply(lambda x: x.count())
nulls = df.apply(lambda x: x.isnull().sum())
distincts = df.apply(lambda x: x.unique().shape[0])
missing_ratio = (df.isnull().sum() / obs) * 100
uniques = df.apply(lambda x: [x.unique()])
skewness = df.skew()
kurtosis = df.kurt()
print("Data shape:", df.shape)
cols = [
"types",
"counts",
"nulls",
"distincts",
"missing ratio",
"uniques",
"skewness",
"kurtosis",
]
details = pd.concat(
[types, counts, nulls, distincts, missing_ratio, uniques, skewness, kurtosis],
axis=1,
)
details.columns = cols
dtypes = details.types.value_counts()
print("________________________\nData types:\n", dtypes)
print("________________________")
return details
details = detailed_analysis(data)
details
data.describe()
# Wow, our data is completely clean (no missing values), so we can move on to visualizing some things
# **Target value distribution**
values = data.target.value_counts()
indexes = values.index
sns.barplot(indexes, values)
# **Pair plot between all variables**
sns.pairplot(data=data, vars=data.columns.values[:-1], hue="target")
# **Analysis of different chest types and their influence to the target value**
# Types of pain:
# - Value 0: typical angina
# - Value 1: atypical angina
# - Value 2: non-anginal pain
# - Value 3: asymptomatic
# Here we can see that people with the same chest pain type have almost the same age regardless of sex
sns.barplot(x="cp", y="age", data=data, hue="sex", ci=None)
# **Relationship between chest pain and different variables separated by target value.**
# 1. Here we can see that fbs varies noticeably depending on the chest pain type
# 2. Normal resting ecg values suggest the patient doesn't have heart disease (exception: asymptomatic chest pain, value 3)
# 3. If exang is 1, the patient tends to be healthy (exception: asymptomatic chest pain, value 3)
# 4. If oldpeak is high, the patient tends to be healthy (exception: asymptomatic chest pain, value 3)
# 5. It's better if slope has a low value (again with asymptomatic chest pain as an exception)
# 6. A high number of ca (major vessels) is always a good sign
# 7. It's good when thal nearly equals 3
fig = plt.figure(figsize=(20, 25))
palettes = ["Greens", "Purples", "YlOrRd", "RdBu", "BrBG", "cubehelix"] * 2
for x in range(10):
fig1 = fig.add_subplot(5, 2, x + 1)
sns.barplot(
x="cp",
y=data.columns.values[x + 3],
data=data,
hue="target",
ci=None,
palette=palettes[x],
)
# **Correlation heatmap**
correlation = data.corr()
fig = plt.figure(figsize=(12, 10))
sns.heatmap(correlation, annot=True, center=1, cmap="RdBu")
# **Relationship between slope and oldpeak**
# This plot confirms our statement that a lower slope is better. According to the jointplot, lower slope values come with higher oldpeak values, which means the patient is likely healthy
sns.jointplot(x="slope", y="oldpeak", data=data, height=10)
# **Violin plots for all variables**
# Here we can also get a sense of feature importance: if the distributions for target 0 and 1 look the same, the variable's correlation with the target is low. We can also see a smoothed value distribution for each variable
fig = plt.figure(figsize=(20, 25))
palettes = ["Greens", "Purples", "YlOrRd", "RdBu", "BrBG", "cubehelix"] * 2
for x in range(12):
fig1 = fig.add_subplot(6, 2, x + 1)
sns.violinplot(x="target", y=data.columns.values[x], data=data, palette=palettes[x])
# **SelectKBest**
# Finally for EDA we're gonna check the best features using SelectKBest
X = data.drop("target", axis=1)
y = data.target
selector = SelectKBest(score_func=chi2, k=5)
fitted = selector.fit(X, y)
features_scores = pd.DataFrame(fitted.scores_)
features_columns = pd.DataFrame(X.columns)
best_features = pd.concat([features_columns, features_scores], axis=1)
best_features.columns = ["Feature", "Score"]
best_features.sort_values(by="Score", ascending=False, inplace=True)
best_features
# # **Model**
# **Let's split our data to test and train**
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
print("Input train shape", X_train.shape)
print("Output train shape", y_train.shape)
print("Input test shape", X_test.shape)
print("Output test shape", y_test.shape)
# **Now we're gonna scale our data**
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)  # reuse the scaler fitted on the training data; refitting on the test set would use different statistics
type(X_train), type(X_test)
# **So, we can test some classification algorithms on our data. Also we create a DataFrame to collect scores**
scores = pd.DataFrame(columns=["Model", "Score"])
# **Also we define a function to show additional metrics (Confusion Matrix and ROC Curve)**
def show_metrics():
fig = plt.figure(figsize=(25, 10))
# Confusion matrix
fig.add_subplot(121)
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
# ROC Curve
fig.add_subplot(122)
ns_probs = [0 for _ in range(len(y_test))]
p_probs = model.predict_proba(X_test)[:, 1]
ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)
lr_fpr, lr_tpr, _ = roc_curve(y_test, p_probs)
plt.plot(ns_fpr, ns_tpr, linestyle="--", label="No Skill")
plt.plot(lr_fpr, lr_tpr, marker="o", label="Logistic")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend()
plt.show()
# **Logistic Regression**
model = LogisticRegression(solver="lbfgs")
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append(
{"Model": "Logistic Regression", "Score": accuracy}, ignore_index=True
)
show_metrics()
# **Support Vector Classifier (SVC)**
model = SVC(probability=True)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append({"Model": "SVC", "Score": accuracy}, ignore_index=True)
show_metrics()
# **Random Forest Classifier**
model = GridSearchCV(
estimator=RandomForestClassifier(),
param_grid={"n_estimators": [50, 100, 200, 300], "max_depth": [2, 3, 4, 5]},
cv=4,
)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100, model.best_params_)
scores = scores.append({"Model": "Random Forest", "Score": accuracy}, ignore_index=True)
show_metrics()
# **Gradient Boosting Classifier**
model = GradientBoostingClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append(
{"Model": "Gradient Boosting", "Score": accuracy}, ignore_index=True
)
show_metrics()
# **Extra Trees Classifier**
model = GridSearchCV(
estimator=ExtraTreesClassifier(),
param_grid={"n_estimators": [50, 100, 200, 300], "max_depth": [2, 3, 4, 5]},
cv=4,
)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append({"Model": "Extra Trees", "Score": accuracy}, ignore_index=True)
show_metrics()
# **K-Neighbors Classifier**
model = GridSearchCV(
estimator=KNeighborsClassifier(), param_grid={"n_neighbors": [1, 2, 3]}, cv=4
)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append({"Model": "K-Neighbors", "Score": accuracy}, ignore_index=True)
show_metrics()
# **Gaussian Naive Bayes**
model = GaussianNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append({"Model": "Gaussian NB", "Score": accuracy}, ignore_index=True)
show_metrics()
# **Decision Tree Classifier**
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append({"Model": "Decision Tree", "Score": accuracy}, ignore_index=True)
show_metrics()
# **XGB Classifier**
model = XGBClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy * 100)
scores = scores.append(
{"Model": "XGB Classifier", "Score": accuracy}, ignore_index=True
)
show_metrics()
# **Finally, let's review our scores**
scores.sort_values(by="Score", ascending=False)
sns.lineplot(x="Model", y="Score", data=scores)
# **Top-3 are Random Forest, K-Neighbors and Extra Trees.** If you have any feedback, feel free to share it; any help and advice is appreciated. **Thanks for reading my notebook :)**
# # Tuning and Ensemble Stacking (will be soon)
# **Ok, now let's tune the XGBoost Classifier and try to get a better score.** We select our params and model and tune them gradually to save time. **First we tune max_depth and min_child_weight** (a sketch of a possible next tuning stage follows the commented-out block below).
"""params = {
'max_depth': range(3, 10, 2),
'min_child_weight': range(1, 6, 2)
}
xgb = XGBClassifier(learning_rate=0.001, n_estimators=200, objective='binary:logistic',
silent=True, nthread=4, max_depth=5, min_child_weight=1, tree_method='gpu_hist',
gamma=0, subsample=0.8, colsample_bytree=0.8, scale_pos_weight=1, seed=228)
grid_search = GridSearchCV(xgb, params, n_jobs=4, cv=5, scoring='accuracy', verbose=1)
grid_search.fit(X_train, y_train)"""
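# A hedged sketch of how the gradual tuning could continue (kept commented out like the block above,
# since the first stage has not been run here): plug the best max_depth and min_child_weight found
# above into the estimator (the values 5 and 1 below are placeholders, not actual results from this
# notebook) and search the next pair of hyperparameters, with gamma and subsample as illustrative choices.
"""params_stage2 = {
    'gamma': [0, 0.1, 0.2, 0.3],
    'subsample': [0.6, 0.8, 1.0]
}
xgb_stage2 = XGBClassifier(learning_rate=0.001, n_estimators=200, objective='binary:logistic',
                           nthread=4, max_depth=5, min_child_weight=1, seed=228)
grid_search_stage2 = GridSearchCV(xgb_stage2, params_stage2, n_jobs=4, cv=5, scoring='accuracy', verbose=1)
grid_search_stage2.fit(X_train, y_train)"""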
|
get_ipython().magic("reload_ext autoreload")
get_ipython().magic("autoreload 2")
get_ipython().magic("matplotlib inline")
import os
import tensorflow as tf
from fastai.vision import *
from fastai.metrics import error_rate, accuracy
from sklearn.metrics import roc_auc_score
import pandas as pd
print(
"Training: nb malignant images:",
len(os.listdir("../input/skin-cancer-malignant-vs-benign/train/malignant")),
)
print(
"Training: nb benign images:",
len(os.listdir("../input/skin-cancer-malignant-vs-benign/train/benign")),
)
print(
"Test: nb malignant images:",
len(os.listdir("../input/skin-cancer-malignant-vs-benign/test/malignant")),
)
print(
"Test: nb benign images:",
len(os.listdir("../input/skin-cancer-malignant-vs-benign/test/benign")),
)
path = "../input/skin-cancer-malignant-vs-benign/data/"
sz = 200
bs = 64
classes = ("Malignant", "Benign")[::-1]
np.random.seed(42)
data = ImageDataBunch.from_folder(
path,
train="train",
test="test",
ds_tfms=get_transforms(flip_vert=True),
size=sz,
num_workers=4,
).normalize(imagenet_stats)
data
data.classes
data.classes, data.c, len(data.train_ds), len(data.valid_ds), len(data.test_ds)
class VGG16Like(nn.Module):
def __init__(self, shrink=True, dense_units=None):
assert shrink or dense_units is not None
super().__init__()
# Input size: batch x img_depth x 200 x 200
self.block1 = self.two_conv_block(
in_channel=3, out_channel=64, kernel=3, stride=1, padding=1, pool_kernel=2
)
# Input size: batch x 64 x 100 x 100
self.block2 = self.two_conv_block(64, 128, 3, 1, 1, 2)
# Input size: batch x 128 x 50 x 50
self.block3 = self.three_conv_block(128, 256, 3, 1, 1, 2)
# Input size: batch x 256 x 25 x 25
self.block4 = self.three_conv_block(256, 512, 3, 1, 1, 2)
        # Input size: batch x 512 x 12 x 12 (pooled down to batch x 512 x 6 x 6 after this block)
self.block5 = self.three_conv_block(512, 512, 3, 1, 1, 2)
self.dense = self.dense_block(
in_channel=512 * 6 * 6,
out_channel=dense_units,
nb_classes=2,
shrink=shrink,
)
def forward(self, x):
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = x.view(-1, 512 * 6 * 6)
x = self.dense(x)
return F.log_softmax(x, dim=-1)
@classmethod
def two_conv_block(
cls, in_channel, out_channel, kernel, stride, padding, pool_kernel
):
return nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel, stride=stride, padding=padding),
nn.BatchNorm2d(out_channel),
nn.ReLU(),
nn.Conv2d(out_channel, out_channel, kernel, stride=stride, padding=padding),
nn.BatchNorm2d(out_channel),
nn.ReLU(),
nn.AvgPool2d(pool_kernel),
)
@classmethod
def three_conv_block(
cls, in_channel, out_channel, kernel, stride, padding, pool_kernel
):
return nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel, stride=stride, padding=padding),
nn.BatchNorm2d(out_channel),
nn.ReLU(),
nn.Conv2d(out_channel, out_channel, kernel, stride=stride, padding=padding),
nn.BatchNorm2d(out_channel),
nn.ReLU(),
nn.Conv2d(out_channel, out_channel, kernel, stride=stride, padding=padding),
nn.BatchNorm2d(out_channel),
nn.ReLU(),
nn.AvgPool2d(pool_kernel),
)
@classmethod
def dense_block(cls, in_channel, out_channel, nb_classes, shrink=True):
if shrink:
# Chop off the original dense layers
return nn.Linear(in_channel, nb_classes)
return nn.Sequential(
nn.Linear(in_channel, out_channel),
nn.Linear(out_channel, out_channel),
nn.Linear(out_channel, out_channel),
nn.Linear(out_channel, nb_classes),
)
model = VGG16Like()
learn = Learner(
data,
model,
loss_func=nn.CrossEntropyLoss(),
metrics=[mean_squared_logarithmic_error],
)
learn.model_dir = "/kaggle/working/output"
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
lr = 6.0e-2
learn.unfreeze()
learn.fit_one_cycle(120, max_lr=lr, wd=0.01)
learn.save("vgg16like-model")
|
# Exploratory data analytics requires understanding of data and how different variables are correlated.
# To understand the data using Python data visualization and pandas, we will work through a case study of bank loan application data.
# The bank application data has a dependent variable named TARGET whose value is 1 when the applicant is a defaulter who has missed loan repayments, and 0 for all other applicants. All other columns in the data are treated as independent variables.
# We will explore the data step by step, visualize it, create count/count-percentage tables and see how variables such as the applicant's age, credit rating and income vary with the TARGET variable.
# **Note**
# * Kindly go through the code comments for details.
# * As we explore the data, I have added the analysis and conclusions that can be drawn up to that point.
# ## 1. Getting the data ready
# This is the first and most important step in data analysis. Usually there are a large number of columns, redundant rows and missing values in the data. It is very important to clean the data before performing any analysis.
# ## 1.1 Understanding the data
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_rows", 500)
# df = pd.read_csv("application_data.csv")
df = pd.read_csv("/kaggle/input/application_data.csv")
# Sanity checks on the data
df.head()
df.info()
df.shape
df.dtypes
# ## 1.2 Cleaning data by dropping unwanted rows and columns
# sum it up to check how many rows have all missing values
df.isnull().all(axis=1).sum()
# % of the missing values (column-wise)
col_missing_perc = round(100 * (df.isnull().sum() / len(df.index)), 2)
col_missing_perc
# getting cols with more than 20% missing and dropping them
col_missing_perc_greater_20 = []
for i in range(0, len(col_missing_perc)):
if col_missing_perc[i] > 20:
col_missing_perc_greater_20.append(col_missing_perc.index[i])
# dropping cols with more than 20% missing
df.drop(col_missing_perc_greater_20, axis=1, inplace=True)
# remaining columns
df.shape
# Of the remaining columns with < 20% missing data, a detailed analysis has been done to pick the 24 columns below for further analysis.
# subsetting the data
df = df[
[
"SK_ID_CURR",
"TARGET",
"NAME_CONTRACT_TYPE",
"CODE_GENDER",
"CNT_CHILDREN",
"AMT_INCOME_TOTAL",
"AMT_CREDIT",
"AMT_ANNUITY",
"AMT_GOODS_PRICE",
"NAME_INCOME_TYPE",
"NAME_EDUCATION_TYPE",
"NAME_FAMILY_STATUS",
"NAME_HOUSING_TYPE",
"DAYS_BIRTH",
"DAYS_EMPLOYED",
"CNT_FAM_MEMBERS",
"ORGANIZATION_TYPE",
"AMT_REQ_CREDIT_BUREAU_HOUR",
"AMT_REQ_CREDIT_BUREAU_DAY",
"AMT_REQ_CREDIT_BUREAU_WEEK",
"AMT_REQ_CREDIT_BUREAU_MON",
"AMT_REQ_CREDIT_BUREAU_QRT",
"AMT_REQ_CREDIT_BUREAU_YEAR",
"EXT_SOURCE_2",
]
]
##final list of columns for analysis
df.columns
# checking missing % in remaining columns
round(100 * (df.isnull().sum() / len(df.index)), 2)
# ## 1.3 Handling missing values
# The below 9 columns have been chosen to showcase the imputation of missing values from the subset of the data selected.
# a) ORGANIZATION_TYPE - Unordered Categorical variable.
# b) CODE_GENDER - Unordered Categorical variable.
# c) CNT_FAM_MEMBERS - Ordered categorical variable.
# For the above variables, the MODE has been used to impute the missing data, as the data is categorical in nature and the mode represents the most common category.
#
# d) AMT_REQ_CREDIT_BUREAU_HOUR - Continuous variable.
# e) AMT_REQ_CREDIT_BUREAU_DAY - Continuous variable.
# f) AMT_REQ_CREDIT_BUREAU_WEEK - Continuous variable.
# g) AMT_REQ_CREDIT_BUREAU_MON - Continuous variable.
# h) AMT_REQ_CREDIT_BUREAU_QRT - Continuous variable.
# i) AMT_REQ_CREDIT_BUREAU_YEAR - Continuous variable.
# For the above 6 variables, the MEDIAN has been used to impute the missing data, as the data is a continuous numerical value (integer) and the MEDIAN represents the typical value for the given dataset.
# 1.Handling missing values - Categorical
df["ORGANIZATION_TYPE"] = np.where(
df["ORGANIZATION_TYPE"].isnull(),
df["ORGANIZATION_TYPE"].mode(),
df["ORGANIZATION_TYPE"],
)
df["CODE_GENDER"] = np.where(
df["CODE_GENDER"] == "XNA", df["CODE_GENDER"].mode(), df["CODE_GENDER"]
)
df.loc[np.isnan(df["CNT_FAM_MEMBERS"]), ["CNT_FAM_MEMBERS"]] = df[
"CNT_FAM_MEMBERS"
].median()
# 2.Handling missing values -Continuous variables
df.loc[np.isnan(df["AMT_REQ_CREDIT_BUREAU_HOUR"]), ["AMT_REQ_CREDIT_BUREAU_HOUR"]] = df[
"AMT_REQ_CREDIT_BUREAU_HOUR"
].median()
df.loc[np.isnan(df["AMT_REQ_CREDIT_BUREAU_DAY"]), ["AMT_REQ_CREDIT_BUREAU_DAY"]] = df[
"AMT_REQ_CREDIT_BUREAU_DAY"
].median()
df.loc[np.isnan(df["AMT_REQ_CREDIT_BUREAU_WEEK"]), ["AMT_REQ_CREDIT_BUREAU_WEEK"]] = df[
"AMT_REQ_CREDIT_BUREAU_WEEK"
].median()
df.loc[np.isnan(df["AMT_REQ_CREDIT_BUREAU_MON"]), ["AMT_REQ_CREDIT_BUREAU_MON"]] = df[
"AMT_REQ_CREDIT_BUREAU_MON"
].median()
df.loc[np.isnan(df["AMT_REQ_CREDIT_BUREAU_QRT"]), ["AMT_REQ_CREDIT_BUREAU_QRT"]] = df[
"AMT_REQ_CREDIT_BUREAU_QRT"
].median()
df.loc[np.isnan(df["AMT_REQ_CREDIT_BUREAU_YEAR"]), ["AMT_REQ_CREDIT_BUREAU_YEAR"]] = df[
"AMT_REQ_CREDIT_BUREAU_YEAR"
].median()
df["CNT_FAM_MEMBERS"].mode()
# checking missing % in remaining columns
round(100 * (df.isnull().sum() / len(df.index)), 2)
# We still have 0.21% of EXT_SOURCE_2 missing. As this is a very small percentage, the rows with a null EXT_SOURCE_2 have been deleted. EXT_SOURCE_2 is the credit rating of an applicant and an important column for the analysis, so it is better to drop these rows than to impute a potentially incorrect value.
df = df.dropna(axis=0, subset=["EXT_SOURCE_2"])
round(100 * (df.isnull().sum() / len(df.index)), 2)
df.shape
# #### The 0.09% of missing values in AMT_GOODS_PRICE will be handled after removing outliers from the data.
# ## 1.4 Handling outliers
# Identification and handling of outliers has been done on the 3 columns - 'AMT_ANNUITY','AMT_GOODS_PRICE','AMT_CREDIT', using the 25-75% interquartile range.
# It is important to understand that an extremely high value is not always an outlier. In some cases an extremely high or extremely low value can indicate missing information (e.g. -999999 or 999999 in the case of numerical data).
# In our case, outliers have been removed before calculating the mean value of these columns. Outliers here are rare occurrences and would misrepresent the dataset if included in the analysis.
#
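# A quick, hedged sanity check for the sentinel-value case mentioned above (the specific codes
# -999999 / 999999 are illustrative; the real data dictionary would tell us which codes, if any, apply):
for col in ["AMT_ANNUITY", "AMT_GOODS_PRICE", "AMT_CREDIT"]:
    n_sentinel = df[col].isin([-999999, 999999]).sum()
    print(f"{col}: {n_sentinel} rows with suspected sentinel values")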
# Identifying and treating Outliers on columns - AMT_ANNUITY,AMT_GOODS_PRICE,AMT_CREDIT
df_outliers = df[["AMT_ANNUITY", "AMT_GOODS_PRICE", "AMT_CREDIT"]]
# df_outliers.shape (306574, 3)--before outlier removal
Q1 = df_outliers.quantile(0.25)
Q3 = df_outliers.quantile(0.75)
IQR = Q3 - Q1
print(IQR)
df_out_final = df_outliers[
~((df_outliers < (Q1 - 1.5 * IQR)) | (df_outliers > ((Q3 + 1.5 * IQR)))).any(axis=1)
]
# The mean value will be used further to impute missing value in the respective columns
df_out_final["AMT_GOODS_PRICE"].mean()
df_out_final["AMT_ANNUITY"].mean()
len(df_out_final.index) / len(df.index)
# This shows that about 7% of the rows contain outliers in AMT_ANNUITY, AMT_GOODS_PRICE or AMT_CREDIT. Therefore, these rows are not deleted from the dataset; only the missing values of AMT_GOODS_PRICE and AMT_ANNUITY are imputed with the mean calculated after removing the outliers.
# imputing missing value of AMT_GOODS_PRICE with mean of data after removing the outlier
df.loc[np.isnan(df["AMT_GOODS_PRICE"]), ["AMT_GOODS_PRICE"]] = round(
df_out_final["AMT_GOODS_PRICE"].mean(), 1
)
df.loc[np.isnan(df["AMT_ANNUITY"]), ["AMT_ANNUITY"]] = round(
df_out_final["AMT_ANNUITY"].mean(), 1
)
## verification of the fixes
round(100 * (df.isnull().sum() / len(df.index)), 2)
# #### We can now confirm now that we do not have any missing values and continue further with analysis.
# ## 1.5 Changing the data types of columns
# The following columns hold integer data but their datatype is float. The code below converts them to int.
# changing datatype of the columns
dt_dict = {
"AMT_REQ_CREDIT_BUREAU_HOUR": int,
"CNT_FAM_MEMBERS": int,
"AMT_REQ_CREDIT_BUREAU_WEEK": int,
"AMT_REQ_CREDIT_BUREAU_MON": int,
"AMT_REQ_CREDIT_BUREAU_DAY": int,
"AMT_REQ_CREDIT_BUREAU_QRT": int,
"AMT_REQ_CREDIT_BUREAU_YEAR": int,
}
df = df.astype(dt_dict)
# checking the datatypes
df.info()
# ## 1.6 Changing column names into meaningful ones for analysis.
# removing unnecessary spaces in column names
df.columns = [df.columns[i].strip() for i in range(len(df.columns))]
# renaming columns
df.rename(columns={"EXT_SOURCE_2": "CREDIT_RATINGS"}, inplace=True)
# ## 1.7 Creating derived variables and binning the data.
# Two columns have been chosen for binning, namely
# DAYS_BIRTH
# CREDIT_RATINGS
# From DAYS_BIRTH, the AGE of the client is calculated and then AGE_GROUPS are formed based on the age.
#
# Categorising customers into following
# Youth (<18)
# Young Adult (18 to 35)
# Adult (36 to 55)
# Senior (56 and up)
df["AGE"] = abs(df["DAYS_BIRTH"])
df["AGE"] = round(df["AGE"] / 365, 1)
df["AGE"]
df["AGE"].describe()
def age_group(y):
if y >= 56:
return "Senior"
elif y >= 36 and y < 56:
return "Adult"
elif y >= 18 and y < 36:
return "Young Adult"
else:
return "Youth"
df["AGE_GROUP"] = df["AGE"].apply(lambda x: age_group(x))
sns.countplot(x="AGE_GROUP", hue="TARGET", data=df)
# Just a quick analysis from the above plot: Seniors are less likely to default on repaying the loan.
# Binning CREDIT_RATINGS based on quantiles
# The credit rating is categorized into C1, C2, C3 and C4, where the C1 category is the highest.
# The categorization is done based on the quantiles.
df["CREDIT_RATINGS"].describe()
sns.boxplot(y=df["CREDIT_RATINGS"])
credit_category_quantile = list(df["CREDIT_RATINGS"].quantile([0.20, 0.5, 0.80, 1]))
credit_category_quantile
def credit_group(x):
if x >= credit_category_quantile[2]:
return "C1"
elif x >= credit_category_quantile[1]:
return "C2"
elif x >= credit_category_quantile[0]:
return "C3"
else:
return "C4"
df["CREDIT_CATEGORY"] = df["CREDIT_RATINGS"].apply(lambda x: credit_group(x))
sns.countplot(x="CREDIT_CATEGORY", hue="TARGET", data=df)
# ## 1.8 Understanding the data imbalance
df["TARGET"].value_counts(normalize=True)
# There is an imbalance in the data: only about 8% of clients have payment difficulties while about 92% do not.
# ## 2. Data Analysis
# Before performing the analysis, we need to identify each variable as categorical or numerical.
# checking for unique values per column to see what all columns can be categorised
df.nunique().sort_values()
# From the output above we can see that the variables listed above AGE can be treated as categorical (their number of unique values is very small compared to the length of the data) and the rest as continuous variables.
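# A hedged helper along the same lines (the cut-off of 15 unique values is an arbitrary illustrative choice):
categorical_cols = [c for c in df.columns if df[c].nunique() <= 15]
continuous_cols = [c for c in df.columns if df[c].nunique() > 15]
print("Treated as categorical:", categorical_cols)
print("Treated as continuous:", continuous_cols)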
# #### Dividing the data based on the dependent variable (TARGET) for further analysis
df0 = df[df["TARGET"] == 0]
df1 = df[df["TARGET"] == 1]
# ## 2.1 Univariate Analysis
# To check the impact of one independent variable on a dependent variable
# Median values of the numerical features for each TARGET class
df.pivot_table(columns="TARGET", aggfunc="median")
# ## 2.1.1 Univariate Analysis for Categorical Variable
# o Need to check: Counts/Count%
# o Plots: bar-charts, stacked bar charts
#
df["NAME_INCOME_TYPE"].unique()
plt.figure(figsize=(20, 9))
sns.countplot(x="NAME_INCOME_TYPE", hue="TARGET", data=df)
# ##### As there is a huge data imbalance, we convert the counts into percentages and use them for plotting and analysis
incomeCategories0 = pd.DataFrame(
df0["NAME_INCOME_TYPE"].value_counts().rename("Count_0").reset_index()
)
incomeCategories0_perct = pd.DataFrame(
df0["NAME_INCOME_TYPE"].value_counts(normalize=True).rename("Perct_0").reset_index()
)
incomeCategories0.rename(columns={"index": "NAME_INCOME_TYPE"})
incomeCategories0_perct.rename(columns={"index": "NAME_INCOME_TYPE"})
# Merging data to get the overall view of the variable "NAME_INCOME_TYPE"
incomeCategories0 = pd.merge(
incomeCategories0, incomeCategories0_perct, how="inner"
).rename(columns={"index": "NAME_INCOME_TYPE"})
incomeCategories0
incomeCategories1 = pd.DataFrame(
df1["NAME_INCOME_TYPE"].value_counts().rename("Count_1").reset_index()
)
incomeCategories1_perct = pd.DataFrame(
df1["NAME_INCOME_TYPE"].value_counts(normalize=True).rename("Perct_1").reset_index()
)
incomeCategories1.rename(columns={"index": "NAME_INCOME_TYPE"})
incomeCategories1_perct.rename(columns={"index": "NAME_INCOME_TYPE"})
# Merging data to get the overall view of the variable "NAME_INCOME_TYPE"
incomeCategories1 = pd.merge(
incomeCategories1, incomeCategories1_perct, how="inner"
).rename(columns={"index": "NAME_INCOME_TYPE"})
incomeCategories1
incomeCategories = pd.merge(incomeCategories0, incomeCategories1, how="inner").rename(
columns={"index": "NAME_INCOME_TYPE"}
)
def income_percentage_contri_0(count_0, count_1):
return 100 * (count_0 / (count_0 + count_1))
def income_percentage_contri_1(count_0, count_1):
return 100 * (count_1 / (count_0 + count_1))
incomeCategories["percentage_contri_0"] = incomeCategories[
["Count_0", "Count_1"]
].apply(lambda x: income_percentage_contri_0(*x), axis=1)
incomeCategories["percentage_contri_1"] = incomeCategories[
["Count_0", "Count_1"]
].apply(lambda x: income_percentage_contri_1(*x), axis=1)
incomeCategories.set_index("NAME_INCOME_TYPE", inplace=True)
incomeCategories
# For the above table the column description is:
#
# Count_0 = Total number of non defaulters applicants which belong to that particular income type.
# Ex: There are total 143550 non defaulter applicants that have "Working" as their income type.
#
# Count_1 = Total number of defaulter applicants which belong to that particular income type.
# Ex: There are total 15224 defaulter applicants that have "Working" as their income type.
#
# Perct_0 = The percentage the particular category contributes to all the non-defaulter applicants (Target = 0)
# Ex: Out of total applicants of Target=0, 50% of them have "Working" as their income type.
#
# Perct_1 = The percentage the particular category contributes to all the defaulter applicants (Target = 1)
# Ex: Out of total applicants of Target=1, 61% of them have "Working" as their income type.
#
# percentage_contri_0 = Out of all the applicants with the particular category, how much percentage belong to Target=0
# Ex: Out of total working applicants, 90% are those with Target =0
#
# percentage_contri_1 = Out of all the applicants with the particular category, how much percentage belong to Target=1
# Ex: Out of total working applicants, 9.5% are those with Target =1.
#
#
# #### Similar table with these columns will be used for analysis of other variables
#
fig = plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.title("Target = 1")
plt.ylabel('Percentage contribution to "defaulters"')
plt.plot(incomeCategories["percentage_contri_1"])
# ax1.set_xticklabels(labels = ax1.get_xticklabels(),rotation=30)
plt.rcParams.update(plt.rcParamsDefault)
incomeCategories = incomeCategories.sort_values(by="percentage_contri_1")
incomeCategories[["percentage_contri_1", "percentage_contri_0"]].plot(
kind="bar", stacked=True
)
# ## Analysis:
# Ignoring the income types "Unemployed" and "Maternity leave" as they have very little data.
# Working people form the largest share of applicants.
# Among the income types "Working", "Commercial associate", "Pensioner" and "State servant", Pensioners are the most likely to repay their loans on time.
# Out of all pensioners who applied for a loan, there is a 94.6% chance that they will repay the loan and a 5.3% chance that they will default.
# Similarly, out of all the working applicants, there is a 90.4% chance that they will repay the loan and a 9.5% chance that they will default.
# #### Therefore, the total spread in default rate due to income type is about 4.2% (9.58% being the highest rate and 5.3% the lowest).
# #### So when prioritizing applicants, those with income type "Pensioner" should be given the highest priority.
# Let us consider another categorical variable, CODE_GENDER (the gender of the applicant), and see how the gender of a person impacts the target variable.
df.CODE_GENDER.unique()
sns.countplot(x="CODE_GENDER", hue="TARGET", data=df)
genderCategories0 = pd.DataFrame(
df0["CODE_GENDER"].value_counts().rename("Count_0").reset_index()
)
genderCategories0_perct = pd.DataFrame(
df0["CODE_GENDER"].value_counts(normalize=True).rename("Perct_0").reset_index()
)
genderCategories0.rename(columns={"index": "CODE_GENDER"})
genderCategories0_perct.rename(columns={"index": "CODE_GENDER"})
# Merging data to get the overall view of the variable "CODE_GENDER"
genderCategories0 = pd.merge(
genderCategories0, genderCategories0_perct, how="inner"
).rename(columns={"index": "CODE_GENDER"})
genderCategories0
genderCategories1 = pd.DataFrame(
df1["CODE_GENDER"].value_counts().rename("Count_1").reset_index()
)
genderCategories1_perct = pd.DataFrame(
df1["CODE_GENDER"].value_counts(normalize=True).rename("Perct_1").reset_index()
)
genderCategories1.rename(columns={"index": "CODE_GENDER"})
genderCategories1_perct.rename(columns={"index": "CODE_GENDER"})
# Merging data to get the overall view of the variable "CODE_GENDER"
genderCategories1 = pd.merge(
genderCategories1, genderCategories1_perct, how="inner"
).rename(columns={"index": "CODE_GENDER"})
genderCategories1
genderCategories = pd.merge(genderCategories0, genderCategories1, how="inner").rename(
columns={"index": "CODE_GENDER"}
)
def gender_percentage_contri_0(count_0, count_1):
return 100 * (count_0 / (count_0 + count_1))
def gender_percentage_contri_1(count_0, count_1):
return 100 * (count_1 / (count_0 + count_1))
genderCategories["percentage_contri_0"] = genderCategories[
["Count_0", "Count_1"]
].apply(lambda x: gender_percentage_contri_0(*x), axis=1)
genderCategories["percentage_contri_1"] = genderCategories[
["Count_0", "Count_1"]
].apply(lambda x: gender_percentage_contri_1(*x), axis=1)
genderCategories.set_index("CODE_GENDER", inplace=True)
genderCategories
plt.rcParams.update(plt.rcParamsDefault)
genderCategories = genderCategories.sort_values(by="percentage_contri_1")
genderCategories[["percentage_contri_1", "percentage_contri_0"]].plot(
kind="bar", stacked=True
)
# ## Analysis:
# Out of all the females who applied, 93% have repaid their loan and 7% are defaulters.
# If a male candidate applies, there is roughly a 10% chance that he will default.
# Gender therefore accounts for a difference of about 3.24% in the default rate.
# We can say that females are more likely to repay their loans on time.
# ## 2.1.2 Univariate Analysis for Numerical Variable
# o Need to check: mean, median, mode, min, max, range, quantiles, standard deviation.
# o Plots: Distribution, histogram, Box Plots
#
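# describe() below covers the mean, median, quantiles and standard deviation; mode and range can be
# checked separately, for example (CREDIT_RATINGS is simply the variable analysed next):
print("Mode:", df["CREDIT_RATINGS"].mode().iloc[0])
print("Range:", df["CREDIT_RATINGS"].max() - df["CREDIT_RATINGS"].min())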
df1["CREDIT_RATINGS"].describe()
df0["CREDIT_RATINGS"].describe()
sns.barplot(x="TARGET", y="CREDIT_RATINGS", data=df)
sns.boxplot(x="TARGET", y="CREDIT_RATINGS", data=df, palette="rainbow")
# Quick Analysis:
# The median credit rating of defaulters tends to be lower than that of non-defaulter applicants.
# Since the upper quartile of defaulters overlaps with the lower quartile of non-defaulters, some applicants with decent ratings still default. Credit rating is therefore not the only factor affecting the default rate, and we need to consider other factors as well.
target = [0, 1]
for i in target:
subset = df[df["TARGET"] == i]
sns.distplot(
subset["CREDIT_RATINGS"], hist=False, kde=True, kde_kws={"shade": True}, label=i
)
# The distribution for target '0' is skewed. Applicants with high credit ratings tend to repay their loans.
# Applicants with lower credit scores tend to have a higher default rate.
plt.figure(figsize=(20, 9))
plt.subplot(1, 2, 1)
df0["CREDIT_RATINGS"].hist(bins=50)
plt.subplot(1, 2, 2)
df1["CREDIT_RATINGS"].hist(bins=50)
# Quick Analysis
# There are more random peaks in the plot above for target = 1 compared to target = 0.
# The detailed analysis of the credit rating categories will be done in the segmented univariate analysis with CREDIT_CATEGORY.
# The next step is to check the various categories of credit ratings, CREDIT_CATEGORY. (We previously binned the credit ratings into categories C1, C2, C3, C4 with C1 being the highest.)
df["CREDIT_CATEGORY"].value_counts()
creditCategories0 = pd.DataFrame(
df0["CREDIT_CATEGORY"].value_counts().rename("Count_0").reset_index()
)
creditCategories0_perct = pd.DataFrame(
df0["CREDIT_CATEGORY"].value_counts(normalize=True).rename("Perct_0").reset_index()
)
creditCategories0.rename(columns={"index": "CREDIT_CATEGORY"})
creditCategories0_perct.rename(columns={"index": "CREDIT_CATEGORY"})
# Merging data to get the overall view of the variable "CREDIT_CATEGORY"
creditCategories0 = pd.merge(
creditCategories0, creditCategories0_perct, how="inner"
).rename(columns={"index": "CREDIT_CATEGORY"})
creditCategories0
creditCategories1 = pd.DataFrame(
df1["CREDIT_CATEGORY"].value_counts().rename("Count_1").reset_index()
)
creditCategories1_perct = pd.DataFrame(
df1["CREDIT_CATEGORY"].value_counts(normalize=True).rename("Perct_1").reset_index()
)
creditCategories1.rename(columns={"index": "CREDIT_CATEGORY"})
creditCategories1_perct.rename(columns={"index": "CREDIT_CATEGORY"})
# Merging data to get the overall view of the variable "CREDIT_CATEGORY"
creditCategories1 = pd.merge(
creditCategories1, creditCategories1_perct, how="inner"
).rename(columns={"index": "CREDIT_CATEGORY"})
creditCategories1
creditCategories = pd.merge(creditCategories0, creditCategories1, how="inner").rename(
columns={"index": "CREDIT_CATEGORY"}
)
def credit_percentage_contri_0(count_0, count_1):
return 100 * (count_0 / (count_0 + count_1))
def credit_percentage_contri_1(count_0, count_1):
return 100 * (count_1 / (count_0 + count_1))
creditCategories["percentage_contri_0"] = creditCategories[
["Count_0", "Count_1"]
].apply(lambda x: credit_percentage_contri_0(*x), axis=1)
creditCategories["percentage_contri_1"] = creditCategories[
["Count_0", "Count_1"]
].apply(lambda x: credit_percentage_contri_1(*x), axis=1)
creditCategories.set_index("CREDIT_CATEGORY", inplace=True)
creditCategories
plt.plot(creditCategories["percentage_contri_1"].sort_values())
creditCategories[["percentage_contri_1", "percentage_contri_0"]].plot(
kind="bar", stacked=True
)
# Analysis:
#
# There tends to be an inverse relationship between the credit rating and the default rate: C1, the highest credit rating group, has the lowest percentage of defaulters.
# An applicant in the C1 credit rating category has only a 3.5% chance of being a defaulter, while an applicant in the C4 category has a 15% chance.
# Therefore the credit rating can change the percentage of defaulters by (15% - 3.5%) = 11.5%.
# So when prioritizing applicants, those with higher credit ratings should be given the highest priority.
# ## 2.2 Bivariate Analysis
# To check the impact of two independent variables on a dependent variable.
# ## 2.2.1 Categorical & Categorical
# o Need to check: Counts/Count%
# o Plots: Bar chart, Stacked bar chart, 2-y Axis plot – line charts
#
pt = df.pivot_table(
columns="NAME_INCOME_TYPE",
index="CREDIT_CATEGORY",
values="TARGET",
aggfunc="sum",
fill_value=0,
)
# pt.reset_index()
pt
pt["Row_Total"] = (
pt["Businessman"]
+ pt["Commercial associate"]
+ pt["Maternity leave"]
+ pt["Pensioner"]
+ pt["State servant"]
+ pt["Student"]
+ pt["Unemployed"]
+ pt["Working"]
)
Column_Total = []
for c in pt.columns:
Column_Total.append(pt[c].sum())
Column_Total
pt.loc["Column_Total"] = Column_Total
pt
for i in pt.index:
pt.loc[i, "Total%"] = 100 * (
pt.loc[i, "Row_Total"] / pt.loc["Column_Total", "Row_Total"]
)
for j in df.NAME_INCOME_TYPE.unique():
for i in pt.index:
pt.loc[i, j + "%"] = 100 * (pt.loc[i, j] / pt.loc["Column_Total", j])
pt
credit_income_type = pt.iloc[0:-1][
["Working%", "State servant%", "Commercial associate%", "Pensioner%", "Unemployed%"]
]
credit_income_type
credit_income_type.T.plot.bar(stacked="TRUE")
# Analysis:
# Among the C1 credit rating category applicants, the highest percentage of defaulters are 'Commercial associates'.
# The percentage of applicants with income type 'Working' and a C1 credit rating is the lowest among all the defaulters.
#
# ## 2.2.2 Numerical & Numerical
#
#
# o Need to check: correlations
# o Plots: heatmaps, scatter plots, hex plots
#
df1_corr = df[df["TARGET"] == 1]
df0_corr = df[df["TARGET"] == 0]
df1_corr = df1_corr[
[
"AMT_INCOME_TOTAL",
"AMT_CREDIT",
"AMT_ANNUITY",
"AMT_GOODS_PRICE",
"AGE",
"DAYS_EMPLOYED",
]
]
df0_corr = df0_corr[
[
"AMT_INCOME_TOTAL",
"AMT_CREDIT",
"AMT_ANNUITY",
"AMT_GOODS_PRICE",
"AGE",
"DAYS_EMPLOYED",
]
]
df1_corr_matrix = df1_corr.corr()
df0_corr_matrix = df0_corr.corr()  # correlations for the non-defaulter subset, not the defaulter one
df1_corr_matrix
# narrowing down the data and considering less than the upper quantile AMT_INCOME_TOTAL
df1_corr["AMT_INCOME_TOTAL"] = df1_corr[
df1_corr["AMT_INCOME_TOTAL"] < df1_corr["AMT_INCOME_TOTAL"].quantile(0.85)
]["AMT_INCOME_TOTAL"]
# df1_corr['AMT_ANNUITY'] = df1_corr[df1_corr['AMT_GOODS_PRICE']<df1_corr['AMT_GOODS_PRICE'].quantile(.85)]['AMT_GOODS_PRICE']
fig, ax = plt.subplots(figsize=(10, 10))
sns.scatterplot(x="AMT_INCOME_TOTAL", y="AMT_ANNUITY", data=df1_corr)
df1_corr.plot.hexbin(x="AMT_INCOME_TOTAL", y="AMT_ANNUITY", gridsize=30)
# Analysis:
# From the above hexplot we can see that a large number of applications lie in the region where the annuity amount is roughly 20% to 30% of the applicant's income
#
# narrowing down the data and considering less than the upper quantile AMT_INCOME_TOTAL
df1_corr["AMT_CREDIT"] = df1_corr[
df1_corr["AMT_CREDIT"] < df1_corr["AMT_CREDIT"].quantile(0.85)
]["AMT_CREDIT"]
# df1_corr['AMT_ANNUITY'] = df1_corr[df1_corr['AMT_GOODS_PRICE']<df1_corr['AMT_GOODS_PRICE'].quantile(.85)]['AMT_GOODS_PRICE']
df1_corr.plot.hexbin(x="AGE", y="AMT_CREDIT", gridsize=15)
# Analysis:
# From the above hexplot we can see that applicants aged 28 to 37 tend to get larger credit amounts compared to older applicants. We can easily confirm this with the following box plot.
#
sns.boxplot(x="AGE_GROUP", y="AMT_CREDIT", data=df1, palette="rainbow")
# The median AMT_CREDIT tends to be highest for Adults and lowest for Young Adults.
# Next we find correlations and plot heatmaps for all the continuous variables in the data. Follow the steps below to calculate correlations between all the numerical variables.
#
df1_corrdf = df1_corr_matrix.where(
    np.triu(np.ones(df1_corr_matrix.shape), k=1).astype(bool)
)
df0_corrdf = df0_corr_matrix.where(
    np.triu(np.ones(df0_corr_matrix.shape), k=1).astype(bool)
)
df1_corrdf = df1_corrdf.unstack().reset_index()
df0_corrdf = df0_corrdf.unstack().reset_index()
df1_corrdf.columns = ["var1", "var2", "correlation"]
df0_corrdf.columns = ["var1", "var2", "correlation"]
df1_corrdf.dropna(subset=["correlation"], inplace=True)
df0_corrdf.dropna(subset=["correlation"], inplace=True)
df1_corrdf.sort_values(by=["correlation"], ascending=False)
# df0_corrdf.sort_values(by=['correlation'],ascending=False)
# Analysis:
# From the above table we can easily see that AMT_GOODS_PRICE and AMT_CREDIT have the highest correlation, which is quite obvious: the higher the goods price, the higher the loan value.
#
sns.heatmap(
df1_corr_matrix, annot=True, linewidth=1, annot_kws={"size": 10}, cbar=False
)
# ## 2.2.3 Categorical & Numerical
# * Plots: Box plots, line chart
#
# removing outlier for AMT_INCOME_TOTAL
df1_filtered = df1[df1["AMT_INCOME_TOTAL"] < df["AMT_INCOME_TOTAL"].quantile(0.90)]
df_stats_credit = df1_filtered.groupby("NAME_INCOME_TYPE").mean()[
["AMT_CREDIT", "AMT_INCOME_TOTAL", "AMT_GOODS_PRICE"]
]
# df_stats_credit = df1_filtered.groupby('AGE_GROUP').mean()[['CREDIT_RATINGS']]
df_stats_credit.sort_values(by="AMT_CREDIT", ascending=False)
df_stats_credit.plot.line(x_compat=True)
plt.figure(figsize=(10, 5))
df_filtered = df[df["AMT_INCOME_TOTAL"] < df["AMT_INCOME_TOTAL"].quantile(0.90)]
sns.boxplot(
x="NAME_CONTRACT_TYPE",
y="AMT_CREDIT",
data=df_filtered,
palette="rainbow",
hue="TARGET",
)
plt.yscale("log")
|
# ## References -
# 1 - https://www.kaggle.com/abhinand05/in-depth-guide-to-convolutional-neural-networks
# 2 - https://www.kaggle.com/ateplyuk/pytorch-efficientnet
# ## Import Libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# to unzip
import zipfile
# Import PyTorch
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, Dataset
import torchvision
import torch.optim as optim
# Any results you write to the current directory are saved as output.
# ## Unzipping train and test
def unzip(path):
with zipfile.ZipFile(path, "r") as z:
z.extractall(".")
# unzip train folder
train_zip_path = "../input/train.zip"
unzip(train_zip_path)
test_zip_path = "../input/test.zip"
unzip(test_zip_path)
# ## Loading Training Data + EDA
train_df = pd.read_csv("../input/train.csv")
train_df.head()
print(f"Train Size: {len(os.listdir('/kaggle/working/train'))}")
print(f"Test Size: {len(os.listdir('/kaggle/working/test'))}")
|
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
from matplotlib import colors
import warnings
warnings.filterwarnings("ignore")
data_path = Path("/kaggle/input/abstraction-and-reasoning-challenge/")
training_path = data_path / "training"
evaluation_path = data_path / "evaluation"
test_path = data_path / "test"
training_tasks = sorted(os.listdir(training_path))
evaluation_tasks = sorted(os.listdir(evaluation_path))
test_tasks = sorted(os.listdir(test_path))
# from: https://www.kaggle.com/boliu0/visualizing-all-task-pairs-with-gridlines
cmap = colors.ListedColormap(
[
"#000000",
"#0074D9",
"#FF4136",
"#2ECC40",
"#FFDC00",
"#AAAAAA",
"#F012BE",
"#FF851B",
"#7FDBFF",
"#870C25",
]
)
norm = colors.Normalize(vmin=0, vmax=9)
def plot_one(ax, i, train_or_test, input_or_output):
input_matrix = task[train_or_test][i][input_or_output]
ax.imshow(input_matrix, cmap=cmap, norm=norm)
ax.grid(True, which="both", color="lightgrey", linewidth=0.5)
ax.set_yticks([x - 0.5 for x in range(1 + len(input_matrix))])
ax.set_xticks([x - 0.5 for x in range(1 + len(input_matrix[0]))])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_title(train_or_test + " " + input_or_output)
def plot_task(task):
"""
Plots the first train and test pairs of a specified task,
using same color scheme as the ARC app
"""
num_train = len(task["train"])
fig, axs = plt.subplots(2, num_train, figsize=(3 * num_train, 3 * 2))
for i in range(num_train):
plot_one(axs[0, i], i, "train", "input")
plot_one(axs[1, i], i, "train", "output")
plt.tight_layout()
plt.show()
num_test = len(task["test"])
fig, axs = plt.subplots(2, num_test, figsize=(3 * num_test, 3 * 2))
if num_test == 1:
plot_one(axs[0], 0, "test", "input")
plot_one(axs[1], 0, "test", "output")
else:
for i in range(num_test):
plot_one(axs[0, i], i, "test", "input")
plot_one(axs[1, i], i, "test", "output")
plt.tight_layout()
plt.show()
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
plt.show()
# from: https://www.kaggle.com/nagiss/manual-coding-for-the-first-10-tasks
def get_data(task_filename):
with open(task_filename, "r") as f:
task = json.load(f)
return task
num2color = [
"black",
"blue",
"red",
"green",
"yellow",
"gray",
"magenta",
"orange",
"sky",
"brown",
]
color2num = {c: n for n, c in enumerate(num2color)}
def check_p(task, pred_func):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(3, n, figsize=(4 * n, 12), dpi=50)
plt.subplots_adjust(wspace=0.3, hspace=0.3)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
t_pred = pred_func(t_in)
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f"Train-{i} in")
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f"Train-{i} out")
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
axs[2][fig_num].imshow(t_pred, cmap=cmap, norm=norm)
axs[2][fig_num].set_title(f"Train-{i} pred")
axs[2][fig_num].set_yticks(list(range(t_pred.shape[0])))
axs[2][fig_num].set_xticks(list(range(t_pred.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
t_pred = pred_func(t_in)
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f"Test-{i} in")
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f"Test-{i} out")
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
axs[2][fig_num].imshow(t_pred, cmap=cmap, norm=norm)
axs[2][fig_num].set_title(f"Test-{i} pred")
axs[2][fig_num].set_yticks(list(range(t_pred.shape[0])))
axs[2][fig_num].set_xticks(list(range(t_pred.shape[1])))
fig_num += 1
def check(task, pred_func):
    # Apply pred_func to every training pair; only if it reproduces all training outputs exactly
    # does it return the predictions for the test inputs, otherwise it returns None.
    try:
ok = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
t_pred = pred_func(t_in)
if len(t_out) == len(t_pred):
if len(t_out) == 1:
if t_pred == t_out:
ok += 1
elif (t_pred == t_out).all():
ok += 1
t_pred = []
if ok == len(task["train"]):
for i, t in enumerate(task["test"]):
t_in = np.array(t["input"])
t_pred.append(pred_func(t_in))
return t_pred
else:
return None
except:
return None
def flattener(pred):
str_pred = str([row for row in pred])
return (
str_pred.replace(", ", "")
.replace("[[", "|")
.replace("][", "|")
.replace("]]", "|")
)
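# Quick illustration of the submission string format produced by flattener on a tiny 2x2 grid:
# each row becomes a run of digits and rows are separated by '|' characters.
print(flattener([[0, 1], [2, 3]]))  # -> |01|23|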
def crop_min(a):
    # Crop to the bounding box of the least frequent non-zero colour; return the input unchanged on failure.
    try:
b = np.bincount(a.flatten(), minlength=10)
c = int(np.where(b == np.min(b[np.nonzero(b)]))[0])
coords = np.argwhere(a == c)
x_min, y_min = coords.min(axis=0)
x_max, y_max = coords.max(axis=0)
return a[x_min : x_max + 1, y_min : y_max + 1]
except:
return a
def crop_max(a):
    # Crop to the bounding box of the second most frequent colour; return the input unchanged on failure.
    try:
b = np.bincount(a.flatten(), minlength=10)
c = np.argsort(b)[-2]
coords = np.argwhere(a == c)
x_min, y_min = coords.min(axis=0)
x_max, y_max = coords.max(axis=0)
return a[x_min : x_max + 1, y_min : y_max + 1]
except:
return a
# ## some examples
task = get_data(str(training_path / training_tasks[262]))
check_p(task, crop_max)
task = get_data(str(training_path / training_tasks[299]))
check_p(task, crop_max)
task = get_data(str(training_path / training_tasks[13]))
check_p(task, crop_min)
task = get_data(str(training_path / training_tasks[30]))
check_p(task, crop_min)
task = get_data(str(training_path / training_tasks[35]))
check_p(task, crop_min)
task = get_data(str(training_path / training_tasks[48]))
check_p(task, crop_min)
task = get_data(str(training_path / training_tasks[176]))
check_p(task, crop_min)
task = get_data(str(training_path / training_tasks[309]))
check_p(task, crop_min)
task = get_data(str(training_path / training_tasks[318]))
check_p(task, crop_min)
task = get_data(str(training_path / training_tasks[383]))
check_p(task, crop_min)
# Resize
# train - 288, 268, 222, 122
# eval - 325, 347, 311, 251, 148, 0
submission = pd.read_csv(data_path / "sample_submission.csv", index_col="output_id")
for output_id in tqdm(submission.index):
task_id = output_id.split("_")[0]
pair_id = int(output_id.split("_")[1])
f = test_path / str(task_id + ".json")
if f.is_file():
task = get_data(f)
data = task["test"][pair_id]["input"]
pred_1 = flattener(data)
for oper in ["crop_min", "crop_max"]:
data = check(task, globals()[oper])
if data:
pred_1 = flattener(data)
break
data = task["test"][pair_id]["input"]
pred_2 = flattener(data)
data = [[5 if i == 0 else i for i in j] for j in data]
pred_3 = flattener(data)
# concatenate and add to the submission output
pred = pred_1 + " " + pred_2 + " " + pred_3 + " "
submission.loc[output_id, "output"] = pred
submission.to_csv("submission.csv")
|
import argparse
import torch
import torchvision.transforms as transforms
import os, sys
from PIL import Image
import glob
import tqdm
sys.path.insert(1, os.path.join(sys.path[0], ".."))
cwd = os.getcwd()
print(cwd)
import numpy as np
# from Utils.utils import str2bool, AverageMeter, depth_read
# import Models
# import Datasets
from PIL import ImageOps
import matplotlib.pyplot as plt
import time
import os
import sys
import re
import numpy as np
from PIL import Image
sys.path.insert(1, os.path.join(sys.path[0], ".."))
# Utils
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (35, 30)
from PIL import Image
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import argparse
import os
import torch.optim
from torch.optim import lr_scheduler
import errno
import sys
from torchvision import transforms
import torch.nn.init as init
import torch.distributed as dist
def define_optim(optim, params, lr, weight_decay):
if optim == "adam":
optimizer = torch.optim.Adam(params, lr=lr, weight_decay=weight_decay)
elif optim == "sgd":
optimizer = torch.optim.SGD(
params, lr=lr, momentum=0.9, weight_decay=weight_decay
)
elif optim == "rmsprop":
optimizer = torch.optim.RMSprop(
params, lr=lr, momentum=0.9, weight_decay=weight_decay
)
else:
raise KeyError("The requested optimizer: {} is not implemented".format(optim))
return optimizer
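# Illustrative call of the helper above (a throwaway linear layer just to have parameters to optimize;
# the learning rate and weight decay values are arbitrary):
_example_optimizer = define_optim("adam", torch.nn.Linear(4, 2).parameters(), lr=1e-3, weight_decay=1e-4)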
def define_scheduler(optimizer, args):
if args.lr_policy == "lambda":
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 - args.niter) / float(args.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif args.lr_policy == "step":
scheduler = lr_scheduler.StepLR(
optimizer, step_size=args.lr_decay_iters, gamma=args.gamma
)
elif args.lr_policy == "plateau":
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer,
mode="min",
factor=args.gamma,
threshold=0.0001,
patience=args.lr_decay_iters,
)
elif args.lr_policy == "none":
scheduler = None
else:
        raise NotImplementedError(
            "learning rate policy [%s] is not implemented" % args.lr_policy
        )
return scheduler
def define_init_weights(model, init_w="normal", activation="relu"):
print("Init weights in network with [{}]".format(init_w))
if init_w == "normal":
model.apply(weights_init_normal)
elif init_w == "xavier":
model.apply(weights_init_xavier)
elif init_w == "kaiming":
model.apply(weights_init_kaiming)
elif init_w == "orthogonal":
model.apply(weights_init_orthogonal)
else:
raise NotImplementedError(
"initialization method [{}] is not implemented".format(init_w)
)
def first_run(save_path):
txt_file = os.path.join(save_path, "first_run.txt")
if not os.path.exists(txt_file):
open(txt_file, "w").close()
else:
saved_epoch = open(txt_file).read()
        if not saved_epoch:  # the file exists but is empty
print("You forgot to delete [first run file]")
return ""
return saved_epoch
return ""
def depth_read(img, sparse_val):
# loads depth map D from png file
# and returns it as a numpy array,
# for details see readme.txt
depth_png = np.array(img, dtype=int)
depth_png = np.expand_dims(depth_png, axis=2)
# make sure we have a proper 16bit depth map here.. not 8bit!
assert np.max(depth_png) > 255
    depth = depth_png.astype(float) / 256.0  # np.float was removed from recent numpy; builtin float keeps the same float64 behaviour
depth[depth_png == 0] = sparse_val
return depth
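# A minimal, self-contained check of depth_read (illustration only; a synthetic 16-bit image
# stands in for a real KITTI PNG, where pixel_value / 256.0 is the depth in metres and 0 means
# "no measurement").
_demo_png = np.zeros((4, 4), dtype=np.uint16)
_demo_png[1, 2] = 20 * 256  # one valid pixel at 20 m, everything else missing
_demo_depth = depth_read(Image.fromarray(_demo_png), sparse_val=0.0)
assert _demo_depth.shape == (4, 4, 1) and float(_demo_depth[1, 2, 0]) == 20.0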
class show_figs:
def __init__(self, input_type, savefig=False):
self.input_type = input_type
self.savefig = savefig
def save(self, img, name):
img.save(name)
def transform(self, input, name="test.png"):
        if isinstance(input, torch.Tensor):
input = torch.clamp(input, min=0, max=255).int().cpu().numpy()
input = input * 256.0
img = Image.fromarray(input)
        elif isinstance(input, np.ndarray):
img = Image.fromarray(input)
else:
raise NotImplementedError("Input type not recognized type")
if self.savefig:
self.save(img, name)
else:
return img
# trick from stackoverflow
def str2bool(argument):
if argument.lower() in ("yes", "true", "t", "y", "1"):
return True
elif argument.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError(
"Wrong argument in argparse, should be a boolean"
)
def mkdir_if_missing(directory):
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def write_file(content, location):
file = open(location, "w")
file.write(str(content))
file.close()
class Logger(object):
"""
Source https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
self.fpath = fpath
if fpath is not None:
mkdir_if_missing(os.path.dirname(fpath))
self.file = open(fpath, "w")
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
def save_image(img_merge, filename):
img_merge = Image.fromarray(img_merge.astype("uint8"))
img_merge.save(filename)
def weights_init_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1 or classname.find("ConvTranspose") != -1:
init.normal_(m.weight.data, 0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("Linear") != -1:
init.normal_(m.weight.data, 0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("BatchNorm2d") != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1 or classname.find("ConvTranspose") != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("Linear") != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("BatchNorm2d") != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1 or classname.find("ConvTranspose") != -1:
init.kaiming_normal_(m.weight.data, a=0, mode="fan_in", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("Linear") != -1:
init.kaiming_normal_(m.weight.data, a=0, mode="fan_in", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("BatchNorm2d") != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find("Conv") != -1 or classname.find("ConvTranspose") != -1:
        init.orthogonal_(m.weight.data, gain=1)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("Linear") != -1:
        init.orthogonal_(m.weight.data, gain=1)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find("BatchNorm2d") != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def save_fig(inp, name="saved.png"):
if isinstance(inp, torch.Tensor):
# inp = inp.permute([2, 0, 1])
inp = transforms.ToPILImage()(inp.int())
inp.save(name)
return
pil = Image.fromarray(inp)
pil.save(name)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(
"| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True
)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
# Does not seem to work?
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
# Kitti_loader
class Random_Sampler:
"Class to downsample input lidar points"
def __init__(self, num_samples):
self.num_samples = num_samples
def sample(self, depth):
mask_keep = depth > 0
n_keep = np.count_nonzero(mask_keep)
if n_keep == 0:
return mask_keep
else:
depth_sampled = np.zeros(depth.shape)
prob = float(self.num_samples) / n_keep
mask_keep = np.bitwise_and(
mask_keep, np.random.uniform(0, 1, depth.shape) < prob
)
depth_sampled[mask_keep] = depth[mask_keep]
return depth_sampled
class Kitti_preprocessing(object):
def __init__(self, dataset_path, input_type="depth", side_selection=""):
self.train_paths = {"img": [], "lidar_in": [], "gt": []}
self.val_paths = {"img": [], "lidar_in": [], "gt": []}
self.selected_paths = {"img": [], "lidar_in": [], "gt": []}
self.test_files = {"img": [], "lidar_in": []}
self.dataset_path = dataset_path
self.side_selection = side_selection
self.left_side_selection = "image_02"
self.right_side_selection = "image_03"
self.depth_keyword = "proj_depth"
self.rgb_keyword = "Rgb"
# self.use_rgb = input_type == 'rgb'
self.use_rgb = True
self.date_selection = "2011_09_26"
def get_paths(self):
# train and validation dirs
for type_set in os.listdir(self.dataset_path):
for root, dirs, files in os.walk(os.path.join(self.dataset_path, type_set)):
if re.search(self.depth_keyword, root):
self.train_paths["lidar_in"].extend(
sorted(
[
os.path.join(root, file)
for file in files
if re.search("velodyne_raw", root)
and re.search("train", root)
and re.search(self.side_selection, root)
]
)
)
self.val_paths["lidar_in"].extend(
sorted(
[
os.path.join(root, file)
for file in files
if re.search("velodyne_raw", root)
and re.search("val", root)
and re.search(self.side_selection, root)
]
)
)
self.train_paths["gt"].extend(
sorted(
[
os.path.join(root, file)
for file in files
if re.search("groundtruth", root)
and re.search("train", root)
and re.search(self.side_selection, root)
]
)
)
self.val_paths["gt"].extend(
sorted(
[
os.path.join(root, file)
for file in files
if re.search("groundtruth", root)
and re.search("val", root)
and re.search(self.side_selection, root)
]
)
)
if self.use_rgb:
if re.search(self.rgb_keyword, root) and re.search(
self.side_selection, root
):
self.train_paths["img"].extend(
sorted(
[
os.path.join(root, file)
for file in files
if re.search("train", root)
]
)
)
# and (re.search('image_02', root) or re.search('image_03', root))
# and re.search('data', root)]))
# if len(self.train_paths['img']) != 0:
# test = [os.path.join(root, file) for file in files if re.search('train', root)]
self.val_paths["img"].extend(
sorted(
[
os.path.join(root, file)
for file in files
if re.search("val", root)
]
)
)
# and (re.search('image_02', root) or re.search('image_03', root))
# and re.search('data', root)]))
# if len(self.train_paths['lidar_in']) != len(self.train_paths['img']):
# print(root)
def downsample(self, lidar_data, destination, num_samples=500):
# Define sampler
sampler = Random_Sampler(num_samples)
for i, lidar_set_path in tqdm.tqdm(enumerate(lidar_data)):
# Read in lidar data
name = os.path.splitext(os.path.basename(lidar_set_path))[0]
sparse_depth = Image.open(lidar_set_path)
# Convert to numpy array
sparse_depth = np.array(sparse_depth, dtype=int)
assert np.max(sparse_depth) > 255
            # Downsample: randomly keep roughly num_samples of the valid points
sparse_depth = sampler.sample(sparse_depth)
# Convert to img
sparse_depth_img = Image.fromarray(sparse_depth.astype(np.uint32))
# Save
folder = os.path.join(*str.split(lidar_set_path, os.path.sep)[7:12])
os.makedirs(os.path.join(destination, os.path.join(folder)), exist_ok=True)
sparse_depth_img.save(
os.path.join(destination, os.path.join(folder, name)) + ".png"
)
def convert_png_to_rgb(self, rgb_images, destination):
for i, img_set_path in tqdm.tqdm(enumerate(rgb_images)):
name = os.path.splitext(os.path.basename(img_set_path))[0]
im = Image.open(img_set_path)
rgb_im = im.convert("RGB")
folder = os.path.join(*str.split(img_set_path, os.path.sep)[8:12])
os.makedirs(os.path.join(destination, os.path.join(folder)), exist_ok=True)
rgb_im.save(os.path.join(destination, os.path.join(folder, name)) + ".jpg")
# rgb_im.save(os.path.join(destination, name) + '.jpg')
def get_selected_paths(self, selection):
files = []
for file in sorted(os.listdir(os.path.join(self.dataset_path, selection))):
files.append(os.path.join(self.dataset_path, os.path.join(selection, file)))
return files
def prepare_dataset(self):
path_to_val_sel = "depth_selection/val_selection_cropped"
path_to_test = "depth_selection/test_depth_completion_anonymous"
# self.get_paths()
self.selected_paths["lidar_in"] = self.get_selected_paths(
os.path.join(path_to_val_sel, "velodyne_raw")
)
self.selected_paths["gt"] = self.get_selected_paths(
os.path.join(path_to_val_sel, "groundtruth_depth")
)
self.selected_paths["img"] = self.get_selected_paths(
os.path.join(path_to_val_sel, "image")
)
self.test_files["lidar_in"] = self.get_selected_paths(
os.path.join(path_to_test, "velodyne_raw")
)
if self.use_rgb:
self.selected_paths["img"] = self.get_selected_paths(
os.path.join(path_to_val_sel, "image")
)
self.test_files["img"] = self.get_selected_paths(
os.path.join(path_to_test, "image")
)
# print(len(self.train_paths['lidar_in']))
# print(len(self.train_paths['img']))
# print(len(self.train_paths['gt']))
print(len(self.val_paths["lidar_in"]))
print(len(self.val_paths["img"]))
print(len(self.val_paths["gt"]))
print(len(self.test_files["lidar_in"]))
print(len(self.test_files["img"]))
def compute_mean_std(self):
nums = np.array([])
means = np.array([])
stds = np.array([])
max_lst = np.array([])
for i, raw_img_path in tqdm.tqdm(enumerate(self.train_paths["lidar_in"])):
raw_img = Image.open(raw_img_path)
            raw_np = depth_read(raw_img, -1.0)  # assumption: negative sparse_val so the ">= 0" filter below drops missing pixels
vec = raw_np[raw_np >= 0]
# vec = vec/84.0
means = np.append(means, np.mean(vec))
stds = np.append(stds, np.std(vec))
nums = np.append(nums, len(vec))
max_lst = np.append(max_lst, np.max(vec))
mean = np.dot(nums, means) / np.sum(nums)
std = np.sqrt(
(np.dot(nums, stds**2) + np.dot(nums, (means - mean) ** 2)) / np.sum(nums)
)
return mean, std, max_lst
# ERFNet
import torch
import torch.nn as nn
import torch.nn.functional as F
class DownsamplerBlock(nn.Module):
def __init__(self, ninput, noutput):
super().__init__()
self.conv = nn.Conv2d(
ninput, noutput - ninput, (3, 3), stride=2, padding=1, bias=True
)
self.pool = nn.MaxPool2d(2, stride=2)
self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
def forward(self, input):
output = torch.cat([self.conv(input), self.pool(input)], 1)
output = self.bn(output)
return F.relu(output)
class non_bottleneck_1d(nn.Module):
def __init__(self, chann, dropprob, dilated):
super().__init__()
self.conv3x1_1 = nn.Conv2d(
chann, chann, (3, 1), stride=1, padding=(1, 0), bias=True
)
self.conv1x3_1 = nn.Conv2d(
chann, chann, (1, 3), stride=1, padding=(0, 1), bias=True
)
self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)
self.conv3x1_2 = nn.Conv2d(
chann,
chann,
(3, 1),
stride=1,
padding=(1 * dilated, 0),
bias=True,
dilation=(dilated, 1),
)
self.conv1x3_2 = nn.Conv2d(
chann,
chann,
(1, 3),
stride=1,
padding=(0, 1 * dilated),
bias=True,
dilation=(1, dilated),
)
self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)
self.dropout = nn.Dropout2d(dropprob)
def forward(self, input):
output = self.conv3x1_1(input)
output = F.relu(output)
output = self.conv1x3_1(output)
output = self.bn1(output)
output = F.relu(output)
output = self.conv3x1_2(output)
output = F.relu(output)
output = self.conv1x3_2(output)
output = self.bn2(output)
if self.dropout.p != 0:
output = self.dropout(output)
return F.relu(output + input)
class Encoder(nn.Module):
def __init__(self, in_channels, num_classes):
super().__init__()
chans = 32 if in_channels > 16 else 16
self.initial_block = DownsamplerBlock(in_channels, chans)
self.layers = nn.ModuleList()
self.layers.append(DownsamplerBlock(chans, 64))
for x in range(0, 5):
self.layers.append(non_bottleneck_1d(64, 0.03, 1))
self.layers.append(DownsamplerBlock(64, 128))
for x in range(0, 2):
self.layers.append(non_bottleneck_1d(128, 0.3, 2))
self.layers.append(non_bottleneck_1d(128, 0.3, 4))
self.layers.append(non_bottleneck_1d(128, 0.3, 8))
self.layers.append(non_bottleneck_1d(128, 0.3, 16))
# Only in encoder mode:
self.output_conv = nn.Conv2d(
128, num_classes, 1, stride=1, padding=0, bias=True
)
def forward(self, input, predict=False):
output = self.initial_block(input)
for layer in self.layers:
output = layer(output)
if predict:
output = self.output_conv(output)
return output
class UpsamplerBlock(nn.Module):
def __init__(self, ninput, noutput):
super().__init__()
self.conv = nn.ConvTranspose2d(
ninput, noutput, 3, stride=2, padding=1, output_padding=1, bias=True
)
self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
def forward(self, input):
output = self.conv(input)
output = self.bn(output)
return F.relu(output)
class Decoder(nn.Module):
def __init__(self, num_classes):
super().__init__()
self.layer1 = UpsamplerBlock(128, 64)
self.layer2 = non_bottleneck_1d(64, 0, 1)
self.layer3 = non_bottleneck_1d(64, 0, 1) # 64x64x304
self.layer4 = UpsamplerBlock(64, 32)
self.layer5 = non_bottleneck_1d(32, 0, 1)
self.layer6 = non_bottleneck_1d(32, 0, 1) # 32x128x608
self.output_conv = nn.ConvTranspose2d(
32, num_classes, 2, stride=2, padding=0, output_padding=0, bias=True
)
def forward(self, input):
output = input
output = self.layer1(output)
output = self.layer2(output)
output = self.layer3(output)
em2 = output
output = self.layer4(output)
output = self.layer5(output)
output = self.layer6(output)
em1 = output
output = self.output_conv(output)
return output, em1, em2
class Net(nn.Module):
def __init__(
self, in_channels=1, out_channels=1
): # use encoder to pass pretrained encoder
super().__init__()
self.encoder = Encoder(in_channels, out_channels)
self.decoder = Decoder(out_channels)
def forward(self, input, only_encode=False):
if only_encode:
return self.encoder.forward(input, predict=True)
else:
output = self.encoder(input)
return self.decoder.forward(output)
# model
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
# from .ERFNet import Net
class uncertainty_net(nn.Module):
def __init__(self, in_channels, out_channels=1, thres=15):
super(uncertainty_net, self).__init__()
out_chan = 2
combine = "concat"
self.combine = combine
self.in_channels = in_channels
out_channels = 3
self.depthnet = Net(in_channels=in_channels, out_channels=out_channels)
local_channels_in = 2 if self.combine == "concat" else 1
self.convbnrelu = nn.Sequential(
convbn(local_channels_in, 32, 3, 1, 1, 1), nn.ReLU(inplace=True)
)
self.hourglass1 = hourglass_1(32)
self.hourglass2 = hourglass_2(32)
self.fuse = nn.Sequential(
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(32, out_chan, kernel_size=3, padding=1, stride=1, bias=True),
)
self.activation = nn.ReLU(inplace=True)
self.thres = thres
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, input, epoch=50):
if self.in_channels > 1:
rgb_in = input[:, 1:, :, :]
lidar_in = input[:, 0:1, :, :]
else:
lidar_in = input
# 1. GLOBAL NET
embedding0, embedding1, embedding2 = self.depthnet(input)
global_features = embedding0[:, 0:1, :, :]
precise_depth = embedding0[:, 1:2, :, :]
conf = embedding0[:, 2:, :, :]
# 2. Fuse
if self.combine == "concat":
input = torch.cat((lidar_in, global_features), 1)
elif self.combine == "add":
input = lidar_in + global_features
elif self.combine == "mul":
input = lidar_in * global_features
elif self.combine == "sigmoid":
input = lidar_in * nn.Sigmoid()(global_features)
else:
input = lidar_in
# 3. LOCAL NET
out = self.convbnrelu(input)
out1, embedding3, embedding4 = self.hourglass1(out, embedding1, embedding2)
out1 = out1 + out
out2 = self.hourglass2(out1, embedding3, embedding4)
out2 = out2 + out
out = self.fuse(out2)
lidar_out = out
# 4. Late Fusion
lidar_to_depth, lidar_to_conf = torch.chunk(out, 2, dim=1)
lidar_to_conf, conf = torch.chunk(
self.softmax(torch.cat((lidar_to_conf, conf), 1)), 2, dim=1
)
out = conf * precise_depth + lidar_to_conf * lidar_to_depth
return out, lidar_out, precise_depth, global_features
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(
nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=dilation if dilation > 1 else pad,
dilation=dilation,
bias=False,
)
)
# nn.BatchNorm2d(out_planes))
class hourglass_1(nn.Module):
def __init__(self, channels_in):
super(hourglass_1, self).__init__()
self.conv1 = nn.Sequential(
convbn(
channels_in, channels_in, kernel_size=3, stride=2, pad=1, dilation=1
),
nn.ReLU(inplace=True),
)
self.conv2 = convbn(
channels_in, channels_in, kernel_size=3, stride=1, pad=1, dilation=1
)
self.conv3 = nn.Sequential(
convbn(
channels_in * 2,
channels_in * 2,
kernel_size=3,
stride=2,
pad=1,
dilation=1,
),
nn.ReLU(inplace=True),
)
self.conv4 = nn.Sequential(
convbn(
channels_in * 2,
channels_in * 2,
kernel_size=3,
stride=1,
pad=1,
dilation=1,
)
)
self.conv5 = nn.Sequential(
nn.ConvTranspose2d(
channels_in * 4,
channels_in * 2,
kernel_size=3,
padding=1,
output_padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(channels_in * 2),
nn.ReLU(inplace=True),
)
self.conv6 = nn.Sequential(
nn.ConvTranspose2d(
channels_in * 2,
channels_in,
kernel_size=3,
padding=1,
output_padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(channels_in),
)
def forward(self, x, em1, em2):
x = self.conv1(x)
x = self.conv2(x)
x = F.relu(x, inplace=True)
x = torch.cat((x, em1), 1)
x_prime = self.conv3(x)
x_prime = self.conv4(x_prime)
x_prime = F.relu(x_prime, inplace=True)
x_prime = torch.cat((x_prime, em2), 1)
out = self.conv5(x_prime)
out = self.conv6(out)
return out, x, x_prime
class hourglass_2(nn.Module):
def __init__(self, channels_in):
super(hourglass_2, self).__init__()
self.conv1 = nn.Sequential(
convbn(
channels_in, channels_in * 2, kernel_size=3, stride=2, pad=1, dilation=1
),
nn.BatchNorm2d(channels_in * 2),
nn.ReLU(inplace=True),
)
self.conv2 = convbn(
channels_in * 2, channels_in * 2, kernel_size=3, stride=1, pad=1, dilation=1
)
self.conv3 = nn.Sequential(
convbn(
channels_in * 2,
channels_in * 2,
kernel_size=3,
stride=2,
pad=1,
dilation=1,
),
nn.BatchNorm2d(channels_in * 2),
nn.ReLU(inplace=True),
)
self.conv4 = nn.Sequential(
convbn(
channels_in * 2,
channels_in * 4,
kernel_size=3,
stride=1,
pad=1,
dilation=1,
)
)
self.conv5 = nn.Sequential(
nn.ConvTranspose2d(
channels_in * 4,
channels_in * 2,
kernel_size=3,
padding=1,
output_padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(channels_in * 2),
nn.ReLU(inplace=True),
)
self.conv6 = nn.Sequential(
nn.ConvTranspose2d(
channels_in * 2,
channels_in,
kernel_size=3,
padding=1,
output_padding=1,
stride=2,
bias=False,
),
nn.BatchNorm2d(channels_in),
)
def forward(self, x, em1, em2):
x = self.conv1(x)
x = self.conv2(x)
x = x + em1
x = F.relu(x, inplace=True)
x_prime = self.conv3(x)
x_prime = self.conv4(x_prime)
x_prime = x_prime + em2
x_prime = F.relu(x_prime, inplace=True)
out = self.conv5(x_prime)
out = self.conv6(out)
return out
# Training settings
parser = argparse.ArgumentParser(description="KITTI Depth Completion Task TEST")
parser.add_argument(
"--dataset",
type=str,
default="kitti",
choices={"kitti"},
help="dataset to work with",
)
parser.add_argument(
"--mod", type=str, default="mod", choices={"mod"}, help="Model for use"
)
parser.add_argument("--no_cuda", action="store_true", help="no gpu usage")
parser.add_argument(
"--input_type", type=str, default="rgb", help="use rgb for rgbdepth"
)
# Data augmentation settings
parser.add_argument(
"--crop_w", type=int, default=1216, help="width of image after cropping"
)
parser.add_argument(
"--crop_h", type=int, default=256, help="height of image after cropping"
)
# Paths settings
parser.add_argument(
"--save_path", type=str, default="/kaggle/input/pretainedmodel", help="save path"
)
parser.add_argument(
"--data_path",
type=str,
default="/kaggle/input/kitti-depth-estimation-selection",
help="path to desired datasets",
)
# Cudnn
parser.add_argument(
"--cudnn",
type=str2bool,
nargs="?",
const=True,
default=True,
help="cudnn optimization active",
)
parser.add_argument(
"--multi",
type=str2bool,
nargs="?",
const=True,
default=False,
help="use multiple gpus",
)
parser.add_argument(
"--normal",
type=str2bool,
nargs="?",
const=True,
default=False,
help="Normalize input",
)
parser.add_argument(
"--max_depth", type=float, default=85.0, help="maximum depth of input"
)
parser.add_argument(
"--sparse_val", type=float, default=0.0, help="encode sparse values with 0"
)
parser.add_argument("--num_samples", default=0, type=int, help="number of samples")
def main():
global args
global dataset
# args = parser.parse_args()
args = parser.parse_known_args()[0]
torch.backends.cudnn.benchmark = args.cudnn
best_file_name = glob.glob(os.path.join(args.save_path, "model_best*"))[0]
save_root = "/kaggle/working/results"
if not os.path.isdir(save_root):
os.makedirs(save_root)
print("==========\nArgs:{}\n==========".format(args))
# INIT
print("Init model: '{}'".format(args.mod))
channels_in = 1 if args.input_type == "depth" else 4
model = uncertainty_net(in_channels=channels_in)
print(
"Number of parameters in model {} is {:.3f}M".format(
args.mod.upper(), sum(tensor.numel() for tensor in model.parameters()) / 1e6
)
)
if not args.no_cuda:
# Load on gpu before passing params to optimizer
if not args.multi:
model = model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
if os.path.isfile(best_file_name):
print("=> loading checkpoint '{}'".format(best_file_name))
checkpoint = torch.load(best_file_name)
model.load_state_dict(checkpoint["state_dict"])
lowest_loss = checkpoint["loss"]
best_epoch = checkpoint["best epoch"]
print(
"Lowest RMSE for selection validation set was {:.4f} in epoch {}".format(
lowest_loss, best_epoch
)
)
else:
print("=> no checkpoint found at '{}'".format(best_file_name))
return
if not args.no_cuda:
model = model.cuda()
print("Initializing dataset {}".format(args.dataset))
# dataset = Datasets.define_dataset(args.dataset, args.data_path, args.input_type)
dataset = Kitti_preprocessing(args.data_path, args.input_type)
dataset.prepare_dataset()
to_pil = transforms.ToPILImage()
to_tensor = transforms.ToTensor()
norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
depth_norm = transforms.Normalize(
mean=[14.97 / args.max_depth], std=[11.15 / args.max_depth]
)
model.eval()
print("===> Start testing")
total_time = []
with torch.no_grad():
for i, (img, rgb, gt) in tqdm.tqdm(
enumerate(
zip(
dataset.selected_paths["lidar_in"],
dataset.selected_paths["img"],
dataset.selected_paths["gt"],
)
)
):
raw_path = os.path.join(img)
raw_pil = Image.open(raw_path)
gt_path = os.path.join(gt)
gt_pil = Image.open(gt)
assert raw_pil.size == (1216, 352)
crop = 352 - args.crop_h
raw_pil_crop = raw_pil.crop((0, crop, 1216, 352))
gt_pil_crop = gt_pil.crop((0, crop, 1216, 352))
raw = depth_read(raw_pil_crop, args.sparse_val)
raw = to_tensor(raw).float()
gt = depth_read(gt_pil_crop, args.sparse_val)
gt = to_tensor(gt).float()
valid_mask = (raw > 0).detach().float()
# input = torch.unsqueeze(raw, 0)
# gt = torch.unsqueeze(gt, 0)
input = torch.unsqueeze(raw, 0).cuda()
gt = torch.unsqueeze(gt, 0).cuda()
if args.normal:
# Put in {0-1} range and then normalize
input = input / args.max_depth
# input = depth_norm(input)
if args.input_type == "rgb":
rgb_path = os.path.join(rgb)
rgb_pil = Image.open(rgb_path)
assert rgb_pil.size == (1216, 352)
rgb_pil_crop = rgb_pil.crop((0, crop, 1216, 352))
rgb = to_tensor(rgb_pil_crop).float()
# rgb = torch.unsqueeze(rgb, 0)
rgb = torch.unsqueeze(rgb, 0).cuda()
if not args.normal:
rgb = rgb * 255.0
input = torch.cat((input, rgb), 1)
torch.cuda.synchronize()
a = time.perf_counter()
output, _, _, _ = model(input)
torch.cuda.synchronize()
b = time.perf_counter()
total_time.append(b - a)
if args.normal:
output = output * args.max_depth
output = torch.clamp(output, min=0, max=85)
output = output * 256.0
raw = raw * 256.0
output = output[0][0:1].cpu()
data = output[0].numpy()
if crop != 0:
padding = (0, 0, crop, 0)
output = torch.nn.functional.pad(output, padding, "constant", 0)
output[:, 0:crop] = output[:, crop].repeat(crop, 1)
pil_img = to_pil(output.int())
assert pil_img.size == (1216, 352)
pil_img.save(os.path.join(save_root, os.path.basename(img)))
# print(os.path.join(save_root, os.path.basename(img)))
print("average_time: ", sum(total_time[100:]) / (len(total_time[100:])))
print("num imgs: ", i + 1)
if __name__ == "__main__":
main()
from pathlib import Path
import zipfile
img_root = Path("/kaggle/working/results")
with zipfile.ZipFile("imgs.zip", "w") as z:
for img_name in img_root.iterdir():
z.write(img_name)
|
# > **This version implements different strategies for dealing with missing values, mostly in ordinal data. Most tests show nominal variables perform better when left untouched and handled with OneHotEncoder. The final solution is a stack of all the methods used in this kernel.**
from fancyimpute import (
    NuclearNormMinimization,
    SoftImpute,
    BiScaler,
    IterativeSVD,
)
from sklearn.preprocessing import (
StandardScaler,
RobustScaler,
QuantileTransformer,
PowerTransformer,
)
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from autoimpute.imputations import SingleImputer, MultipleImputer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import SimpleImputer, IterativeImputer
from sklearn.model_selection import StratifiedKFold
import time, os, warnings, random, string, re, gc
from sklearn.feature_selection import RFE, RFECV
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import roc_auc_score
from IPython.display import display
from scipy.stats import rankdata
from autoimpute.visuals import *
import matplotlib.pyplot as plt
import category_encoders as ce
import plotly_express as px
import impyute as impy
import seaborn as sns
import pandas as pd
import scipy as sp
import numpy as np
sns.set_style("whitegrid")
warnings.filterwarnings("ignore")
warnings.simplefilter("ignore")
pd.set_option("display.max_columns", 1000)
pd.set_option("display.max_rows", 500)
SEED = 2020
SPLITS = 25
def set_seed(seed=SEED):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
set_seed()
base = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/train.csv")
baseTe = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/test.csv")
baseTe["target"] = -1
score = dict()
pd.DataFrame(base.isna().sum(axis=1).describe(), columns=["Value"]).T
pd.DataFrame(
base.isna().sum(axis=0) / len(base), columns=["missing percent"]
).sort_values("missing percent", ascending=False).T
plot_md_locations(base)
plot_nullility_dendogram(base)
def null_analysis(df):
"""
desc: get nulls for each column in counts & percentages
arg: dataframe
return: dataframe
"""
null_cnt = df.isnull().sum() # calculate null counts
null_cnt = null_cnt[null_cnt != 0] # remove non-null cols
null_percent = null_cnt / len(df) * 100 # calculate null percentages
null_table = pd.concat([pd.DataFrame(null_cnt), pd.DataFrame(null_percent)], axis=1)
null_table.columns = ["counts", "percentage"]
null_table.sort_values("counts", ascending=False, inplace=True)
return null_table
null_table = null_analysis(base)
px.bar(null_table.reset_index(), x="index", y="percentage", text="counts", height=500)
score = dict()
def SiavashMapping(df):
bin_3_mapping = {"T": 1, "F": 0}
bin_4_mapping = {"Y": 1, "N": 0}
nom_0_mapping = {"Red": 0, "Blue": 1, "Green": 2}
nom_1_mapping = {
"Trapezoid": 0,
"Star": 1,
"Circle": 2,
"Triangle": 3,
"Polygon": 4,
}
nom_2_mapping = {
"Hamster": 0,
"Axolotl": 1,
"Lion": 2,
"Dog": 3,
"Cat": 4,
"Snake": 5,
}
nom_3_mapping = {
"Russia": 0,
"Canada": 1,
"Finland": 2,
"Costa Rica": 3,
"China": 4,
"India": 5,
}
nom_4_mapping = {"Bassoon": 0, "Theremin": 1, "Oboe": 2, "Piano": 3}
nom_5_mapping = dict(
zip((df.nom_5.dropna().unique()), range(len((df.nom_5.dropna().unique()))))
)
nom_6_mapping = dict(
zip((df.nom_6.dropna().unique()), range(len((df.nom_6.dropna().unique()))))
)
nom_7_mapping = dict(
zip((df.nom_7.dropna().unique()), range(len((df.nom_7.dropna().unique()))))
)
nom_8_mapping = dict(
zip((df.nom_8.dropna().unique()), range(len((df.nom_8.dropna().unique()))))
)
nom_9_mapping = dict(
zip((df.nom_9.dropna().unique()), range(len((df.nom_9.dropna().unique()))))
)
ord_1_mapping = {
"Novice": 0,
"Contributor": 1,
"Expert": 2,
"Master": 3,
"Grandmaster": 4,
}
ord_2_mapping = {
"Freezing": 0,
"Cold": 1,
"Warm": 2,
"Hot": 3,
"Boiling Hot": 4,
"Lava Hot": 5,
}
ord_3_mapping = {
"a": 0,
"b": 1,
"c": 2,
"d": 3,
"e": 4,
"f": 5,
"g": 6,
"h": 7,
"i": 8,
"j": 9,
"k": 10,
"l": 11,
"m": 12,
"n": 13,
"o": 14,
}
ord_4_mapping = {
"A": 0,
"B": 1,
"C": 2,
"D": 3,
"E": 4,
"F": 5,
"G": 6,
"H": 7,
"I": 8,
"J": 9,
"K": 10,
"L": 11,
"M": 12,
"N": 13,
"O": 14,
"P": 15,
"Q": 16,
"R": 17,
"S": 18,
"T": 19,
"U": 20,
"V": 21,
"W": 22,
"X": 23,
"Y": 24,
"Z": 25,
}
sorted_ord_5 = sorted(df.ord_5.dropna().unique())
ord_5_mapping = dict(zip(sorted_ord_5, range(len(sorted_ord_5))))
df["bin_3"] = df.loc[df.bin_3.notnull(), "bin_3"].map(bin_3_mapping)
df["bin_4"] = df.loc[df.bin_4.notnull(), "bin_4"].map(bin_4_mapping)
df["nom_0"] = df.loc[df.nom_0.notnull(), "nom_0"].map(nom_0_mapping)
df["nom_1"] = df.loc[df.nom_1.notnull(), "nom_1"].map(nom_1_mapping)
df["nom_2"] = df.loc[df.nom_2.notnull(), "nom_2"].map(nom_2_mapping)
df["nom_3"] = df.loc[df.nom_3.notnull(), "nom_3"].map(nom_3_mapping)
df["nom_4"] = df.loc[df.nom_4.notnull(), "nom_4"].map(nom_4_mapping)
df["nom_5"] = df.loc[df.nom_5.notnull(), "nom_5"].map(nom_5_mapping)
df["nom_6"] = df.loc[df.nom_6.notnull(), "nom_6"].map(nom_6_mapping)
df["nom_7"] = df.loc[df.nom_7.notnull(), "nom_7"].map(nom_7_mapping)
df["nom_8"] = df.loc[df.nom_8.notnull(), "nom_8"].map(nom_8_mapping)
df["nom_9"] = df.loc[df.nom_9.notnull(), "nom_9"].map(nom_9_mapping)
df["ord_1"] = df.loc[df.ord_1.notnull(), "ord_1"].map(ord_1_mapping)
df["ord_2"] = df.loc[df.ord_2.notnull(), "ord_2"].map(ord_2_mapping)
df["ord_3"] = df.loc[df.ord_3.notnull(), "ord_3"].map(ord_3_mapping)
df["ord_4"] = df.loc[df.ord_4.notnull(), "ord_4"].map(ord_4_mapping)
df["ord_5"] = df.loc[df.ord_5.notnull(), "ord_5"].map(ord_5_mapping)
return df
def AntMapping(df, ordinal):
ord_maps = {
"ord_0": {val: i for i, val in enumerate([1, 2, 3])},
"ord_1": {
val: i
for i, val in enumerate(
["Novice", "Contributor", "Expert", "Master", "Grandmaster"]
)
},
"ord_2": {
val: i
for i, val in enumerate(
["Freezing", "Cold", "Warm", "Hot", "Boiling Hot", "Lava Hot"]
)
},
**{
col: {val: i for i, val in enumerate(sorted(df[col].dropna().unique()))}
for col in ["ord_3", "ord_4", "ord_5", "day", "month"]
},
}
ord_cols = pd.concat(
[
df[col].map(ord_map).fillna(max(ord_map.values()) // 2).astype("float32")
for col, ord_map in ord_maps.items()
],
axis=1,
)
ord_cols /= ord_cols.max()
ord_sqr = 4 * (ord_cols - 0.5) ** 2
ord_cols_sqr = [feat + "_sqr" for feat in ordinal]
df[ordinal] = ord_cols
df[ord_cols_sqr] = ord_sqr
return df
def CountEncoding(df, cols, df_test=None):
for col in cols:
frequencies = df[col].value_counts().reset_index()
df_values = (
df[[col]]
.merge(frequencies, how="left", left_on=col, right_on="index")
.iloc[:, -1]
.values
)
df[col + "_counts"] = df_values
if df_test is not None:
df_test_values = (
df_test[[col]]
.merge(frequencies, how="left", left_on=col, right_on="index")
.fillna(1)
.iloc[:, -1]
.values
)
df_test[col + "_counts"] = df_test_values
count_cols = [col + "_counts" for col in cols]
if df_test is not None:
return df, df_test, count_cols
else:
return df, count_cols
def YurgensenMapping(df):
def TLC(s):
s = str(s)
return ((ord(s[0])) - 64) * 52 + ((ord(s[1])) - 64) - 6
df["ord_0_ord_2"] = df["ord_0"].astype("str") + df["ord_2"].astype("str")
df["ord_0"] = df["ord_0"].fillna(2.01)
df.loc[df.ord_2 == "Freezing", "ord_2"] = 0
df.loc[df.ord_2 == "Cold", "ord_2"] = 1
df.loc[df.ord_2 == "Warm", "ord_2"] = 2
df.loc[df.ord_2 == "Hot", "ord_2"] = 3
df.loc[df.ord_2 == "Boiling Hot", "ord_2"] = 4
df.loc[df.ord_2 == "Lava Hot", "ord_2"] = 5
df["ord_2"] = df["ord_2"].fillna(2.37)
df.loc[df.ord_1 == "Novice", "ord_1"] = 0
df.loc[df.ord_1 == "Contributor", "ord_1"] = 1
df.loc[df.ord_1 == "Expert", "ord_1"] = 2
df.loc[df.ord_1 == "Master", "ord_1"] = 3
df.loc[df.ord_1 == "Grandmaster", "ord_1"] = 4
df["ord_1"] = df["ord_1"].fillna(1.86)
df["ord_5"] = df.loc[df.ord_5.notnull(), "ord_5"].apply(lambda x: TLC(x))
df["ord_5"] = df["ord_5"].fillna("Zx").apply(lambda x: TLC(x))
df["ord_5"] = df["ord_5"].rank()
df["ord_3"] = df.loc[df.ord_3.notnull(), "ord_3"].apply(lambda x: ord(str(x)) - 96)
df["ord_3"] = df["ord_3"].fillna(8.44)
df["ord_4"] = df.loc[df.ord_4.notnull(), "ord_4"].apply(lambda x: ord(str(x)) - 64)
df["ord_4"] = df["ord_4"].fillna(14.31)
return df
def RidgeClf(
train, test, ordinal, ohe, scaler, seed, splits, drop_idx=None, dimreducer=None
):
y_train = train["target"].values.copy()
train_length = train.shape[0]
test["target"] = -1
data = pd.concat([train, test], axis=0).reset_index(drop=True)
X_ohe = pd.get_dummies(
data[ohe],
columns=ohe,
drop_first=True,
dummy_na=True,
sparse=True,
dtype="int8",
).sparse.to_coo()
if dimreducer is not None:
X_ohe = sp.sparse.csr_matrix(dimreducer.fit_transform(X_ohe))
gc.collect()
if ordinal is not None:
if scaler is not None:
X_ord = scaler.fit_transform(data[ordinal])
else:
            X_ord = data[ordinal].values
data_ = sp.sparse.hstack([X_ohe, X_ord]).tocsr()
else:
data_ = sp.sparse.hstack([X_ohe]).tocsr()
train = data_[:train_length]
test = data_[train_length:]
model = RidgeClassifier(alpha=152.5)
skf = StratifiedKFold(n_splits=splits, shuffle=True, random_state=seed)
oof = np.zeros((train.shape[0],))
y_pred = np.zeros((test.shape[0],))
for tr_ind, val_ind in skf.split(train, y_train):
if drop_idx is not None:
idx = list(set(tr_ind) - set(drop_idx))
X_tr, X_val = train[idx], train[val_ind]
y_tr, y_val = y_train[idx], y_train[val_ind]
else:
X_tr, X_val = train[tr_ind], train[val_ind]
y_tr, y_val = y_train[tr_ind], y_train[val_ind]
train_set = {"X": X_tr, "y": y_tr}
val_set = {"X": X_val, "y": y_val}
model.fit(train_set["X"], train_set["y"])
oof[val_ind] = model.decision_function(val_set["X"])
y_pred += model.decision_function(test) / splits
oof_auc_score = roc_auc_score(y_train, oof)
oof = rankdata(oof) / len(oof)
y_pred = rankdata(y_pred) / len(y_pred)
return oof, y_pred, oof_auc_score
def LogRegClf(
train, test, ordinal, ohe, scaler, seed, splits, drop_idx=None, dimreducer=None
):
params = {
"fit_intercept": True,
"random_state": SEED,
"penalty": "l2",
"verbose": 0,
"solver": "lbfgs",
"max_iter": 1000,
"n_jobs": 4,
"C": 0.05,
}
y_train = train["target"].values.copy()
train_length = train.shape[0]
test["target"] = -1
data = pd.concat([train, test], axis=0).reset_index(drop=True)
X_ohe = pd.get_dummies(
data[ohe],
columns=ohe,
drop_first=True,
dummy_na=True,
sparse=True,
dtype="int8",
).sparse.to_coo()
if dimreducer is not None:
X_ohe = sp.sparse.csr_matrix(dimreducer.fit_transform(X_ohe))
gc.collect()
if ordinal is not None:
if scaler is not None:
X_ord = scaler.fit_transform(data[ordinal])
else:
X_ord = data[ordinal].values
data_ = sp.sparse.hstack([X_ohe, X_ord]).tocsr()
else:
data_ = sp.sparse.hstack([X_ohe]).tocsr()
train = data_[:train_length]
test = data_[train_length:]
model = LogisticRegression(**params)
skf = StratifiedKFold(n_splits=splits, shuffle=True, random_state=seed)
oof = np.zeros((train.shape[0],))
y_pred = np.zeros((test.shape[0],))
for tr_ind, val_ind in skf.split(train, y_train):
if drop_idx is not None:
idx = list(set(tr_ind) - set(drop_idx))
X_tr, X_val = train[idx], train[val_ind]
y_tr, y_val = y_train[idx], y_train[val_ind]
else:
X_tr, X_val = train[tr_ind], train[val_ind]
y_tr, y_val = y_train[tr_ind], y_train[val_ind]
train_set = {"X": X_tr, "y": y_tr}
val_set = {"X": X_val, "y": y_val}
model.fit(train_set["X"], train_set["y"])
oof[val_ind] = model.predict_proba(val_set["X"])[:, 1]
y_pred += model.predict_proba(test)[:, 1] / splits
oof_auc_score = roc_auc_score(y_train, oof)
oof = rankdata(oof) / len(oof)
y_pred = rankdata(y_pred) / len(y_pred)
return oof, y_pred, oof_auc_score
# Imputing with a constant value means creating a new class for missing values in each column of the data. Sklearn's SimpleImputer has four strategies (IterativeImputer accepts the same ones as its initial_strategy):
# > * Constant
# > * Most-frequent
# > * Mean
# > * Median
# First we impute all columns, scale the ordinal columns with RobustScaler, and one-hot encode the others. A toy sketch of constant imputation follows.
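# Toy sketch (not part of the pipeline below) of what strategy="constant" does: every NaN in a
# column is replaced by the same fill_value, which effectively acts as one extra class.
_toy_const = pd.DataFrame({"ord_demo": [1.0, np.nan, 3.0], "nom_demo": [0.0, 2.0, np.nan]})
print(SimpleImputer(strategy="constant", fill_value=-1).fit_transform(_toy_const))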
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SimpleImputer(strategy="constant")
train[features] = imp.fit_transform(train[features])
test[features] = imp.transform(test[features])
train[features] = train[features].astype(np.int16)
test[features] = test[features].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof1, pred1, score["Constant"] = RidgeClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for Constant Imputation : {score["Constant"]}')
# This time we impute only the ordinal columns (again scaled with RobustScaler) and let the OHE (dummy_na=True) handle the missing values in the remaining columns.
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SimpleImputer(strategy="constant")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof2, pred2, score["Constant-OrdinalOnly"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
f'AUC score for Constant Imputation of ordinal columns: {score["Constant-OrdinalOnly"]}'
)
# OHE on all features except 'ord_3', 'ord_4', 'ord_5'
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [feat for feat in features if feat not in ["ord_3", "ord_4", "ord_5"]]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SimpleImputer(strategy="constant")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof3, pred3, score["Constant-Ord4Only"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
    f'AUC score for Constant Imputation of ord_3, ord_4 and ord_5 columns: {score["Constant-Ord4Only"]}'
)
# OneHotEncoding all columns
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = features
ordinal = None
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof4, pred4, score["CompleteOHE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for No imputation just OHE: {score["CompleteOHE"]}')
# Complete OHE on all columns after constant imputation on all columns
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = features
ordinal = None
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SimpleImputer(strategy="constant")
train[features] = imp.fit_transform(train[features])
test[features] = imp.transform(test[features])
train[features] = train[features].astype(np.int16)
test[features] = test[features].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof5, pred5, score["Constant-CompleteOHE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
f'AUC score for Constant Imputation with complete OHE: {score["Constant-CompleteOHE"]}'
)
# The mode imputer calculates the mode of the observed dataset and uses it to impute missing observations. After mode imputation we apply OHE to all columns.
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = features
ordinal = None
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="mode")
train[features] = imp.fit_transform(train[features])
test[features] = imp.transform(test[features])
train[features] = train[features].astype(np.int16)
test[features] = test[features].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof6, pred6, score["Mode-CompleteOHE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for Mode Imputation with complete OHE: {score["Mode-CompleteOHE"]}')
# Mean imputation of ordinal features with OHE of non-ordinal features
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="mean")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof7, pred7, score["Mean-OHE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
f'AUC score for Mean Imputation with OHE on non-ordinal columns: {score["Mean-OHE"]}'
)
# The ordinal feature mappings in the next method are borrowed from [let-s-overfit-some by Sergey Yurgensen](https://www.kaggle.com/ccccat/let-s-overfit-some)
train = base.copy()
test = baseTe.copy()
train = YurgensenMapping(train)
test = YurgensenMapping(test)
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof8, pred8, score["Yurgensen"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for Yurgensen ordinal-Mapping : {score["Yurgensen"]}')
# Imputing ordinal features with max(x)/2 and OHE on all non-ordinal columns. Most of this part is borrowed from [oh-my-plain-logreg by Ant](https://www.kaggle.com/superant/oh-my-plain-logreg)
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe] + ["day", "month"]
train = AntMapping(train, ordinal)
test = AntMapping(test, ordinal)
scaler = None
oof9, pred9, score["Ant"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
f'AUC score for Max Imputation of ordinals with OHE on non-ordinal columns: {score["Ant"]}'
)
# Imputing ordinal features with max(x)/2 and OHE on all non-ordinal columns with Frequency Encoding of ordinal columns.
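# Tiny illustration (toy data, not the competition frames) of what the CountEncoding helper
# above produces: each value is replaced by how often it occurs in the training column. This
# assumes the pandas behaviour the helper relies on, where value_counts().reset_index() exposes
# an "index" column.
_ce_toy = pd.DataFrame({"ord_demo": ["a", "a", "b", "b", "b", "c"]})
_ce_toy, _ce_cols = CountEncoding(_ce_toy, ["ord_demo"])
print(_ce_toy)  # ord_demo_counts should be 2 for "a", 3 for "b" and 1 for "c"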
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe] + ["day", "month"]
train = AntMapping(train, ordinal)
test = AntMapping(test, ordinal)
train, test, count_cols = CountEncoding(train, ordinal, test)
ordinal += count_cols
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof10, pred10, score["Ant-CE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
f'AUC score for Max Imputation of ordinals with OHE on non-ordinal columns with CountEncoding of Ordinals: {score["Ant-CE"]}'
)
# Median imputation of ord_0, ord_1 and ord_2 with OHE of the remaining columns
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [feat for feat in features if feat not in ["ord_0", "ord_1", "ord_2"]]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SimpleImputer(strategy="median")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof11, pred11, score["median-OrdPartial"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
f'AUC score for median Imputation of ord_0, ord_1 and ord_2 columns: {score["median-OrdPartial"]}'
)
# The Norm Strategy constructs a normal distribution using the sample mean and variance of the observed data. The imputer then randomly samples from this distribution to impute missing data.
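# Conceptual numpy sketch of the "norm" strategy (an illustration, not autoimpute's internals):
# fit a normal distribution to the observed values and draw random samples for the NaNs.
_norm_col = np.array([1.0, 2.0, np.nan, 4.0, np.nan])
_norm_obs = _norm_col[~np.isnan(_norm_col)]
_norm_mask = np.isnan(_norm_col)
_norm_col[_norm_mask] = np.random.normal(_norm_obs.mean(), _norm_obs.std(), _norm_mask.sum())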
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="norm")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof12, pred12, score["Norm"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for Norm Imputation of ordinals: {score["Norm"]}')
# LOCF carries the last observation forward to impute missing data. A similar strategy is NOCB, which carries the next observation backward. A tiny pandas illustration of the two follows.
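# Toy pandas illustration of the two strategies mentioned above (only LOCF is used below):
_locf_s = pd.Series([np.nan, 1.0, np.nan, 3.0, np.nan])
print(_locf_s.ffill())  # LOCF: the leading NaN stays, the others take the last seen value
print(_locf_s.bfill())  # NOCB: the trailing NaN stays, the others take the next seen value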
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="locf")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof13, pred13, score["LOCF"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for LOCF Imputation of ordinals: {score["LOCF"]}')
# This default univariate imputation determines how to impute based on the dtype of each column in a dataframe. It uses the mean for numeric data and the mode for categorical data.
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="default univariate")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof14, pred14, score["DefUnivariate"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for Default Univariate Imputation: {score["DefUnivariate"]}')
# Interpolate imputes missing values using a valid pd.Series interpolation strategy (linear by default). A one-line toy example follows.
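# One-line toy example of the underlying pandas call (linear interpolation by default):
print(pd.Series([1.0, np.nan, 3.0]).interpolate())  # the NaN becomes 2.0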
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = features
ordinal = None
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="interpolate")
train[features] = imp.fit_transform(train[features])
test[features] = imp.transform(test[features])
train[features] = train[features].astype(np.int16)
test[features] = test[features].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof15, pred15, score["Interpolate-OHE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
    f'AUC score for Interpolate Imputation with OHE on all columns: {score["Interpolate-OHE"]}'
)
# Simple implementation of [Exact Matrix Completion via Convex Optimization](http://statweb.stanford.edu/~candes/papers/MatrixCompletion.pdf)
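# Toy SoftImpute run (illustration only, separate from the pipeline below): the missing cell of a
# (near) rank-1 matrix is filled from its low-rank structure.
_si_toy = np.array([[1.0, 2.0], [2.0, 4.0], [3.0, np.nan]])
print(SoftImpute(max_iters=100, verbose=False).fit_transform(_si_toy))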
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = features
ordinal = None
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SoftImpute(max_iters=100, verbose=False)
train[features] = imp.fit_transform(train[features])
test[features] = imp.fit_transform(test[features])
train[features] = train[features].astype(np.int16)
test[features] = test[features].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof16, pred16, score["SoftImpute-OHE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for SoftImpute Imputation : {score["SoftImpute-OHE"]}')
# The LeastSquares Strategy produces predictions using the least squares methodology. The prediction from the line of best fit given a set of predictors become the imputations.
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [feat for feat in features if feat not in ["ord_0", "ord_1", "ord_2"]]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="least squares")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof17, pred17, score["LQ-PartialOHE"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for LQ Imputation with Partial OHE : {score["LQ-PartialOHE"]}')
# The Stochastic Strategy predicts using the least squares methodology. The imputer then samples from the regression’s error distribution and adds the random draw to the prediction. This draw adds the stochastic element to the imputations.
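# Conceptual sketch of the stochastic idea (illustration only, not autoimpute's internals):
# predict with least squares, then add noise drawn from the residual distribution.
from sklearn.linear_model import LinearRegression
_st_X = np.array([[0.0], [1.0], [2.0], [3.0]])
_st_y = np.array([0.1, 0.9, 2.1, 2.9])
_st_lr = LinearRegression().fit(_st_X, _st_y)
_st_resid_std = (_st_y - _st_lr.predict(_st_X)).std()
_st_imputed = _st_lr.predict(np.array([[4.0]])) + np.random.normal(0.0, _st_resid_std, 1)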
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = SingleImputer(strategy="stochastic")
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof18, pred18, score["stochastic"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for stochastic Imputation : {score["stochastic"]}')
# IterativeImputer is a multivariate imputer that estimates each feature from all the others, starting from an initial strategy. An estimator (BayesianRidge by default) is used at each step of the round-robin imputation. If sample_posterior is True, the estimator must support return_std in its predict method.
#
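# Small self-contained sketch of the sample_posterior option mentioned above (toy data only;
# the experiments below keep the default sample_posterior=False).
_it_toy = np.array([[1.0, 2.0], [2.0, np.nan], [3.0, 6.0], [np.nan, 8.0]])
print(IterativeImputer(sample_posterior=True, random_state=SEED).fit_transform(_it_toy))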
train = base.copy()
test = baseTe.copy()
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = IterativeImputer(
max_iter=500, initial_strategy="most_frequent", random_state=SEED
)
train[features] = imp.fit_transform(train[features])
test[features] = imp.transform(test[features])
train[features] = train[features].astype(np.int16)
test[features] = test[features].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof19, pred19, score["Iterative"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(f'AUC score for Iterative Imputation : {score["Iterative"]}')
# Another important strategy is listwise deletion (dropping every row with a missing value), which is not viable in this competition; instead we drop only training rows with four or more missing values, in combination with iterative imputation.
train = base.copy()
test = baseTe.copy()
drop_idx = base[(base.isna().sum(axis=1) > 3)].index.values
drop_idx = [i for i in drop_idx]
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = IterativeImputer(
max_iter=500, initial_strategy="most_frequent", random_state=SEED
)
train[features] = imp.fit_transform(train[features])
test[features] = imp.transform(test[features])
train[features] = train[features].astype(np.int16)
test[features] = test[features].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof20, pred20, score["IterativeWithDrop"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=drop_idx,
)
print(
f'AUC score for Iterative Imputation With threshold Drop : {score["IterativeWithDrop"]}'
)
# Another technique is adding indicators for imputed data, which sklearn implements via the MissingIndicator transformer. If the add_indicator option is selected in sklearn's imputers, the indicator array is appended to the original data.
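# A minimal sketch of the standalone transformer mentioned above: MissingIndicator
# produces a boolean mask of which entries were missing, which can be appended to the
# features (the add_indicator=True flag used below does the same thing inside the imputer).
from sklearn.impute import MissingIndicator

demo_indicator = MissingIndicator(features="all")
# demo_flags = demo_indicator.fit_transform(base[ordinal])  # one indicator column per feature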
train = base.copy()
test = baseTe.copy()
drop_idx = base[(base.isna().sum(axis=1) > 3)].index.values
drop_idx = [i for i in drop_idx]
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = IterativeImputer(
max_iter=500,
initial_strategy="most_frequent",
random_state=SEED,
add_indicator=True,
)
indicator_cols = [feat + "_ind" for feat in ordinal]
for col in indicator_cols:
train[col] = 0
test[col] = 0
train[col] = train[col].astype(np.uint8)
test[col] = test[col].astype(np.uint8)
train[ordinal + indicator_cols] = imp.fit_transform(train[ordinal])
test[ordinal + indicator_cols] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
ohe += indicator_cols
oof21, pred21, score["IterativeWithIndicator"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=drop_idx,
)
print(
f'AUC score for Iterative Imputation With Indicator : {score["IterativeWithIndicator"]}'
)
# Dimensionality reduction of OHE cols in iterative mode imputation with MissingIndicator
# We can also add the per-row count of missing values as a feature. This is IterativeImputer plus the sum of missing values for each row.
train = base.copy()
test = baseTe.copy()
train["nulls"] = train.isna().sum(axis=1)
test["nulls"] = test.isna().sum(axis=1)
features = [feat for feat in train.columns if feat not in ["target", "id"]]
ohe = [
feat
for feat in features
if feat not in ["ord_0", "ord_1", "ord_2", "ord_3", "ord_4", "ord_5", "nulls"]
]
ordinal = [feat for feat in features if feat not in ohe]
train[features] = SiavashMapping(train[features])
test[features] = SiavashMapping(test[features])
imp = IterativeImputer(
max_iter=500, initial_strategy="most_frequent", random_state=SEED
)
train[ordinal] = imp.fit_transform(train[ordinal])
test[ordinal] = imp.transform(test[ordinal])
train[ordinal] = train[ordinal].astype(np.int16)
test[ordinal] = test[ordinal].astype(np.int16)
scaler = RobustScaler(quantile_range=(10.0, 90.0))
oof22, pred22, score["IterativeWithSum"] = LogRegClf(
train=train,
test=test,
ordinal=ordinal,
ohe=ohe,
scaler=scaler,
seed=SEED,
splits=SPLITS,
drop_idx=None,
)
print(
f'AUC score for Iterative Imputation with sum of missing values: {score["IterativeWithSum"]}'
)
scores = pd.DataFrame(score, index=["OOF-AUC"]).T.sort_values(
by="OOF-AUC", ascending=False
)
scores
ax = scores.plot(
kind="barh",
title="ROC-AUC Score For Different Imputation Techniques",
figsize=(15, 10),
legend=True,
fontsize=12,
alpha=0.85,
cmap="gist_gray",
)
ax.set_xlim((0.7, 0.8))
plt.show()
# More options to consider:
# > * Random Forest from missingpy
# > * Bayesian Binary and Multinomial Logistic from autoimp
# > * PMM, LRD from autoimp
# > * NuclearNormMinimization from fancyimpute
# > * IterativeSVD from fancyimpute
# Here we stack the top models with a RidgeClassifier (plus RFECV feature selection) and create the final submission file!
def StackModels():
idx = baseTe.id.values
train_oofs = [oof21, oof9, oof4]
test_oofs = [pred21, pred9, pred4]
X_train = pd.concat([pd.DataFrame(file) for file in train_oofs], axis=1)
X_test = pd.concat([pd.DataFrame(file) for file in test_oofs], axis=1)
X_train.columns = ["y_" + str(i) for i in range(len(train_oofs))]
X_test.columns = ["y_" + str(i) for i in range(len(train_oofs))]
X_train = pd.concat([X_train, base[["target"]]], axis=1)
X_test = pd.concat([X_test, baseTe[["target"]]], axis=1)
for i, (oof, pred) in enumerate(zip(train_oofs, test_oofs)):
train_oofs[i] = rankdata(oof) / len(oof)
test_oofs[i] = rankdata(pred) / len(pred)
for f in X_train.columns:
X_train[f] = X_train[f].astype("float32")
X_test[f] = X_test[f].astype("float32")
features = np.array([f for f in X_train.columns if f not in ["target"]])
target = ["target"]
oof_pred_final = np.zeros((len(base),))
y_pred_final = np.zeros((len(baseTe),))
skf = StratifiedKFold(n_splits=SPLITS, shuffle=True, random_state=SEED)
model = RidgeClassifier()
selector = RFECV(model, step=1, cv=skf, scoring="roc_auc", verbose=0, n_jobs=4)
selector.fit(X_train[features], X_train[target])
selected_features = [i for i, y in enumerate(selector.ranking_) if y == 1]
selected_features = features[selected_features]
for fold, (tr_ind, val_ind) in enumerate(skf.split(X_train, X_train[target])):
x_tr, x_val = (
X_train[selected_features].iloc[tr_ind],
X_train[selected_features].iloc[val_ind],
)
y_tr, y_val = X_train[target].iloc[tr_ind], X_train[target].iloc[val_ind]
train_set = {"X": x_tr, "y": y_tr}
val_set = {"X": x_val, "y": y_val}
model = RidgeClassifier()
model.fit(train_set["X"], train_set["y"])
fold_pred = model.decision_function(val_set["X"])
oof_pred_final[val_ind] = fold_pred
y_pred_final += model.decision_function(X_test[selected_features]) / (SPLITS)
oof_auc_score = roc_auc_score(base[target], oof_pred_final)
print(f"OOF Stack ROC-AUC Score is: {oof_auc_score:.7f}")
y_pred_final = rankdata(y_pred_final) / len(y_pred_final)
np.save("oof_pred_final.npy", oof_pred_final)
np.save("y_pred_final.npy", y_pred_final)
print("*" * 36)
print(" OOF files saved!")
print("*" * 36)
submission = pd.DataFrame.from_dict({"id": idx, "target": y_pred_final})
submission.to_csv("submission.csv", index=False)
print("*" * 36)
print(" Submission file saved!")
print("*" * 36)
return
StackModels()
|
import pandas as pd
df = pd.read_csv("../input/chennai_reservoir_rainfall.csv", parse_dates=["Date"])
df.head()
df.info()
expected_days = df.Date.max() - df.Date.min()
print(expected_days.days)
actual_days = df.shape[0]
print(actual_days)
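# If the two counts disagree, the gaps can be listed explicitly: build the full daily
# calendar between the first and last date and subtract the dates that are present.
full_calendar = pd.date_range(df.Date.min(), df.Date.max(), freq="D")
missing_dates = full_calendar.difference(df.Date)
print(len(missing_dates), "calendar days have no row in the dataset")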
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(18, 6))
df.plot(kind="line", x="Date", y="POONDI", ax=ax)
zoom_range = df[(df.Date >= "2010-01-01") & (df.Date < "2011-01-01")].index
fig, ax = plt.subplots(figsize=(18, 6))
df.loc[zoom_range].plot(kind="line", x="Date", y="POONDI", ax=ax)
from statsmodels.tsa.seasonal import seasonal_decompose
defreq = 12
model = "additive"
decomposition = seasonal_decompose(
df.set_index("Date").POONDI.interpolate("linear"), freq=defreq, model=model
)
trend = decomposition.trend
sesonal = decomposition.seasonal
resid = decomposition.resid
fig, ax = plt.subplots(figsize=(18, 6))
df.plot(kind="line", x="Date", y="POONDI", ax=ax)
trend.plot(ax=ax, label="trend")
# # Monthly trend and seasonality
mon = df.Date.dt.to_period("M")
mon_group = df.groupby(mon)
df_monthly = mon_group.sum()
df_monthly.head()
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(18, 6))
df_monthly.plot(kind="line", y="POONDI", ax=ax)
from statsmodels.tsa.seasonal import seasonal_decompose
defreq = 12
model = "additive"
decomposition = seasonal_decompose(
df_monthly.POONDI.interpolate("linear"), freq=defreq, model=model
)
trend = decomposition.trend
sesonal = decomposition.seasonal
resid = decomposition.resid
fig, ax = plt.subplots(figsize=(18, 6))
df_monthly.plot(kind="line", y="POONDI", ax=ax, label="Poondi")
trend.plot(ax=ax, label="Trend")
plt.legend(loc="upper left")
fig, ax = plt.subplots(figsize=(18, 6))
df_monthly.plot(kind="line", y="POONDI", ax=ax, label="Poondi")
sesonal.plot(ax=ax, label="Seasonality")
plt.legend(loc="upper left")
fig, ax = plt.subplots(figsize=(18, 6))
df_monthly.plot(kind="line", y="POONDI", ax=ax, label="Poondi")
resid.plot(ax=ax, label="Residual")
plt.legend(loc="upper left")
# # Yearly trend and seasonality
year = df.Date.dt.to_period("Y")
year_group = df.groupby(year)
df_yearly = year_group.sum()
df_yearly.head()
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(18, 6))
df_yearly.plot(kind="line", y="POONDI", ax=ax)
from statsmodels.tsa.seasonal import seasonal_decompose
defreq = 12
model = "additive"
decomposition = seasonal_decompose(
df_yearly.POONDI.interpolate("linear"), freq=defreq, model=model
)
trend = decomposition.trend
sesonal = decomposition.seasonal
resid = decomposition.resid
fig, ax = plt.subplots(figsize=(18, 6))
df_yearly.plot(kind="line", y="POONDI", ax=ax, label="Poondi")
trend.plot(ax=ax, label="Trend")
plt.legend(loc="upper left")
fig, ax = plt.subplots(figsize=(18, 6))
df_yearly.plot(kind="line", y="POONDI", ax=ax, label="Poondi")
sesonal.plot(ax=ax, label="Seasonality")
plt.legend(loc="upper left")
|
# ## Hope everything will be OK
# Severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), formerly known as the 2019 novel coronavirus (2019-nCoV), is a positive-sense, single-stranded RNA coronavirus.
# The virus is the cause of coronavirus disease 2019 (COVID-19) and is contagious among humans. There is no vaccine against the virus at this time. SARS-CoV-2 has genetic similarities to SARS-CoV (79.5%) and bat coronaviruses (96%), which makes an origin in bats likely, although an intermediate reservoir such as a pangolin is thought to be involved.
# SARS-CoV-2 is implicated in the ongoing 2019–20 Wuhan coronavirus outbreak, a Public Health Emergency of International Concern. Because of this connection, the virus is sometimes referred to informally as the "Wuhan coronavirus".
# From Wikipedia
# 
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
# Import data of 2020 Feb 06
df = pd.read_csv(
"/kaggle/input/2019-coronavirus-dataset-01212020-01262020/2019_nCoV_20200121_20200206.csv",
header=0,
names=[
"province",
"country",
"last_update",
"confirmed",
"suspected",
"recovered",
"death",
],
)
df["last_update"] = pd.to_datetime(df["last_update"]).dt.date
df.info()
# Hong Kong, Taiwan and Macau are treated here as part of Mainland China, so the country column should be updated accordingly.
df["country"].replace(
{
"Hong Kong": "Mainland China",
"Taiwan": "Mainland China",
"Macau": "Mainland China",
},
inplace=True,
)
# According to global data, China has the highest number of confirmed cases in the world, far exceeding other countries. Create a new dataframe to look at the distribution of confirmed/recovered/death cases in China.
china = df[df["country"] == "Mainland China"]
# Take the latest update data (05 Feb 2020) for further analysis.
from datetime import date
d = china["last_update"].astype("str")
year = int(d.values[0].split("-")[0])
month = int(d.values[0].split("-")[1])
day = int(d.values[0].split("-")[2].split()[0])
china_update = china[china["last_update"] == pd.Timestamp(date(2020, 2, 5))]
china_update.head()
china_last = (
china_update.groupby(["province", "last_update"])
.agg({"confirmed": sum, "recovered": sum, "death": sum})
.reset_index()
.sort_values("confirmed", ascending=False)
)
china_last.head()
print(
"Until 05 Feb 2020, there are %s provinces infected in China."
% format(len(china_update["province"].unique()))
)
# Up to 05 Feb 2020, confirmed cases had been detected in 30 provinces in China. Obviously, Hubei Province has the most confirmed cases. Among the other provinces, which ones have the most? List the 5 provinces with the most confirmed cases (Hubei included).
# Top five province for confirmed
china_last.groupby("province")["confirmed"].sum().sort_values(ascending=False)[:5]
# The number of confirmed cases in Hubei Province is about 10 times that of Zhejiang. When did confirmed cases increase fastest in Hubei and in the other top provinces?
china_growth = (
china.groupby(["province", "last_update"])
.max()
.reset_index()
.sort_values("last_update")
)
hubei = china_growth[china_growth["province"] == "Hubei"]
trace1 = go.Scatter(
name="Hubei confimred growth",
x=hubei["last_update"],
y=hubei["confirmed"],
line_shape="linear",
)
data = [trace1]
fig = go.Figure(data)
fig.update_layout(title="Confirmation growth for Hubei confirmed cases")
fig.show()
# hubei = china[china['province'] == 'Hubei'].reset_index()
zhejiang = china_growth[china_growth["province"] == "Zhejiang"]
guangdong = china_growth[china_growth["province"] == "Guangdong"]
henan = china_growth[china_growth["province"] == "Henan"]
hunan = china_growth[china_growth["province"] == "Hunan"]
# hubei_confirmed_growth = hubei['confirmed'].groupby(hubei['last_update']).max().reset_index()
# zhejiang_confirmed_growth = zhejiang.groupby(zhejiang['last_update']).agg({'confirmed':sum}).reset_index()
# guangdong_confirmed_growth = guangdong.groupby(guangdong['last_update']).agg({'confirmed':sum}).reset_index()
# henan_confirmed_growth = henan.groupby(henan['last_update']).agg({'confirmed':sum}).reset_index()
# hunan_confirmed_growth = hunan.groupby(hunan['last_update']).agg({'confirmed':sum}).reset_index()
# trace1 = go.Scatter(name = 'Hubei confimred growth', x = hubei_confirmed_growth['last_update'], y = hubei_confirmed_growth['confirmed'], line_shape = 'spline')
trace2 = go.Scatter(
name="Zhejiang confimred growth",
x=zhejiang["last_update"],
y=zhejiang["confirmed"],
line_shape="linear",
)
trace3 = go.Scatter(
name="Guangdong confimred growth",
x=guangdong["last_update"],
y=guangdong["confirmed"],
line_shape="linear",
)
trace4 = go.Scatter(
name="Henan confimred growth",
x=henan["last_update"],
y=henan["confirmed"],
line_shape="linear",
)
trace5 = go.Scatter(
name="Hunan confimred growth",
x=hunan["last_update"],
y=hunan["confirmed"],
line_shape="linear",
)
data = [trace2, trace3, trace4, trace5]
fig = go.Figure(data)
fig.update_layout(title="Confirmation growth for 5 most confirmed cases")
fig.show()
# In Hubei province, Wuhan is the city with the most confirmed cases. Compare the cumulative number of confirmed cases in Hubei with the cases outside Hubei.
out_of_hubei = china[china["province"] != "Hubei"]
oohubei_confirmed_growth = (
out_of_hubei.groupby(out_of_hubei["last_update"])
.agg({"confirmed": sum})
.reset_index()
)
china_growth_ = (
china.groupby(china["last_update"]).agg({"confirmed": sum}).reset_index()
)
trace1 = go.Scatter(
name="Hubei confirmed cases",
x=hubei["last_update"],
y=hubei["confirmed"],
line_shape="spline",
)
trace2 = go.Scatter(
name="Out of Hubei confirmed casses",
x=oohubei_confirmed_growth["last_update"],
y=oohubei_confirmed_growth["confirmed"],
line_shape="spline",
)
trace3 = go.Scatter(
name="China confirmed casses",
x=china_growth_["last_update"],
y=china_growth_["confirmed"],
line_shape="spline",
)
data = [trace1, trace2, trace3]
fig = go.Figure(data)
fig.update_layout(title="Confirmed cases of Hubei VS out of Hubei")
fig.show()
# The growth curve of Hubei is similar to that of the whole country, and the values are close. After January 31, the number of confirmed cases in the other provinces (outside Hubei) increased significantly. The inflection point of confirmed cases across provinces appears around February 5. ***According to official data from Tencent, only 10 new confirmed cases were added on February 16.*** Maybe it's the beginning of some good news.
# The death and recovered data can be checked together. List the top 5 provinces for mortality and recovery.
# Top five province for mortality
china_last.groupby("province")["death"].sum().sort_values(ascending=False)[:5]
# Top five provinces for recovery
china_last.groupby("province")["recovered"].sum().sort_values(ascending=False)[:5]
# Check recovered and death trends across the provinces
china_recovery_growth = (
china.groupby(china["last_update"]).agg({"recovered": sum}).reset_index()
)
china_death_growth = (
china.groupby(china["last_update"]).agg({"death": sum}).reset_index()
)
trace1 = go.Scatter(
name="China recovered cases trend",
x=china_recovery_growth["last_update"],
y=china_recovery_growth["recovered"],
line_shape="spline",
)
trace2 = go.Scatter(
name="China death cases trend",
x=china_death_growth["last_update"],
y=china_death_growth["death"],
line_shape="spline",
)
data = [trace1, trace2]
fig = go.Figure(data)
fig.update_layout(title="Recovery/Death cases of China")
fig.show()
# How about the recovery and death ratios in mainland China?
china["recovered_ratio"] = (china["recovered"] / china["confirmed"]) * 100
china["death_ratio"] = (china["death"] / china["confirmed"]) * 100
china_recovered_ratio = (
china.groupby(china["last_update"]).agg({"recovered_ratio": "mean"}).reset_index()
)
china_death_ratio = (
china.groupby(china["last_update"]).agg({"death_ratio": "mean"}).reset_index()
)
trace1 = go.Scatter(
name="Recovered Ratio %",
x=china_recovered_ratio["last_update"],
y=china_recovered_ratio["recovered_ratio"],
)
trace2 = go.Scatter(
name="Death Ratio %",
x=china_death_ratio["last_update"],
y=china_death_ratio["death_ratio"],
)
data = [trace1, trace2]
fig = go.Figure(data)
fig.update_layout(title="Recovery/Death ratio for China")
fig.show()
# As with confirmed cases, compare the recovery/death ratios for Hubei, outside Hubei, and all of China.
hubei["recovered_ratio"] = (hubei["recovered"] / hubei["confirmed"]) * 100
hubei_recovery_ratio = (
hubei.groupby(hubei["last_update"]).agg({"recovered_ratio": "mean"}).reset_index()
)
out_of_hubei["recovered_ratio"] = (
out_of_hubei["recovered"] / out_of_hubei["confirmed"]
) * 100
out_of_hubei_ratio = (
out_of_hubei.groupby(out_of_hubei["last_update"])
.agg({"recovered_ratio": "mean"})
.reset_index()
)
trace1 = go.Scatter(
name="Hubei recovered ratio %",
x=hubei_recovery_ratio["last_update"],
y=hubei_recovery_ratio["recovered_ratio"],
line_shape="spline",
)
trace2 = go.Scatter(
name="Out of Hubei recovered ratio %",
x=out_of_hubei_ratio["last_update"],
y=out_of_hubei_ratio["recovered_ratio"],
line_shape="spline",
)
trace3 = go.Scatter(
name="China recovered ratio %",
x=china_recovered_ratio["last_update"],
y=china_recovered_ratio["recovered_ratio"],
line_shape="spline",
)
data = [trace1, trace2, trace3]
fig = go.Figure(data)
fig.update_layout(title="Recovery ratio % of China, Hubei and Out of Hubei")
fig.show()
hubei["death_ratio"] = (hubei["death"] / hubei["confirmed"]) * 100
hubei_death_ratio = (
hubei.groupby(hubei["last_update"]).agg({"death_ratio": "mean"}).reset_index()
)
out_of_hubei["death_ratio"] = (out_of_hubei["death"] / out_of_hubei["confirmed"]) * 100
out_of_hubei_death_ratio = (
out_of_hubei.groupby(out_of_hubei["last_update"])
.agg({"death_ratio": "mean"})
.reset_index()
)
trace1 = go.Scatter(
name="Hubei death ratio %",
x=hubei_death_ratio["last_update"],
y=hubei_death_ratio["death_ratio"],
line_shape="spline",
)
trace2 = go.Scatter(
name="Out of Hubei death ratio %",
x=out_of_hubei_death_ratio["last_update"],
y=out_of_hubei_death_ratio["death_ratio"],
line_shape="spline",
)
trace3 = go.Scatter(
name="China death ratio %",
x=china_death_ratio["last_update"],
y=china_death_ratio["death_ratio"],
line_shape="spline",
)
data = [trace1, trace2, trace3]
fig = go.Figure(data)
fig.update_layout(title="Death ratio % of China, Hubei and Out of Hubei")
fig.show()
|
import pandas as pd
import numpy as np
from pathlib import Path
from geopy.geocoders import Nominatim
import folium
df = pd.read_csv("/kaggle/input/prisons-fr/prisons_FR_coordinates.csv")
df = df.set_index(["dep", "type", "idx"])
df.head()
# Create a map centered on the first prison's coordinates
m = folium.Map(location=[df.iloc[0]["latitude"], df.iloc[0]["longitude"]], zoom_start=2)
# Add markers for each prison
for i, row in df.iterrows():
folium.Marker(
location=[row["latitude"], row["longitude"]], tooltip=row["name"]
).add_to(m)
# Display the map
m
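# If the marker count grows, folium's MarkerCluster plugin can group nearby prisons; a
# minimal sketch reusing the same dataframe columns (the zoom level here is arbitrary).
from folium.plugins import MarkerCluster

clustered_map = folium.Map(
    location=[df.iloc[0]["latitude"], df.iloc[0]["longitude"]], zoom_start=6
)
cluster = MarkerCluster().add_to(clustered_map)
for _, row in df.iterrows():
    folium.Marker(
        location=[row["latitude"], row["longitude"]], tooltip=row["name"]
    ).add_to(cluster)
clustered_map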
|
import numpy as np
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score
from xgboost import plot_importance
import time
import datetime
import re
from itertools import product
from math import isnan
import scipy.stats as stats
import gc
import pickle
import os
print(os.listdir("../input"))
# DISCLAIMER: Some procedures and ideas (in particular the feature pairs used to extract lags and the duplicated Russian shop_names) in this kernel have been influenced by the following two kernels:
# https://www.kaggle.com/kyakovlev/1st-place-solution-part-1-hands-on-data
# https://www.kaggle.com/dlarionov/feature-engineering-xgboost
##########################################################################################################
########################################## STEP 1: LOAD DATA #########################################
##########################################################################################################
# Load input files
sales_train = pd.read_csv(
"../input/competitive-data-science-predict-future-sales/sales_train.csv",
parse_dates=["date"],
infer_datetime_format=False,
dayfirst=True,
)
test = pd.read_csv("../input/competitive-data-science-predict-future-sales/test.csv")
item_categories = pd.read_csv(
"../input/competitive-data-science-predict-future-sales/item_categories.csv"
)
items = pd.read_csv("../input/competitive-data-science-predict-future-sales/items.csv")
shops = pd.read_csv("../input/competitive-data-science-predict-future-sales/shops.csv")
# Take a brief look on the content
print("Sales_train")
display(sales_train.head(10))
print("Test")
display(test.head(10))
print("Item_categories")
display(item_categories.head(10))
print("Items")
display(items.head(10))
print("Shops")
display(shops.head(1))
# Auxiliary function to reduce memory usage
def downcast_dtypes(df):
# Columns to downcast
float_cols = [c for c in df if df[c].dtype == "float64"]
int_cols = [c for c in df if df[c].dtype == "int64"]
# Downcast
df[float_cols] = df[float_cols].astype(np.float16)
df[int_cols] = df[int_cols].astype(np.int16)
return df
# Prepare the test set to merge it with sales_train
test["date_block_num"] = 34
test["date"] = datetime.datetime(2015, 11, 1, 0, 0, 0)
# Join train and test sets. Fill date_block_num = 34 for test rows
all_data = pd.concat([sales_train, test], axis=0, sort=False)
all_data["date_block_num"].fillna(34, inplace=True)
# Create flag (in_test) for month=34
all_data["in_test"] = 0
all_data.loc[all_data.date_block_num == 34, "in_test"] = 1
# Create a flag (is_new_item) for elements in test not in sales_train
new_items = set(test["item_id"].unique()) - set(sales_train["item_id"].unique())
all_data.loc[all_data["item_id"].isin(new_items), "is_new_item"] = 1
# Fill missings with 0
all_data.fillna(0, inplace=True)
all_data = downcast_dtypes(all_data)
all_data = all_data.reset_index()
display(all_data.head(10))
print("Train set size: ", len(sales_train))
print("Test set size: ", len(test))
print("Item categories set size: ", len(item_categories))
print("Items set size: ", len(items))
print("Shops set size: ", len(shops))
print("All data size: ", len(all_data))
print("Duplicates in train dataset: ", len(sales_train[sales_train.duplicated()]))
##########################################################################################################
###################################### STEP 2: DATA EXPLORATION ######################################
##########################################################################################################
# Describe the merged data to look for unusual values
display(all_data.describe())
print("Item_price outlier: ")
print(all_data.loc[all_data["item_price"].idxmax()])
print("\nItem_cnt_day maximum: ")
print(all_data.loc[all_data["item_cnt_day"].idxmax()])
f1, axes = plt.subplots(1, 2, figsize=(15, 5))
f1.subplots_adjust(hspace=0.4, wspace=0.2)
sns.boxplot(x=all_data["item_price"], ax=axes[0])
sns.boxplot(x=all_data["item_cnt_day"], ax=axes[1])
# print(shops['shop_name'].unique())
# Conclusions:
# 1 - There are negative prices and counts (errors, returns?)
# 2 - Item_id = 6066 has an abnormally large price (item_price = 307980) and is sold only once
# 3 - 2 items have a very large item_cnt_day compared with the other products
# 4 - Shop_name contains the shops' city names (Москва, Moscow). An additional feature can be obtained
# 5 - The city Якутск is written both as Якутск and !Якутск. This should be fixed
# 6 - Shop_id 0 & 1 are the same as 57 & 58 except for фран (Google Translate => fran, maybe franchise). Shop_id 10 & 11 are also the same
# Drop outliers and negative counts (see graphs below)
all_data = all_data.drop(all_data[all_data["item_price"] > 100000].index)
all_data = all_data.drop(all_data[all_data["item_cnt_day"] > 1100].index)
sales_train = sales_train.drop(sales_train[sales_train["item_price"] > 100000].index)
sales_train = sales_train.drop(sales_train[sales_train["item_cnt_day"] > 1100].index)
# There are shops with the same address and almost the same name in Russian.
# Unify duplicated shops (see https://www.kaggle.com/dlarionov/feature-engineering-xgboost)
all_data.loc[all_data["shop_id"] == 11, "shop_id"] = 10
all_data.loc[all_data["shop_id"] == 57, "shop_id"] = 0
all_data.loc[all_data["shop_id"] == 58, "shop_id"] = 1
sales_train.loc[sales_train["shop_id"] == 11, "shop_id"] = 10
sales_train.loc[sales_train["shop_id"] == 57, "shop_id"] = 0
sales_train.loc[sales_train["shop_id"] == 58, "shop_id"] = 1
test.loc[test["shop_id"] == 11, "shop_id"] = 10
test.loc[test["shop_id"] == 57, "shop_id"] = 0
test.loc[test["shop_id"] == 58, "shop_id"] = 1
# Instead of deleting negative price items, replace them with the median value for the impacted group:
all_data.loc[all_data["item_price"] < 0, "item_price"] = all_data[
(all_data["shop_id"] == 32)
& (all_data["item_id"] == 2973)
& (all_data["date_block_num"] == 4)
& (all_data["item_price"] > 0)
].item_price.median()
print("Raw data length: ", len(sales_train), ", post-outliers length: ", len(all_data))
# last_sales = sales_train.groupby(by=['item_id', 'shop_id'], as_index=False)[['item_id', 'shop_id', 'date_block_num']].transform('min')
# min_date_block_num = sales_train.groupby(by=['item_id', 'shop_id'])['date_block_num'].transform('min')
# last_sales = last_sales.join(min_date_block_num)
ts = time.time()
# Enrich data with additional features and aggregates for data exploration purposes
def enrich_data(all_data, items, shops, item_categories):
# Aggregate at month level. Calculate item_cnt_month and item_price (median)
count_data = (
all_data.groupby(
["shop_id", "item_id", "date_block_num", "in_test", "is_new_item"]
)["item_cnt_day"]
.sum()
.rename("item_cnt_month")
.reset_index()
)
price_data = (
all_data.groupby(
["shop_id", "item_id", "date_block_num", "in_test", "is_new_item"]
)["item_price"]
.median()
.rename("item_price_median")
.reset_index()
)
all_data = pd.merge(
count_data,
price_data,
on=["shop_id", "item_id", "in_test", "date_block_num", "is_new_item"],
how="left",
)
# Extract day, month, year
# all_data['day'] = all_data['date'].dt.day
# all_data['month'] = all_data['date'].dt.month
# all_data['year'] = all_data['date'].dt.year
# Add item, shop and item_category details
all_data = all_data.join(items, on="item_id", rsuffix="_item")
all_data = all_data.join(shops, on="shop_id", rsuffix="_shop")
all_data = all_data.join(
item_categories, on="item_category_id", rsuffix="_item_category"
)
all_data = all_data.drop(
columns=[
"item_id_item",
"shop_id_shop",
"item_category_id_item_category",
"item_name",
]
)
# Extract main category and subcategory from category name
categories_split = all_data["item_category_name"].str.split("-")
all_data["main_category"] = categories_split.map(lambda row: row[0].strip())
all_data["secondary_category"] = categories_split.map(
lambda row: row[1].strip() if (len(row) > 1) else "N/A"
)
# Extract cities information from shop_name. Replace !Якутск by Якутск since it's the same city
all_data["city"] = all_data["shop_name"].str.split(" ").map(lambda row: row[0])
all_data.loc[all_data.city == "!Якутск", "city"] = "Якутск"
# Encode cities and categories
encoder = sklearn.preprocessing.LabelEncoder()
all_data["city_label"] = encoder.fit_transform(all_data["city"])
all_data["main_category_label"] = encoder.fit_transform(all_data["main_category"])
all_data["secondary_category_label"] = encoder.fit_transform(
all_data["secondary_category"]
)
all_data = all_data.drop(
[
"city",
"shop_name",
"item_category_name",
"main_category",
"secondary_category",
],
axis=1,
)
    # Create price categories (<5, 5-10, 10-100, 100-200, 200-300, 300-500, 500-1000, >=1000; 0 = missing/other)
def price_category(row):
if row.item_price_median < 5.0:
val = 1
elif row.item_price_median < 10.0:
val = 2
elif row.item_price_median < 100.0:
val = 3
elif row.item_price_median < 200.0:
val = 4
elif row.item_price_median < 300.0:
val = 5
elif row.item_price_median < 500.0:
val = 6
elif row.item_price_median < 1000.0:
val = 7
        elif row.item_price_median >= 1000.0:
val = 8
else:
val = 0
return val
all_data["price_cat"] = all_data.apply(price_category, axis=1)
# Downgrade numeric data types
all_data = downcast_dtypes(all_data)
# Performance test dropping month_cnt
# all_data.drop('item_cnt_month', axis=1, inplace=True)
return all_data
all_data2 = enrich_data(all_data, items, shops, item_categories)
items_prices = all_data2[
["item_id", "shop_id", "date_block_num", "item_price_median", "price_cat"]
]
time.time() - ts
all_data2.head()
# Analyze monthly sales for all shops
all_data2["item_cnt_month"] = all_data2["item_cnt_month"].astype(np.float64)
count_monthly_sales = all_data2.groupby("date_block_num").item_cnt_month.sum(axis=0)
f = plt.figure()
ax = f.add_subplot(111)
plt.plot(count_monthly_sales)
plt.axvline(x=12, color="grey", linestyle="--") # Vertical grey line for December month
plt.axvline(x=24, color="grey", linestyle="--")
plt.xlabel("date_block_num")
plt.title("Monthly total sells")
plt.show()
# Analyze monthly sales for each price category
count_price_cat_sales = all_data2.groupby("price_cat").item_cnt_month.sum(axis=0)
f = plt.figure()
ax = f.add_subplot(111)
plt.plot(count_price_cat_sales)
plt.xticks(
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[
"others",
"0<p<5₽",
"5<p<10₽",
"10<p<100₽",
"100<p<200₽",
"200<p<300₽",
"300<p<500₽",
"500<p<1000₽",
">1000₽",
],
rotation="45",
)
plt.title("Price category sells")
plt.show()
# Correlation matrix for monthly sales
all_data2 = all_data2[all_data2["date_block_num"] < 34]
# all_data2 = all_data2.drop(columns=['in_test', 'is_new_item'], inplace=True)
# Correlation matrix
f = plt.figure(figsize=(9, 5))
plt.matshow(all_data2.corr(), fignum=f.number)
plt.xticks(range(all_data2.shape[1]), all_data2.columns, fontsize=10, rotation=90)
plt.yticks(range(all_data2.shape[1]), all_data2.columns, fontsize=10)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
##########################################################################################################
###################################### STEP 3: MISSINGS CLEANING #####################################
##########################################################################################################
# Count missing values. Surprisingly, there are none
missings_count = {col: all_data[col].isnull().sum() for col in all_data.columns}
missings = pd.DataFrame.from_dict(missings_count, orient="index")
print(missings.nlargest(30, 0))
##########################################################################################################
##################################### STEP 4: FEATURE ENGINEERING ####################################
##########################################################################################################
ts = time.time()
# Extend all_data for all item/shop pairs.
def add_all_pairs(sales_train, test, items, shops, item_categories, items_prices):
tmp = []
for month in range(34):
sales = sales_train[sales_train.date_block_num == month]
tmp.append(
np.array(
list(product([month], sales.shop_id.unique(), sales.item_id.unique())),
dtype="int16",
)
)
tmp = pd.DataFrame(np.vstack(tmp), columns=["date_block_num", "shop_id", "item_id"])
tmp["date_block_num"] = tmp["date_block_num"].astype(np.int8)
tmp["shop_id"] = tmp["shop_id"].astype(np.int8)
tmp["item_id"] = tmp["item_id"].astype(np.int16)
tmp.sort_values(["date_block_num", "shop_id", "item_id"], inplace=True)
sales_train["revenue"] = sales_train["item_price"] * sales_train["item_cnt_day"]
group = sales_train.groupby(["date_block_num", "shop_id", "item_id"]).agg(
{"item_cnt_day": ["sum"]}
)
group.columns = ["item_cnt_month"]
group.reset_index(inplace=True)
tmp = pd.merge(tmp, group, on=["date_block_num", "shop_id", "item_id"], how="left")
tmp["item_cnt_month"] = (
tmp["item_cnt_month"].fillna(0).clip(0, 20).astype(np.float16)
)
tmp = pd.concat(
[tmp, test],
ignore_index=True,
sort=False,
keys=["date_block_num", "shop_id", "item_id"],
)
# price_data = tmp.groupby(['shop_id', 'item_id', 'date_block_num', 'in_test', 'is_new_item'])['item_price'].median().rename('item_price_median').reset_index()
# tmp = tmp.join(price_data, on=[[]])
# Add item, shop and item_category details
tmp = tmp.join(items, on="item_id", rsuffix="_item")
tmp = tmp.join(shops, on="shop_id", rsuffix="_shop")
tmp = tmp.join(item_categories, on="item_category_id", rsuffix="_item_category")
tmp = pd.merge(
tmp, items_prices, on=["date_block_num", "shop_id", "item_id"], how="left"
)
tmp = tmp.drop(
columns=[
"item_id_item",
"shop_id_shop",
"item_category_id_item_category",
"item_name",
]
)
# Extract day, month, year & nºdays in each month
tmp["month"] = tmp["date_block_num"] % 12
tmp["days"] = tmp["month"].map(
pd.Series([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
)
# Extract main category and subcategory from category name
categories_split = tmp["item_category_name"].str.split("-")
tmp["main_category"] = categories_split.map(lambda row: row[0].strip())
tmp["secondary_category"] = categories_split.map(
lambda row: row[1].strip() if (len(row) > 1) else "N/A"
)
# Extract cities information from shop_name. Replace !Якутск by Якутск since it's the same city.
tmp["city"] = tmp["shop_name"].str.split(" ").map(lambda row: row[0])
tmp.loc[tmp.city == "!Якутск", "city"] = "Якутск"
tmp["shop_type"] = tmp["shop_name"].apply(
lambda x: "мтрц"
if "мтрц" in x
else "трц"
if "трц" in x
else "трк"
if "трк" in x
else "тц"
if "тц" in x
else "тк"
if "тк" in x
else "NO_DATA"
)
# Encode cities and categories
encoder = sklearn.preprocessing.LabelEncoder()
tmp["city_label"] = encoder.fit_transform(tmp["city"])
tmp["shop_type_label"] = encoder.fit_transform(tmp["shop_type"])
tmp["main_category_label"] = encoder.fit_transform(tmp["main_category"])
tmp["secondary_category_label"] = encoder.fit_transform(tmp["secondary_category"])
tmp = tmp.drop(
[
"ID",
"city",
"date",
"shop_name",
"item_category_name",
"main_category",
"secondary_category",
],
axis=1,
)
# Downgrade numeric data types
tmp = downcast_dtypes(tmp)
tmp.fillna(0, inplace=True)
return tmp
all_pairs = add_all_pairs(
sales_train, test, items, shops, item_categories, items_prices
)
time.time() - ts
##########################################################################################################
######################################## STEP 5: MEAN ENCODING #######################################
##########################################################################################################
ts = time.time()
# First downcast some more columns to speed up the mean encoding
all_pairs["date_block_num"] = all_pairs["date_block_num"].astype(np.int8)
all_pairs["city_label"] = all_pairs["city_label"].astype(np.int8)
all_pairs["item_cnt_month"] = all_pairs["item_cnt_month"].astype(np.int8)
all_pairs["item_category_id"] = all_pairs["item_category_id"].astype(np.int8)
all_pairs["main_category_label"] = all_pairs["main_category_label"].astype(np.int8)
all_pairs["secondary_category_label"] = all_pairs["secondary_category_label"].astype(
np.int8
)
# Function to calculate lags over different columns. A lag carries information about a variable from previous months
def calculate_lag(df, lag, column):
ancilla = df[["date_block_num", "shop_id", "item_id", column]]
for l in lag:
shift_ancilla = ancilla.copy()
shift_ancilla.columns = [
"date_block_num",
"shop_id",
"item_id",
column + "_lag_" + str(l),
]
shift_ancilla["date_block_num"] += l
df = pd.merge(
df, shift_ancilla, on=["date_block_num", "shop_id", "item_id"], how="left"
)
return df
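# Aside: on a toy, gap-free frame the same lag idea can be written with groupby + shift;
# the merge-based version above is used instead because it also handles months with no
# sales for a given shop/item pair. The frame below is a made-up illustration only.
_toy = pd.DataFrame(
    {
        "date_block_num": [0, 1, 2, 0, 1, 2],
        "shop_id": [5, 5, 5, 7, 7, 7],
        "item_id": [11, 11, 11, 11, 11, 11],
        "item_cnt_month": [3, 1, 4, 0, 2, 2],
    }
)
_toy["item_cnt_month_lag_1"] = _toy.groupby(["shop_id", "item_id"])[
    "item_cnt_month"
].shift(1)
print(_toy)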
# Function to specify lag columns, compute the item_cnt aggregate (mean) and call calculate_lag
def prepare_lag_columns(df, lag, column_list, name):
ancilla = df.groupby(column_list).agg({"item_cnt_month": ["mean"]})
ancilla.columns = [name]
ancilla.reset_index(inplace=True)
df = pd.merge(df, ancilla, on=column_list, how="left")
df[name] = df[name].astype(np.float16)
df = calculate_lag(df, lag, name)
df.drop([name], axis=1, inplace=True)
return df
# Auxiliary function to compute item_price aggregates (for trends). Lags will be calculated after this preparation
def prepare_lag_columns_price(df, column_list, name):
ancilla = sales_train.groupby(column_list).agg({"item_price": ["mean"]})
ancilla.columns = [name]
ancilla.reset_index(inplace=True)
df = pd.merge(df, ancilla, on=column_list, how="left")
df[name] = df[name].astype(np.float16)
return df
# Let's compute all lags for sales. Arguments of the function are: (df, lag_list, column_list, name of the column)
all_pairs = calculate_lag(all_pairs, [1, 2, 3, 4, 5, 6, 12], "item_cnt_month")
all_pairs = prepare_lag_columns(
all_pairs, [1], ["date_block_num", "item_id"], "total_avg_month_cnt"
)
all_pairs = prepare_lag_columns(
all_pairs, [1, 2, 3, 4, 5, 6, 12], ["date_block_num"], "item_avg_month_cnt"
)
all_pairs = prepare_lag_columns(
all_pairs,
[1, 2, 3, 4, 5, 6, 12],
["date_block_num", "shop_id"],
"shop_avg_month_cnt",
)
all_pairs = prepare_lag_columns(
all_pairs, [1], ["date_block_num", "city_label"], "city_avg_month_cnt"
)
all_pairs = prepare_lag_columns(
all_pairs,
[1],
["date_block_num", "item_id", "city_label"],
"item_city_avg_month_cnt",
)
all_pairs = prepare_lag_columns(
all_pairs, [1], ["date_block_num", "item_category_id"], "category_id_avg_month_cnt"
)
all_pairs = prepare_lag_columns(
all_pairs,
[1],
["date_block_num", "main_category_label"],
"main_category_avg_month_cnt",
)
all_pairs = prepare_lag_columns(
all_pairs,
[1],
["date_block_num", "secondary_category_label"],
"secondary_category_avg_month_cnt",
)
all_pairs = prepare_lag_columns(
all_pairs,
[1],
["date_block_num", "shop_id", "item_category_id"],
"shop_category_id_avg_month_cnt",
)
all_pairs = prepare_lag_columns(
all_pairs,
[1],
["date_block_num", "shop_id", "main_category_label"],
"shop_main_category_avg_month_cnt",
)
all_pairs = prepare_lag_columns(
all_pairs,
[1],
["date_block_num", "shop_id", "secondary_category_label"],
"shop_secondary_category_avg_month_cnt",
)
# For item_price the procedure is trickier. Compute both the overall item price and the monthly item price in order to compute the trend.
all_pairs = prepare_lag_columns_price(all_pairs, ["item_id"], "item_avg_price")
all_pairs = prepare_lag_columns_price(
all_pairs, ["date_block_num", "item_id"], "item_avg_price_month"
)
all_pairs = calculate_lag(all_pairs, [1, 2, 3, 4, 5, 6], "item_avg_price_month")
for lag in [1, 2, 3, 4, 5, 6]:
all_pairs["trend_price_lag_" + str(lag)] = (
all_pairs["item_avg_price_month_lag_" + str(lag)] - all_pairs["item_avg_price"]
) / all_pairs["item_avg_price"]
def clean_trend_price_lag(row):
for l in [1, 2, 3, 4, 5, 6]:
if row["trend_price_lag_" + str(l)]:
return row["trend_price_lag_" + str(l)]
return 0
# For some reason my kernel explodes when using df.apply() for all rows, so I had to segment it
dummy_1, dummy_2, dummy_3, dummy_4 = [], [], [], []
dummy_1 = pd.DataFrame(dummy_1)
dummy_2 = pd.DataFrame(dummy_2)
dummy_3 = pd.DataFrame(dummy_3)
dummy_4 = pd.DataFrame(dummy_4)
dummy_1 = all_pairs[:3000000].apply(clean_trend_price_lag, axis=1)
dummy_2 = all_pairs[3000000:6000000].apply(clean_trend_price_lag, axis=1)
dummy_3 = all_pairs[6000000:9000000].apply(clean_trend_price_lag, axis=1)
dummy_4 = all_pairs[9000000:].apply(clean_trend_price_lag, axis=1)
all_pairs["trend_price_lag"] = pd.concat([dummy_1, dummy_2, dummy_3, dummy_4])
all_pairs["trend_price_lag"] = all_pairs["trend_price_lag"].astype(np.float16)
all_pairs["trend_price_lag"].fillna(0, inplace=True)
# all_pairs.drop(['item_avg_price','item_avg_price_month'], axis=1, inplace=True)
for i in [1, 2, 3, 4, 5, 6]:
all_pairs.drop(
["item_avg_price_month_lag_" + str(i), "trend_price_lag_" + str(i)],
axis=1,
inplace=True,
)
all_pairs.drop("shop_type", axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = sales_train.groupby(["date_block_num", "shop_id"]).agg({"revenue": ["sum"]})
group.columns = ["date_shop_revenue"]
group.reset_index(inplace=True)
all_pairs = pd.merge(all_pairs, group, on=["date_block_num", "shop_id"], how="left")
all_pairs["date_shop_revenue"] = all_pairs["date_shop_revenue"].astype(np.float32)
group = group.groupby(["shop_id"]).agg({"date_shop_revenue": ["mean"]})
group.columns = ["shop_avg_revenue"]
group.reset_index(inplace=True)
all_pairs = pd.merge(all_pairs, group, on=["shop_id"], how="left")
all_pairs["shop_avg_revenue"] = all_pairs["shop_avg_revenue"].astype(np.float32)
all_pairs["delta_revenue"] = (
all_pairs["date_shop_revenue"] - all_pairs["shop_avg_revenue"]
) / all_pairs["shop_avg_revenue"]
all_pairs["delta_revenue"] = all_pairs["delta_revenue"].astype(np.float16)
all_pairs = calculate_lag(all_pairs, [1], "delta_revenue")
all_pairs.drop(
["date_shop_revenue", "shop_avg_revenue", "delta_revenue"], axis=1, inplace=True
)
time.time() - ts
ts = time.time()
all_pairs["item_shop_first_sale"] = all_pairs["date_block_num"] - all_pairs.groupby(
["item_id", "shop_id"]
)["date_block_num"].transform("min")
all_pairs["item_first_sale"] = all_pairs["date_block_num"] - all_pairs.groupby(
"item_id"
)["date_block_num"].transform("min")
time.time() - ts
# Correlation matrix for monthly sales
all_pairs2 = all_pairs[all_pairs["date_block_num"] < 34]
# all_data2 = all_data2.drop(columns=['in_test', 'is_new_item'], inplace=True)
# Correlation matrix
f = plt.figure(figsize=(9, 5))
plt.matshow(all_pairs2.corr(), fignum=f.number)
plt.xticks(range(all_pairs2.shape[1]), all_pairs2.columns, fontsize=7, rotation=90)
plt.yticks(range(all_pairs2.shape[1]), all_pairs2.columns, fontsize=7)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
ts = time.time()
all_pairs = all_pairs[all_pairs.date_block_num > 11]
time.time() - ts
ts = time.time()
def fill_na(df):
for col in df.columns:
if ("_lag_" in col) & (df[col].isnull().any()):
if "item_cnt" in col:
df[col].fillna(0, inplace=True)
return df
all_pairs = fill_na(all_pairs)
all_pairs.fillna(0, inplace=True)
time.time() - ts
all_pairs.columns
all_pairs.drop(
[
"item_price_median",
"price_cat",
"item_avg_price",
"item_avg_price_month",
"main_category_avg_month_cnt_lag_1",
"secondary_category_avg_month_cnt_lag_1",
"shop_main_category_avg_month_cnt_lag_1",
"shop_secondary_category_avg_month_cnt_lag_1",
],
inplace=True,
axis=1,
)
all_pairs.to_pickle("data.pkl")
data = pd.read_pickle("data.pkl")
X_train = data[data.date_block_num < 33].drop(["item_cnt_month"], axis=1)
Y_train = data[data.date_block_num < 33]["item_cnt_month"]
X_valid = data[data.date_block_num == 33].drop(["item_cnt_month"], axis=1)
Y_valid = data[data.date_block_num == 33]["item_cnt_month"]
X_test = data[data.date_block_num == 34].drop(["item_cnt_month"], axis=1)
gc.collect()
model = lgb.LGBMRegressor(
n_estimators=10000,
learning_rate=0.3,
min_child_weight=300,
# num_leaves=32,
colsample_bytree=0.8,
subsample=0.8,
max_depth=8,
# reg_alpha=0.04,
# reg_lambda=0.073,
# min_split_gain=0.0222415,
verbose=1,
seed=21,
)
model.fit(
X_train,
Y_train,
eval_metric="rmse",
eval_set=[(X_train, Y_train), (X_valid, Y_valid)],
verbose=1,
early_stopping_rounds=10,
)
# Cross validation accuracy for 3 folds
# scores = cross_val_score(model, X_train, Y_train, cv=3)
# print(scores)
"""
ts = time.time()
model = XGBRegressor(
max_depth=8,
n_estimators=1000,
min_child_weight=300,
colsample_bytree=0.8,
subsample=0.8,
eta=0.3,
seed=21)
model.fit(
X_train,
Y_train,
eval_metric="rmse",
eval_set=[(X_train, Y_train), (X_valid, Y_valid)],
verbose=True,
early_stopping_rounds = 10)
time.time() - ts
"""
Y_pred = model.predict(X_valid).clip(0, 20)
Y_test = model.predict(X_test).clip(0, 20)
submission = pd.DataFrame({"ID": test.index, "item_cnt_month": Y_test})
submission.to_csv("submission.csv", index=False)
# save predictions for an ensemble
pickle.dump(Y_pred, open("xgb_train.pickle", "wb"))
pickle.dump(Y_test, open("xgb_test.pickle", "wb"))
"""
def plot_features(booster, figsize):
fig, ax = plt.subplots(1,1,figsize=figsize)
return plot_importance(booster=booster, ax=ax)
plot_features(model, (10,14))
"""
submission
X_train
"""
del sales_train,
del test
del all_data
del all_data2
del group
del item_categories
del items
del shops
del data
gc.collect();
"""
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Features
features = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/features.csv.zip"
)
features
# # Train
train = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/train.csv.zip"
)
train.head()
train.dtypes
train["Date"] = pd.to_datetime(train["Date"])
# Drop the Date column (and the Weekly_Sales target) from the train data
train2 = train.drop(["Date", "Weekly_Sales"], axis=1)
# # Stores
stores = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/stores.csv"
)
stores.head()
# # Test
test = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/test.csv.zip"
)
test.head()
test.dtypes
test["Date"] = pd.to_datetime(test["Date"])
test2 = test.drop(["Date"], axis=1)
# # Modeling
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
rf.fit(train2, train["Weekly_Sales"])
result = rf.predict(test2)
sub = pd.read_csv(
"/kaggle/input/walmart-recruiting-store-sales-forecasting/sampleSubmission.csv.zip"
)
sub.head()
sub["Weekly_Sales"] = result
sub.head()
sub.to_csv("Walmart_sub.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Importing the data
data = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/train.csv")
data
data = data.drop("Unnamed: 0", axis=1)
x_train = data.drop("Completion_rate", axis=1)
y_train = data["Completion_rate"]
print(x_train, y_train)
# ### Creating a simple regression model
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.compose import TransformedTargetRegressor
from sklearn.preprocessing import QuantileTransformer
from sklearn.model_selection import GridSearchCV
param_grid = {
"n_estimators": [200],
"max_depth": [17, 19],
}
grid = GridSearchCV(
xgb.XGBRFRegressor(), param_grid, refit=True, verbose=3, n_jobs=-1
)
regr_trans = TransformedTargetRegressor(
regressor=grid, transformer=QuantileTransformer(output_distribution="normal")
)
grid_result = regr_trans.fit(x_train, y_train)
best_params = grid_result.regressor_.best_params_
print(best_params)
# using best params to create and fit model
best_model = xgb.XGBRFRegressor(
max_depth=best_params["max_depth"], n_estimators=best_params["n_estimators"]
)
regr_trans = TransformedTargetRegressor(
regressor=best_model, transformer=QuantileTransformer(output_distribution="normal")
)
regr_trans.fit(x_train, y_train)
# dtrain_reg = xgb.DMatrix(x_train, y_train, enable_categorical=True)
# params = {"objective": "reg:squarederror"}
# n = 100
# model = xgb.train(
# params=params,
# dtrain=dtrain_reg,
# num_boost_round=n,
# )
# print(model)
test = pd.read_csv("/kaggle/input/us-college-completion-rate-analysis/x_test.csv")
test = test.drop("Unnamed: 0", axis=1)
x_test = test
# dtest_reg = xgb.DMatrix(x_test, enable_categorical=True)
y_test = regr_trans.predict(x_test)
print(y_test)
# ### Creating our submission
submission = pd.DataFrame.from_dict({"Completion_rate": y_test})
submission
submission.to_csv("submission.csv", index=True, index_label="id")
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from collections import defaultdict
import copy
import random
from PIL import Image
import shutil
from urllib.request import urlretrieve
import os
import cv2
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torchvision
import torch.nn.functional as F
import albumentations as A
from albumentations.pytorch import ToTensorV2
import glob
from tqdm import tqdm
from torchvision.datasets import OxfordIIITPet
from torchvision.utils import make_grid
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import random_split, ConcatDataset
from torchvision import transforms
import torchvision.transforms as tt
cudnn.benchmark = True
# Metrics
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
import itertools
#
# Documentation can be found below to understand how the data set can be constructed and retrieved in different ways:
# * https://pytorch.org/vision/stable/_modules/torchvision/datasets/oxford_iiit_pet.html#OxfordIIITPet
# * https://pytorch.org/vision/stable/generated/torchvision.datasets.OxfordIIITPet.html#torchvision.datasets.OxfordIIITPet
# * https://discuss.pytorch.org/t/how-to-split-dataset-into-test-and-validation-sets/33987
# * https://blog.paperspace.com/dataloaders-abstractions-pytorch/
# * https://albumentations.ai/docs/examples/pytorch_semantic_segmentation/
# * https://www.tensorflow.org/datasets/catalog/oxford_iiit_pet
# # About Data
# They have created a 37-category pet dataset with roughly 200 images for each class. The images have large variations in scale, pose and lighting. All images have an associated ground truth annotation of breed, head ROI, and pixel-level trimap segmentation.
# - https://www.robots.ox.ac.uk/~vgg/data/pets/
# # Prepare Data
# Here, I first tried the ready-made PyTorch dataset utilities to retrieve the data.
# However, there is an important limitation: PyTorch and TensorFlow do not let you create a custom train-test-val split out of the box. Moreover, it is more complicated to work with data retrieved through such a framework.
#
# The original dataset tools split the data as 50% training / 50% test, which we rarely prefer. I will split it as 80% trainval / 20% test, then split trainval into 70% training and 30% validation.
# Therefore, I manually handled and split the data. The entire preparation can be found below.
class TqdmUpTo(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def download_url(url, filepath):
directory = os.path.dirname(os.path.abspath(filepath))
os.makedirs(directory, exist_ok=True)
if os.path.exists(filepath):
print("Dataset already exists on the disk. Skipping download.")
return
with TqdmUpTo(
unit="B",
unit_scale=True,
unit_divisor=1024,
miniters=1,
desc=os.path.basename(filepath),
) as t:
urlretrieve(url, filename=filepath, reporthook=t.update_to, data=None)
t.total = t.n
def extract_archive(filepath):
extract_dir = os.path.dirname(os.path.abspath(filepath))
shutil.unpack_archive(filepath, extract_dir)
def merge_trainval_test(filepath):
"""
# Image CLASS-ID SPECIES BREED ID
# ID: 1:37 Class ids
# SPECIES: 1:Cat 2:Dog
# BREED ID: 1-25:Cat 1:12:Dog
    # All images with 1st letter as capital are cat images
# images with small first letter are dog images
"""
merge_dir = os.path.dirname(os.path.abspath(f"{filepath}/annotations/data.txt"))
# if os.path.exists(merge_dir):
# print("Merged data is already exists on the disk. Skipping creating new data file.")
# return
df = pd.read_csv(
f"{filepath}/annotations/trainval.txt",
sep=" ",
names=["Image", "ID", "SPECIES", "BREED ID"],
)
df2 = pd.read_csv(
f"{filepath}/annotations/test.txt",
sep=" ",
names=["Image", "ID", "SPECIES", "BREED ID"],
)
frame = [df, df2]
df = pd.concat(frame)
df.reset_index(drop=True)
df.to_csv(f"{filepath}/annotations/data.txt", index=None, sep=" ")
print("Merged data is created.")
# ## Retrieving Data
# **For below implementation, you can simply adjust the path for Google Drive and also use Colab or local.**
dataset_directory = os.path.join("./dataset")
os.mkdir(dataset_directory)
filepath = os.path.join(dataset_directory, "images.tar.gz")
download_url(
url="https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz",
filepath=filepath,
)
extract_archive(filepath)
filepath = os.path.join(dataset_directory, "annotations.tar.gz")
download_url(
url="https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz",
filepath=filepath,
)
extract_archive(filepath)
filepath = os.path.join(dataset_directory)
merge_trainval_test(filepath)
dataset = pd.read_csv(f"{filepath}/annotations/data.txt", sep=" ")
dataset
# Now we are able to create train/test/val splits of any size, with stratified labels.
# The custom data.txt file includes all the information we need.
# # Image CLASS-ID SPECIES BREED ID
# # ID: 1:37 Class ids
# # SPECIES: 1:Cat 2:Dog
# # BREED ID: 1-25:Cat 1:12:Dog
# # All images with 1st letter as capital are cat images
# # images with small first letter are dog images
# We need to work out manually which id belongs to which class.
# After we create a map of the indexes, it will be used for mapping idx to class.
image_ids = []
labels = []
with open(f"{filepath}/annotations/trainval.txt") as file:
for line in file:
image_id, label, *_ = line.strip().split()
image_ids.append(image_id)
labels.append(int(label) - 1)
classes = [
" ".join(part.title() for part in raw_cls.split("_"))
for raw_cls, _ in sorted(
{
(image_id.rsplit("_", 1)[0], label)
for image_id, label in zip(image_ids, labels)
},
key=lambda image_id_and_label: image_id_and_label[1],
)
]
idx_to_class = dict(zip(range(len(classes)), classes))
idx_to_class
# ### (Note: the original IDs run from 1 to 37, not 0 to 36; we must shift them to the range 0-36 to use the dictionary)
# ### ID is between 1-37 but we need 0-36
dataset["nID"] = dataset["ID"] - 1
decode_map = idx_to_class
def decode_label(label):
return decode_map[int(label)]
dataset["class"] = dataset["nID"].apply(lambda x: decode_label(x))
dataset
# Now we have everything we need
# - string class
# - id
# - species
# - breed
# - image id
dataset["class"].value_counts()
#
# trainval -> image names of train and validation, x_test -> image names of test
# y_trainval -> image classes of train and validation, y_test -> image classes of test
# x_train, x_val -> image names of train and validation (after the second split)
# y_train, y_val -> image classes of train and validation (after the second split)
# ### Using stratify is an important concept to maintain label distribution while splitting.
from sklearn.model_selection import train_test_split
y = dataset["class"]
x = dataset["Image"]
trainval, x_test, y_trainval, y_test = train_test_split(
x, y, stratify=y, test_size=0.2, random_state=42
)
x_train, x_val, y_train, y_val = train_test_split(
trainval, y_trainval, stratify=y_trainval, test_size=0.3, random_state=42
)
# My custom data set contains:
# - training: 4115 samples
# - validation: 1764 samples
# - test: 1470 samples
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
df_train = pd.DataFrame(y_train)
print("About data:")
df_train.describe()
print("\n****** Class Distribution ******")
df_train["class"].value_counts()
df_val = pd.DataFrame(y_val)
print("About data:")
df_val.describe()
print("\n****** Class Distribution ******")
df_val["class"].value_counts()
df_test = pd.DataFrame(y_test)
print("About data:")
df_test.describe()
print("\n****** Class Distribution ******")
df_test["class"].value_counts()
# End of data preparation
# ------
# -------
# # Data Analysis
root_directory = os.path.join(dataset_directory)
images_directory = os.path.join(root_directory, "images")
masks_directory = os.path.join(root_directory, "annotations", "trimaps")
train_images_filenames = x_train.reset_index(drop=True)
val_images_filenames = x_val.reset_index(drop=True)
test_images_filenames = x_test.reset_index(drop=True)
print(
" train size: ",
len(train_images_filenames),
"\n",
"val size: ",
len(val_images_filenames),
"\n",
"test size: ",
len(test_images_filenames),
)
def preprocess_mask(mask):
    # Oxford-IIIT trimap values: 1 = pet, 2 = background, 3 = border
    mask = np.float32(mask)
mask[mask == 2.0] = 0.0
mask[(mask == 1.0) | (mask == 3.0)] = 1.0
return mask
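# A quick illustrative check (not part of the original notebook) of the mapping above on a toy
# trimap, using the 1 = pet, 2 = background, 3 = border convention:
_toy_trimap = np.array([[1, 2], [3, 2]], dtype=np.uint8)
print(preprocess_mask(_toy_trimap))  # expected: [[1. 0.] [1. 0.]]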
def display_image_grid(
images_filenames, images_directory, masks_directory, predicted_masks=None
):
cols = 3 if predicted_masks else 2
rows = len(images_filenames)
figure, ax = plt.subplots(nrows=rows, ncols=cols, figsize=(10, 24))
for i, image_filename in enumerate(images_filenames):
image = Image.open(
os.path.join(images_directory, f"{image_filename}.jpg")
).convert("RGB")
mask = Image.open(os.path.join(masks_directory, f"{image_filename}.png"))
mask = preprocess_mask(mask)
ax[i, 0].imshow(image)
ax[i, 1].imshow(mask, interpolation="nearest")
ax[i, 0].set_title("Image")
ax[i, 1].set_title("Ground truth mask")
ax[i, 0].set_axis_off()
ax[i, 1].set_axis_off()
if predicted_masks:
predicted_mask = predicted_masks[i]
ax[i, 2].imshow(predicted_mask, interpolation="nearest")
ax[i, 2].set_title("Predicted mask")
ax[i, 2].set_axis_off()
plt.tight_layout()
plt.show()
display_image_grid(train_images_filenames[:10], images_directory, masks_directory)
# ### This section shows what a few common augmentations look like, in case you prefer to augment the data. I will not use augmentations while training.
example_image_filename = train_images_filenames[10]
image = plt.imread(os.path.join(images_directory, f"{example_image_filename}.jpg"))
resized_image = A.resize(image, height=256, width=256)
padded_image = A.pad(image, min_height=512, min_width=512)
padded_constant_image = A.pad(
image, min_height=512, min_width=512, border_mode=cv2.BORDER_CONSTANT
)
cropped_image = A.center_crop(image, crop_height=256, crop_width=256)
InteractiveShell.ast_node_interactivity = "last_expr_or_assign"
figure, ax = plt.subplots(nrows=1, ncols=5, figsize=(18, 10))
ax.ravel()[0].imshow(image)
ax.ravel()[0].set_title("Original image")
ax.ravel()[1].imshow(resized_image)
ax.ravel()[1].set_title("Resized image")
ax.ravel()[2].imshow(cropped_image)
ax.ravel()[2].set_title("Cropped image")
ax.ravel()[3].imshow(padded_image)
ax.ravel()[3].set_title("Image padded with reflection")
ax.ravel()[4].imshow(padded_constant_image)
ax.ravel()[4].set_title("Image padded with constant padding")
plt.tight_layout()
plt.show()
# # End of data analysis
# ----
# # Create Custom DataLoader
# train_images_filenames = train_images_filenames.apply(lambda x:x + '.jpg')
# val_images_filenames = val_images_filenames.apply(lambda x:x + '.jpg')
from PIL import Image
class OxfordPetDataset(Dataset):
def __init__(
self,
images_filenames,
images_directory,
masks_directory,
transform=None,
transform_mask=None,
):
self.images_filenames = images_filenames
self.images_directory = images_directory
self.masks_directory = masks_directory
self.transform = transform
self.transform_mask = transform_mask
def __len__(self):
return len(self.images_filenames)
def __getitem__(self, idx):
image_filename = self.images_filenames.loc[idx] + ".jpg"
image = Image.open(os.path.join(self.images_directory, image_filename)).convert(
"RGB"
)
mask = Image.open(
os.path.join(self.masks_directory, image_filename.replace(".jpg", ".png"))
)
# mask = preprocess_mask(mask)
if self.transform is not None:
transformed = self.transform(image)
transformed_m = self.transform_mask(mask)
image = transformed
mask = transformed_m
return image, mask
train_transform = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
target_transform = transforms.Compose(
[
transforms.PILToTensor(),
transforms.Resize((256, 256)),
transforms.Lambda(lambda x: (x - 1).squeeze().type(torch.LongTensor)),
]
)
train_dataset = OxfordPetDataset(
train_images_filenames,
images_directory,
masks_directory,
transform=train_transform,
transform_mask=target_transform,
)
val_dataset = OxfordPetDataset(
val_images_filenames,
images_directory,
masks_directory,
transform=train_transform,
transform_mask=target_transform,
)
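# A quick sanity check (illustrative, not part of the original notebook): each sample should come
# back as a (3, 256, 256) float image tensor and a (256, 256) LongTensor mask, given the transforms above.
_sample_img, _sample_mask = train_dataset[0]
print(_sample_img.shape, _sample_img.dtype)   # expected: torch.Size([3, 256, 256]) torch.float32
print(_sample_mask.shape, _sample_mask.dtype)  # expected: torch.Size([256, 256]) torch.int64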
torch.cuda.is_available()
# ### If you are going to augment the data, you can use this part to check that the masks have changed correctly. Otherwise, just ignore.
# def visualize_augmentations(dataset, idx=0, samples=5):
# dataset = copy.deepcopy(dataset)
# dataset.transform = .Compose([t for t in dataset.transform if not isinstance(t, (A.Normalize, ToTensorV2))])
# figure, ax = plt.subplots(nrows=samples, ncols=2, figsize=(10, 24))
# for i in range(samples):
# image, mask = dataset[idx]
# print(mask.shape)
# ax[i, 0].imshow(image)
# ax[i, 1].imshow(mask, interpolation="nearest")
# ax[i, 0].set_title("Augmented image")
# ax[i, 1].set_title("Augmented mask")
# ax[i, 0].set_axis_off()
# ax[i, 1].set_axis_off()
# plt.tight_layout()
# plt.show()
# random.seed(42)
# visualize_augmentations(train_dataset, idx=128)
# # LinkNet
from torchvision.models import resnet
nonlinearity = nn.ReLU
class DecoderBlock(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C/4, H, W
self.deconv2 = nn.ConvTranspose2d(
in_channels // 4, in_channels // 4, 3, stride=2, padding=1, output_padding=1
)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
self.relu2 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
self.relu3 = nonlinearity(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu3(x)
return x
class LinkNet(nn.Module):
def __init__(self, num_classes, num_channels=3, encoder="resnet34"):
super().__init__()
assert encoder in ["resnet18", "resnet34"]
filters = [64, 128, 256, 512]
        # respect the encoder argument (resnet18 and resnet34 share the same filter sizes)
        res = getattr(resnet, encoder)(pretrained=True)
self.firstconv = res.conv1
self.firstbn = res.bn1
self.firstrelu = res.relu
self.firstmaxpool = res.maxpool
self.encoder1 = res.layer1
self.encoder2 = res.layer2
self.encoder3 = res.layer3
self.encoder4 = res.layer4
# Decoder
self.decoder4 = DecoderBlock(filters[3], filters[2])
self.decoder3 = DecoderBlock(filters[2], filters[1])
self.decoder2 = DecoderBlock(filters[1], filters[0])
self.decoder1 = DecoderBlock(filters[0], filters[0])
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nonlinearity(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nonlinearity(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
x = self.finaldeconv1(d1)
x = self.finalrelu1(x)
x = self.finalconv2(x)
x = self.finalrelu2(x)
x = self.finalconv3(x)
return x
# - https://stackoverflow.com/questions/70279287/runtimeerror-expected-scalar-type-long-but-found-float-pytorch
# This error was a nightmare; the solution is in the link above.
# - https://discuss.pytorch.org/t/target-size-torch-size-3-3-256-256-must-be-the-same-as-input-size-torch-size-3-65536/149359
# Also this error. Mine was (batch_size, 3, 256, 256) vs (batch_size, 256, 256). The problem came down to using the wrong loss function.
# I solved many other problems while coding, but couldn't find references for all of them.
# **Writing code from scratch can be an annoying process of figuring out where the problems are.**
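# To make the two errors above concrete, here is a minimal illustrative sketch (not from the original
# notebook, using the torch / nn imports already available here) of the shapes and dtypes that
# nn.CrossEntropyLoss expects for 3-class segmentation:
_ce = nn.CrossEntropyLoss()
_ce_logits = torch.randn(2, 3, 256, 256)            # float model outputs: (batch, classes, H, W)
_ce_targets = torch.randint(0, 3, (2, 256, 256))    # integer class indices: (batch, H, W), LongTensor
print(_ce(_ce_logits, _ce_targets).item())          # works
# Passing float targets of shape (batch, H, W) raises "expected scalar type Long but found Float",
# and the size-mismatch error above came from a loss that expects targets shaped like the logits
# (a binary/BCE-style loss) rather than CrossEntropyLoss with class-index targets.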
# import torchvision
# from torchview import draw_graph
# model_graph = draw_graph(model, input_size=(1,3,256,256), expand_nested=True)
# model_graph.visual_graph
class MetricMonitor:
def __init__(self, float_precision=3):
self.float_precision = float_precision
self.reset()
def reset(self):
self.metrics = defaultdict(lambda: {"val": 0, "count": 0, "avg": 0})
def update(self, metric_name, val):
metric = self.metrics[metric_name]
metric["val"] += val
metric["count"] += 1
metric["avg"] = metric["val"] / metric["count"]
def __str__(self):
return " | ".join(
[
"{metric_name}: {avg:.{float_precision}f}".format(
metric_name=metric_name,
avg=metric["avg"],
float_precision=self.float_precision,
)
for (metric_name, metric) in self.metrics.items()
]
)
def create_model(params):
model = LinkNet(3)
model = model.to(params["device"])
return model
from torchmetrics.functional import dice
def fit(model, train_loader, val_loader, params):
torch.cuda.empty_cache()
criterion = nn.CrossEntropyLoss().to(params["device"])
optimizer = torch.optim.Adam(model.parameters(), lr=params["lr"])
history = pd.DataFrame(
columns=[
"end_loss",
"end_correct",
"end_dice",
"end_val_loss",
"end_val_correct",
"end_val_dice",
]
)
for epoch in range(1, params["epochs"] + 1):
loss = 0
correct = 0
dice_score = 0
train_loss = 0
train_correct = 0
train_dice_score = 0
val_loss = 0
val_correct = 0
val_dice_score = 0
# Train
metric_monitor = MetricMonitor()
stream = tqdm(train_loader)
for i, (images, targets) in enumerate(stream, start=1):
images = images.to(params["device"], non_blocking=True)
targets = targets.to(params["device"], non_blocking=True)
outputs = model(images)
loss = criterion(outputs, targets)
metric_monitor.update("Loss", loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
stream.set_description(
"Epoch: {epoch}. Train. {metric_monitor}".format(
epoch=epoch, metric_monitor=metric_monitor
)
)
train_loss = loss.item()
_, pred = torch.max(outputs, 1)
correct = torch.mean((pred == targets).type(torch.float64))
dice_score = dice(pred, targets, average="macro", num_classes=3)
print(
"-> Epoch: {:.1f}. Train. Dice Score: {:.3f} Accuracy: {:.3f}".format(
epoch, dice_score, correct.cpu().numpy()
)
)
with torch.no_grad():
stream = tqdm(val_loader)
for i, (images, targets) in enumerate(stream, start=1):
images = images.to(params["device"], non_blocking=True)
targets = targets.to(params["device"], non_blocking=True)
outputs = model(images)
loss = criterion(outputs, targets)
metric_monitor.update("Loss", loss.item())
stream.set_description(
"Epoch: {epoch}. Validation. {metric_monitor}".format(
epoch=epoch, metric_monitor=metric_monitor
)
)
val_loss = loss.item()
_, pred = torch.max(outputs, 1)
val_correct = torch.mean((pred == targets).type(torch.float32))
val_dice_score = dice(pred, targets, average="macro", num_classes=3)
print(
"-> Epoch: {:.1f}. Validation. Dice Score: {:.3f} Accuracy: {:.3f}".format(
epoch, val_dice_score, val_correct.cpu().numpy()
)
)
history.loc[len(history.index)] = [
train_loss,
correct.cpu().numpy(),
dice_score.cpu().numpy(),
val_loss,
val_correct.cpu().numpy(),
val_dice_score.cpu().numpy(),
]
return history
###########
#
# After some thought, I created a custom history dataframe for this part. It holds the training loss, training accuracy, training dice score, validation loss, validation accuracy and validation dice score.
# **The history columns can easily be reconfigured with this approach.**
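# Illustrative only: the same pattern -- a DataFrame with named columns plus
# history.loc[len(history.index)] = [...] -- extends naturally to any extra metric you want to log.
_demo_history = pd.DataFrame(columns=["end_loss", "end_val_loss", "end_val_iou"])
_demo_history.loc[len(_demo_history.index)] = [0.42, 0.47, 0.61]  # hypothetical values
print(_demo_history)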
params = {
"device": "cuda",
"lr": 0.001,
"batch_size": 32,
"num_workers": 2,
"epochs": 15,
}
train_loader = DataLoader(
train_dataset,
batch_size=params["batch_size"],
shuffle=True,
num_workers=params["num_workers"],
pin_memory=False,
)
val_loader = DataLoader(
val_dataset,
batch_size=params["batch_size"],
shuffle=True,
num_workers=params["num_workers"],
pin_memory=False,
)
model = create_model(params)
history = fit(model, train_loader, val_loader, params)
history
# torch.save(model.state_dict(), 'linknet.pth')
class OxfordPetInferenceDataset(Dataset):
def __init__(self, images_filenames, images_directory, transform=None):
self.images_filenames = images_filenames
self.images_directory = images_directory
self.transform = transform
def __len__(self):
return len(self.images_filenames)
def __getitem__(self, idx):
image_filename = self.images_filenames[idx] + ".jpg"
image = cv2.imread(os.path.join(self.images_directory, image_filename))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
original_size = tuple(image.shape[:2])
if self.transform is not None:
transformed = self.transform(image=image)
image = transformed["image"]
return image, original_size
test_transform = A.Compose(
[
A.Resize(256, 256),
A.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
ToTensorV2(),
]
)
test_dataset = OxfordPetInferenceDataset(
test_images_filenames,
images_directory,
transform=test_transform,
)
def predict(model, params, test_dataset, batch_size):
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=params["num_workers"],
pin_memory=True,
)
model.eval()
predictions = []
with torch.no_grad():
for images, (original_heights, original_widths) in test_loader:
images = images.to(params["device"], non_blocking=True)
output = model(images).squeeze()
_, predicted_masks = torch.max(output, 1)
predicted_masks = predicted_masks.cpu().numpy()
for predicted_mask, original_height, original_width in zip(
predicted_masks, original_heights.numpy(), original_widths.numpy()
):
predictions.append((predicted_mask, original_height, original_width))
return predictions
predictions = predict(model, params, test_dataset, batch_size=16)
print(predictions)
predicted_masks = []
for predicted_256x256_mask, original_height, original_width in predictions:
full_sized_mask = A.resize(
predicted_256x256_mask,
height=original_height,
width=original_width,
interpolation=cv2.INTER_NEAREST,
)
predicted_masks.append(full_sized_mask)
display_image_grid(
test_images_filenames[:10],
images_directory,
masks_directory,
predicted_masks=predicted_masks,
)
# plot loss by epochs
# create plots
plt.subplots(figsize=(18, 4))
min_loss = np.min(history["end_val_loss"])
plt.subplot(1, 3, 1)
plt.plot(history["end_loss"], "bo", label="Training loss")
plt.plot(history["end_val_loss"], "cornflowerblue", label="Validation loss")
plt.title("Loss by Epochs")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.axhline(y=min_loss, color="darkslategray", linestyle="--")
plt.legend()
# plot accuracy by epochs
plt.subplot(1, 3, 2)
plt.plot(history["end_val_correct"], "cornflowerblue", label="Accuracy")
plt.title("Pixel Accuracy by Epochs")
plt.xlabel("Epochs")
plt.ylabel("Pixel Accuracy")
plt.legend()
# plot dice score by epochs
plt.subplot(1, 3, 3)
plt.plot(history["end_dice"], "cornflowerblue", label="Dice Score")
plt.title("Dice Score by Epochs")
plt.xlabel("Epochs")
plt.ylabel("Dice Score")
plt.legend()
class test(Dataset):
def __init__(self, images_filename, images_directory, transform=None):
self.images_filenames = images_filename
self.images_directory = images_directory
self.transform = transform
def __len__(self):
return len(self.images_filenames)
def __getitem__(self, idx):
# testjzcpath='/kaggle/input/part2-5-3/part2-5'
# image_filename = os.listdir(testjzcpath)
image_filename = self.images_filenames[idx] + ".jpg"
image = cv2.imread(os.path.join(self.images_directory, image_filename))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
original_size = tuple(image.shape[:2])
if self.transform is not None:
transformed = self.transform(image=image)
image = transformed["image"]
return image, original_size
test1 = "/kaggle/input/part2-5/Part2-5"
df = pd.DataFrame({"image": ["img1", "img2", "img3"]})
test_images_filename1 = df["image"]
test_dataset1 = test(
test_images_filename1,
test1,
transform=test_transform,
)
print(test_dataset1)
predictions1 = predict(model, params, test_dataset1, batch_size=3)
predicted_test1 = []
for predicted_256x256_mask, original_height, original_width in predictions1:
full_sized_mask1 = A.resize(
predicted_256x256_mask,
height=original_height,
width=original_width,
interpolation=cv2.INTER_NEAREST,
)
predicted_test1.append(full_sized_mask1)
def display_image_test(images_filenames, images_directory, predicted_masks):
# cols = 3 if predicted_masks else 2
rows = len(images_filenames)
figure, ax = plt.subplots(nrows=rows, ncols=2, figsize=(10, 24))
for i, image_filename in enumerate(images_filenames):
image = Image.open(
os.path.join(images_directory, f"{image_filename}.jpg")
).convert("RGB")
ax[i, 0].imshow(image)
ax[i, 0].set_title("Image")
ax[i, 0].set_axis_off()
predicted_mask = predicted_masks[i]
ax[i, 1].imshow(predicted_mask, interpolation="nearest")
ax[i, 1].set_title("Predicted mask")
ax[i, 1].set_axis_off()
plt.tight_layout()
plt.show()
display_image_test(test_images_filename1, test1, predicted_test1)
test2 = "/kaggle/input/part2-7/Part2-7"
df = pd.DataFrame({"image": ["nighttime1", "nighttime2", "nighttime3"]})
test_images_filename2 = df["image"]
test_dataset2 = test(
test_images_filename2,
test2,
transform=test_transform,
)
print(test_dataset2)
predictions2 = predict(model, params, test_dataset2, batch_size=3)
predicted_test2 = []
for predicted_256x256_mask, original_height, original_width in predictions2:
full_sized_mask2 = A.resize(
predicted_256x256_mask,
height=original_height,
width=original_width,
interpolation=cv2.INTER_NEAREST,
)
predicted_test2.append(full_sized_mask2)
display_image_test(test_images_filename2, test2, predicted_test2)
|
# # ***Library import***
import pandas as pd
import numpy as np
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# # ***Read csv***
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
def le(chat):
leHomme = pd.DataFrame(chat.dtypes, columns=["data types"])
leHomme["Missing"] = chat.isnull().sum()
leHomme["unique"] = chat.nunique()
return leHomme
le(train).style.background_gradient(cmap="Oranges")
def je(seschat):
famme = pd.DataFrame(seschat.dtypes, columns=["data types"])
famme["missing"] = seschat.isnull().sum()
famme["unique"] = seschat.nunique()
return famme
je(test).style.background_gradient(cmap="Oranges_r")
# # ***EDA***
plt.figure(facecolor="#db8f14")
train.dtypes.value_counts().plot(kind="pie", cmap="ocean")
print(" TRAIN DATA")
plt.figure(facecolor="#db8f14")
test.dtypes.value_counts().plot(kind="pie", cmap="ocean")
print(" TEST DATA")
plt.figure(facecolor="#db8f14")
sns.violinplot(data=train, x="label", y="id")
plt.figure(facecolor="#db8f14")
sns.violinplot(data=test)
plt.figure(facecolor="#db8f14", figsize=(10, 5))
sns.heatmap(train.corr(), annot=True, fmt=".0%", cmap="ocean")
# # ***Preprocessing***
from sklearn.preprocessing import LabelEncoder
l = LabelEncoder()
n = train[["tweet"]]
for i in n:
n[i] = l.fit_transform(n[i])
train[["tweet"]] = n
labe = LabelEncoder()
m = test[["tweet"]]
for i in m:
m[i] = labe.fit_transform(m[i])
test[["tweet"]] = m
plt.figure(facecolor="#db8f14")
train.dtypes.value_counts().plot(kind="pie", cmap="winter")
plt.figure(facecolor="#db8f14")
test.dtypes.value_counts().plot(kind="pie", cmap="winter_r")
# # ***Auto ML***
from pycaret.classification import *
# # ***Setup Model***
setup(
train,
target="label",
    ignore_features=["id"],
fix_imbalance=True,
remove_outliers=True,
)
# # ***List of Models***
models()
# # ***Compare Models***
compare_models()
# # ***Create Model***
best = create_model("rf")
# # ***Plots***
plot_model(best, plot="auc")
plot_model(best, plot="class_report")
# # ***Pipeline***
finalize_model(best)
# # ***Prediction***
predictions = best.predict(test.drop("id", axis=1))
predict_model(best).head()
# # ***Submission***
submission = pd.DataFrame({"id": test.id, "label": predictions})
submission.tail()
submission.to_csv("submission.csv", index=False)
|
# 
# # Introduction
# ## History
# According to the World Health Organisation (WHO), heart diseases, which are part of cardiovascular diseases, are the number 1 cause of death globally, claiming 17.9 million lives every year. People who suffer from heart disease tend to show high blood pressure, high lipids and glucose, as well as obesity and overweight issues. The ability to identify these high-risk factors will ensure that patients receive appropriate medical care and prevent premature deaths.
# ## Understanding this study
# We have the following information about our dataset:
# - Age
# - Sex: (1 = Male, 0 = Female)
# - cp(chest pain type):
# * 1 = typical angina
# * 2 = atypical angina
# * 3 = non-anginal pain
# * 4 = asymptomatic
# - trestbps: Resting blood pressure (in mm Hg on admission to the hospital)
# - chol: Serum cholesterol in mg/dl
# - fbs: Fasting blood sugar > 120 mg/dl (1 = true; 0 = false)
# - restecg: Resting electrocardiographic results
# * 0: Normal
# * 1: Having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
# * 2: Showing probable or definite left ventricular hypertrophy by Estes' criteria
# - thalach: Maximum heart rate achieved
# - exang: Exercise induced angina (1 = yes; 0 = no)
# - oldpeak: ST depression induced by exercise relative to rest
# - slope: The slope of the peak exercise ST segment
# * 1: Upsloping
# * 2: Flat
# * 3: Downsloping
# - ca: Number of major vessels (0-3) colored by flourosopy
# - thal: Thallium heart scan
# * 3: Normal
# * 6: Fixed defect
# * 7: Reversible defect
# - target: Diagnosis of heart disease
# * 1: Yes
# * 0: No
#
# ## Objective
# - Find any correlations between attributes
# - Find correlations between each attribute and the diagnosis of heart disease
# First step is to import the required packages, namely numpy, pandas, matplotlib and seaborn.
# Importing packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Importing dataset into notebook and have a preview.
# Importing dataset
heart_data = pd.read_csv("../input/heart-disease/heart.csv")
heart_data.head()
# Now, let's check for any unknown, NaN or NULL values.
heart_data.isnull().sum()
# Looking good! We do know that some attributes, like sex, slope and target, use numbers to encode categorical values. We will need to change them to something we can understand without looking back at the data dictionary. So here is what we're going to do:
# - For sex, we will change 1 to 'Male' and 0 to 'Female'.
# - For cp (chest pain), we will change:
# * 1 to 'typical_ang'
# * 2 to 'atypical_ang'
# * 3 to 'non_anginal_pain'
# * 4 to 'asymptomatic'
# - fbs (fasting blood sugar):
# * 1 to 'True'
# * 0 to 'False'
# - restecg:
# * 0 to 'normal'
# * 1 to 'st_abnormality'
# * 2 to 'prob_lvh'
# - exang (Exercise induced angina):
# * 1 to 'yes'
# * 0 to 'no'
# - slope: The slope of the peak exercise ST segment
# * 1 to 'upsloping'
# * 2 to 'flat'
# * 3 to 'downsloping'
# - thal: Thalium heart scan
# * 3 to 'normal'
# * 6 to 'fixed_def'
# * 7 to 'rev_def'
# - target: 1 to 'yes', 0 to 'no'
#
heart_data["sex"] = heart_data.sex.replace([1, 0], ["male", "female"])
heart_data["cp"] = heart_data.cp.replace(
[0, 1, 2, 3, 4],
["no_cp", "typical_ang", "atypical_ang", "non_anginal_pain", "asymptomatic"],
)
heart_data["fbs"] = heart_data.fbs.replace([1, 0], ["true", "false"])
heart_data["restecg"] = heart_data.restecg.replace(
[0, 1, 2], ["normal", "st_abnormality", "prob_lvh"]
)
heart_data["exang"] = heart_data.exang.replace([0, 1], ["no", "yes"])
heart_data["slope"] = heart_data.slope.replace(
[0, 1, 2, 3], ["no_slope", "upsloping", "flat", "downsloping"]
)
heart_data["thal"] = heart_data.thal.replace(
[3, 6, 7], ["normal", "fixed_def", "rev_def"]
)
heart_data["target"] = heart_data.target.replace([1, 0], ["yes", "no"])
heart_data.head()
# Here, we will use the PairPlot tool from Seaborn to see the distribution and relationships among variables. Since pairplot won't work well with categorical data, we can only pick numerical data for this case.
g = sns.pairplot(
heart_data, vars=["age", "trestbps", "chol", "thalach", "oldpeak"], hue="target"
)
g.map_diag(sns.distplot)
g.add_legend()
g.fig.suptitle("FacetGrid plot", fontsize=20)
g.fig.subplots_adjust(top=0.9)
# ## What do we see here?
# - Other than resting blood pressure, we do see distinct differences between heart disease patients and healthy patients in the targeted attributes.
# - For instance, heart disease patients are fairly evenly distributed across age, while healthy patients are skewed more towards older ages.
# ## Let's look at correlations!
# - Note: Correlation is measured by Pearson's r and isn't defined for categorical data. Hence, we need to change the categorical attributes back to numeric for this analysis.
# - We will simply re-read the original (numeric) data into a new variable.
# Plotting correlation matrix
heart_data1 = pd.read_csv("../input/heart-disease/heart.csv")
corr = heart_data1.corr()
corr.style.background_gradient(cmap="RdBu_r")
# From here, we can see a close correlation between chest pain type, maximum heart rate achieved, the ST slope, and whether the patient has heart disease. Apart from these attributes, the rest show only weak correlation.
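# As a quick illustrative follow-up (not in the original analysis), we can rank the absolute
# correlations against the target directly:
corr["target"].abs().sort_values(ascending=False)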
# ### Let's look closely into some attributes.
# ## Finding correlation between age and whether the patient has heart disease
# Firstly, let's look at the distribution.
plt.figure(figsize=(10, 4))
g = sns.countplot(data=heart_data, x="age", hue="target")
g.legend(
title="Heart disease patient?",
loc="center left",
bbox_to_anchor=(1.25, 0.5),
ncol=1,
)
age_corr = ["age", "target"]
age_corr1 = heart_data[age_corr]
age_corr_y = (
age_corr1[age_corr1["target"] == "yes"]
.groupby(["age"])
.size()
.reset_index(name="count")
)
age_corr_y.corr()
sns.regplot(data=age_corr_y, x="age", y="count").set_title(
"Correlation graph for Age vs heart disease patient"
)
age_corr_n = (
age_corr1[age_corr1["target"] == "no"]
.groupby(["age"])
.size()
.reset_index(name="count")
)
age_corr_n.corr()
sns.regplot(data=age_corr_n, x="age", y="count").set_title(
"Correlation graph for Age vs healthy patient"
)
|
# NumPy arrays can perform element-wise calculations, but can only contain one data type
import numpy as np
print(np.array([1, 2, 3, 4, 5]))
# convert Python list to numpy array
#
var = [1, 2, 3]
var_array = np.array(var)
print(var_array)
# ND numpy array
print(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
array1 = np.array([1.457, 2.356, 3.654], dtype=np.float32)
print(array1)
array2 = np.array([[True, False], [False, True]], dtype=np.bool_)
print(array2.dtype)
array3 = array1.astype(int)
print(array3.dtype)
print(array3)
# Numpy filtering
array1 = np.arange(1, 6)
print(array1)
array1[:] > 2
result1 = array1[array1[:] > 2]
print(result1)
np.where(array1[:] > 2)
result2 = array1[np.where(array1[:] > 2)]
print(result2)
# Replace
result3 = np.where(array1[:] > 2, 0, array1)
print(result3)
# Numpy Concatenating
array1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
array2 = np.array([[3, 3, 3, 3, 3]])
array12 = np.concatenate((array1, array2))
print(array12)
print(array12.shape)
array3 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
array4 = np.array([[1], [2]])
array34 = np.concatenate((array3, array4), axis=1)
print(array34)
array5 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
array6 = np.array([1, 2]).reshape(2, 1)
array56 = np.concatenate((array5, array6), axis=1)
print(array56)
# Numpy deleting
np.delete(array12, 2, axis=0)
np.delete(array34, 2, axis=1)
# Numpy calculation
# SUM
array_sum_all = array12.sum()
array_sum_all_K = array12.sum(keepdims=True)
array_sum_row = array12.sum(axis=0)
array_sum_col = array12.sum(axis=1)
print(array12)
print(array_sum_all)
print(array_sum_all_K)
print(array_sum_row)
print(array_sum_col)
# MAX
array_max_all = array12.max()
array_max_row = array12.max(axis=0)
array_max_col = array12.max(axis=1)
print(array12)
print(array_max_all)
print(array_max_row)
print(array_max_col)
|
# # Visualization of CNN Layers and Filters
# ## Outline
# 1. Using torchvision.datasets with a custom folder of images
# 2. Occlusion analysis with pretrained model
# 3. Filter visualisation with pretrained model
#!ls ../input/padhaivisdata
# reading the labels of data we uploaded
with open("data/imagenet_labels.txt") as f:
classes = eval(f.read())
# type(classes)
print(list(classes.values())[0:5])
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torchvision
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
# parameters
batch_size = 1 # batch size
cuda = True
# ## 1 Load dataset
# defining the transformations for the data
transform = transforms.Compose(
[
transforms.Resize(224),
transforms.ToTensor(),
# normalize the images with imagenet data mean and std
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
)
# define the data we uploaded as evaluation data and apply the transformations
evalset = torchvision.datasets.ImageFolder(root="./data/imagenet", transform=transform)
# create a data loader for evaluation
evalloader = torch.utils.data.DataLoader(evalset, batch_size=batch_size, shuffle=True)
# looking at data using iter
dataiter = iter(evalloader)
images, labels = next(dataiter)
# shape of images bunch
print(images.shape)
# label of the image
print(labels[0].item())
# ## 2 Load pretrained model
# for visualization we will use vgg16 pretrained on imagenet data
model = models.vgg16(pretrained=True)
if cuda:
model.cuda()
model.eval()
# ## 3 Visualise image
def imshow(img, title):
"""Custom function to display the image using matplotlib"""
# define std correction to be made
std_correction = np.asarray([0.229, 0.224, 0.225]).reshape(3, 1, 1)
# define mean correction to be made
mean_correction = np.asarray([0.485, 0.456, 0.406]).reshape(3, 1, 1)
# convert the tensor img to numpy img and de normalize
if cuda:
img = img.cpu()
npimg = np.multiply(img.numpy(), std_correction) + mean_correction
# plot the numpy image
plt.figure(figsize=(batch_size * 4, 4))
plt.axis("off")
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.title(title)
plt.show()
def show_batch_images(dataloader):
"""custom function to fetch images from dataloader"""
images, _ = next(iter(dataloader))
if cuda:
images = images.cuda()
# run the model on the images
outputs = model(images)
if cuda:
outputs = outputs.cpu()
# get the maximum class
_, pred = torch.max(outputs.data, 1)
# make grid
img = torchvision.utils.make_grid(images)
# call the function
imshow(img, title=[classes[x.item()] for x in pred])
return images, pred
images, pred = show_batch_images(evalloader)
# ## 4 Occlusion analysis
# running inference on the images without occlusion
# vgg16 pretrained model
if cuda:
images = images.cuda()
outputs = model(images)
# passing the outputs through softmax to interpret them as probability
outputs = nn.functional.softmax(outputs, dim=1)
# getting the maximum predicted label
prob_no_occ, pred = torch.max(outputs.data, 1)
# get the first item
prob_no_occ = prob_no_occ[0].item()
print(prob_no_occ)
def occlusion(model, image, label, occ_size=50, occ_stride=50, occ_pixel=0.5):
"""custom function to conduct occlusion experiments"""
# get the width and height of the image
width, height = image.shape[-2], image.shape[-1]
# setting the output image width and height
output_height = int(np.ceil((height - occ_size) / occ_stride))
output_width = int(np.ceil((width - occ_size) / occ_stride))
    # create an all-zeros heatmap with the output size we defined
heatmap = torch.zeros((output_height, output_width))
# iterate all the pixels in each column
for h in range(0, height):
for w in range(0, width):
h_start = h * occ_stride
w_start = w * occ_stride
h_end = min(height, h_start + occ_size)
w_end = min(width, w_start + occ_size)
if (w_end) >= width or (h_end) >= height:
continue
input_image = image.clone().detach()
# replacing all the pixel information in the image with occ_pixel(grey) in the specified location
input_image[:, :, w_start:w_end, h_start:h_end] = occ_pixel
if cuda:
input_image = input_image.cuda()
# run inference on modified image
output = model(input_image)
output = nn.functional.softmax(output, dim=1)
prob = output.tolist()[0][label]
# setting the heatmap location to probability value
heatmap[h, w] = prob
return heatmap
heatmap = occlusion(model, images, pred[0].item(), 32, 14)
# display the occlusion heatmap with seaborn, capping the colour scale at the unoccluded probability
imgplot = sns.heatmap(heatmap, xticklabels=False, yticklabels=False, vmax=prob_no_occ)
figure = imgplot.get_figure()
figure.savefig("svm_conf.png", dpi=400)
# ## 5 Filter visualisation
# for filter visualization, we will use alexnet pretrained with imagenet data
alexnet = models.alexnet(pretrained=True)
# if cuda: alexnet.cuda()
print(alexnet)
def plot_filters_single_channel_big(t):
# setting the rows and columns
nrows = t.shape[0] * t.shape[2]
ncols = t.shape[1] * t.shape[3]
npimg = np.array(t.numpy(), np.float32)
npimg = npimg.transpose((0, 2, 1, 3))
npimg = npimg.ravel().reshape(nrows, ncols)
npimg = npimg.T
fig, ax = plt.subplots(figsize=(ncols / 10, nrows / 200))
imgplot = sns.heatmap(
npimg, xticklabels=False, yticklabels=False, cmap="gray", ax=ax, cbar=False
)
def plot_filters_single_channel(t):
# kernels depth * number of kernels
nplots = t.shape[0] * t.shape[1]
ncols = 12
nrows = 1 + nplots // ncols
# convert tensor to numpy image
npimg = np.array(t.numpy(), np.float32)
count = 0
fig = plt.figure(figsize=(ncols, nrows))
# looping through all the kernels in each channel
for i in range(t.shape[0]):
for j in range(t.shape[1]):
count += 1
ax1 = fig.add_subplot(nrows, ncols, count)
npimg = np.array(t[i, j].numpy(), np.float32)
npimg = (npimg - np.mean(npimg)) / np.std(npimg)
npimg = np.minimum(1, np.maximum(0, (npimg + 0.5)))
ax1.imshow(npimg)
ax1.set_title(str(i) + "," + str(j))
ax1.axis("off")
ax1.set_xticklabels([])
ax1.set_yticklabels([])
plt.tight_layout()
plt.show()
def plot_filters_multi_channel(t):
    # get the number of kernels
num_kernels = t.shape[0]
# define number of columns for subplots
num_cols = 12
# rows = num of kernels
num_rows = num_kernels
# set the figure size
fig = plt.figure(figsize=(num_cols, num_rows))
# looping through all the kernels
for i in range(t.shape[0]):
ax1 = fig.add_subplot(num_rows, num_cols, i + 1)
# for each kernel, we convert the tensor to numpy
npimg = np.array(t[i].numpy(), np.float32)
# standardize the numpy image
npimg = (npimg - np.mean(npimg)) / np.std(npimg)
npimg = np.minimum(1, np.maximum(0, (npimg + 0.5)))
npimg = npimg.transpose((1, 2, 0))
ax1.imshow(npimg)
ax1.axis("off")
ax1.set_title(str(i))
ax1.set_xticklabels([])
ax1.set_yticklabels([])
plt.savefig("myimage.png", dpi=100)
plt.tight_layout()
plt.show()
def plot_weights(model, layer_num, single_channel=True, collated=False):
# extracting the model features at the particular layer number
layer = model.features[layer_num]
# checking whether the layer is convolution layer or not
if isinstance(layer, nn.Conv2d):
# getting the weight tensor data
weight_tensor = model.features[layer_num].weight.data
if single_channel:
if collated:
plot_filters_single_channel_big(weight_tensor)
else:
plot_filters_single_channel(weight_tensor)
else:
if weight_tensor.shape[1] == 3:
plot_filters_multi_channel(weight_tensor)
else:
print(
"Can only plot weights with three channels with single channel = False"
)
else:
print("Can only visualize layers which are convolutional")
# visualize weights for alexnet - first conv layer
plot_weights(alexnet, 0, single_channel=False)
# plotting single channel images
plot_weights(alexnet, 0, single_channel=True)
# plot for 3rd layer -> 2nd conv layer
plot_weights(alexnet, 3, single_channel=True)
plot_weights(alexnet, 0, single_channel=True, collated=True)
plot_weights(alexnet, 3, single_channel=True, collated=True)
plot_weights(alexnet, 6, single_channel=True, collated=True)
# for vgg16
plot_weights(model, 0, single_channel=True, collated=True)
plot_weights(model, 2, single_channel=True, collated=True)
plot_weights(model, 5, single_channel=True, collated=True)
plot_weights(model, 0, single_channel=False, collated=False)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
df = pd.read_csv("Downloads/insurance.csv")
df.head()
df.shape
df.isnull().sum()
df.describe()
df.info()
sns.set()
plt.figure(figsize=(6, 6))
sns.distplot(df["age"])
plt.title("Age information")
plt.show()
sns.set()
plt.figure(figsize=(6, 6))
sns.countplot(x="sex", data=df)
plt.title("sex information")
plt.show()
sns.set()
plt.figure(figsize=(6, 6))
sns.distplot(df["bmi"])
plt.title("bmi information")
plt.show()
df
sns.set()
plt.figure(figsize=(6, 6))
sns.countplot(x="children", data=df)
plt.title("children information")
plt.show()
sns.set()
plt.figure(figsize=(6, 6))
sns.countplot(x="smoker", data=df, hue="sex")
plt.title("children information")
plt.show()
sns.set()
plt.figure(figsize=(10, 10))
sns.countplot(x="smoker", data=df, hue="children")
plt.title("somker information")
plt.show()
sns.set()
plt.figure(figsize=(10, 10))
sns.countplot(x="region", data=df)
plt.title("region information")
plt.show()
df.replace({"sex": {"male": 0, "female": 1}}, inplace=True)
df.replace({"smoker": {"yes": 0, "no": 1}}, inplace=True)
df.replace(
{"region": {"southwest": 0, "southeast": 1, "northwest": 2, "northeast": 3}},
inplace=True,
)
df
x = df.drop(columns="charges", axis=1)
y = df["charges"]
x
y
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
model = LinearRegression()
model.fit(x_train, y_train)
model_predict = model.predict(x_train)
score_train = metrics.r2_score(y_train, model_predict)
print("R squared value (train): ", score_train)
test_model_predict = model.predict(x_test)
score_test = metrics.r2_score(y_test, test_model_predict)
print("squred vale : ", score_test)
|
import os
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
def crop_image_from_gray(img, tol=7):
if img.ndim == 2:
mask = img > tol
return img[np.ix_(mask.any(1), mask.any(0))]
elif img.ndim == 3:
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mask = gray_img > tol
check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0]
if check_shape == 0: # image is too dark so that we crop out everything,
return img # return original image
else:
img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))]
img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))]
img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))]
# print(img1.shape,img2.shape,img3.shape)
img = np.stack([img1, img2, img3], axis=-1)
# print(img.shape)
return img
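# Quick illustrative check (not part of the original notebook): a dark border around a bright
# region should be cropped away by the helper above.
_demo = np.zeros((50, 50, 3), dtype=np.uint8)
_demo[10:40, 15:45] = 200
print(crop_image_from_gray(_demo, tol=7).shape)  # expected: (30, 30, 3)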
path = [
"/kaggle/input/glaucomadataset/Non Glaucoma",
"/kaggle/input/glaucomadataset/Glaucoma",
]
images = []
labels = []
for n, i in enumerate(path):
for j in tqdm(os.listdir(i)):
img_path = os.path.join(i, j)
img = cv2.imread(img_path)
img = crop_image_from_gray(img, tol=7)
img = cv2.resize(img, (224, 224))
images.append(img)
labels.append(n)
images = np.array(images) / 255
labels = np.array(labels)
plt.figure(figsize=(20, 20))
for i in range(1, 26):
plt.subplot(5, 5, i)
n = np.random.randint(1022)
plt.imshow(images[n])
plt.title(labels[n])
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
images, labels = shuffle(images, labels, random_state=32)
x_train, x_valid, y_train, y_valid = train_test_split(
images, labels, test_size=0.15, random_state=44
)
x_train, x_test, y_train, y_test = train_test_split(
x_train, y_train, test_size=0.15, random_state=40
)
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode="constant",
cval=0.0,
)
train_gen = datagen.flow(x_train, y_train, batch_size=32)
from keras.models import Sequential, model_from_json
from keras.layers import (
Conv2D,
Dense,
Dropout,
BatchNormalization,
Flatten,
MaxPool2D,
GlobalAveragePooling2D,
)
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.applications.densenet import DenseNet121
tr = DenseNet121(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
model = Sequential()
model.add(tr)
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
ck = ModelCheckpoint(
"glaucoma_weights.hdf5",
monitor="val_loss",
save_best_only=True,
mode="auto",
verbose=1,
)
re = ReduceLROnPlateau(
monitor="val_loss", mode="auto", factor=0.2, patience=4, verbose=1
)
model.summary()
history = model.fit(
train_gen,
epochs=50,
steps_per_epoch=800 // 32,
verbose=1,
validation_data=(x_valid, y_valid),
callbacks=[ck, re],
)
history_df = pd.DataFrame(history.history)
history_df[["loss", "val_loss"]].plot()
history_df[["accuracy", "val_accuracy"]].plot()
pred = model.evaluate(x_test, y_test)
print("Test Accuracy:", pred[1] * 100)
model_json = model.to_json()
with open("glaucoma_model.json", "w") as json_file:
json_file.write(model_json)
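# Illustrative sketch (assuming the JSON file above and the checkpointed weights were written):
# the saved architecture and best weights could be reloaded later like this.
with open("glaucoma_model.json") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("glaucoma_weights.hdf5")
loaded_model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
print(loaded_model.evaluate(x_test, y_test))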
|
#
# # Laboratory 3: Reinforcement Learning
# Reinforcement learning (RL) is a subset of machine learning which poses learning problems as interactions between agents and environments. It often assumes agents have no prior knowledge of a world, so they must learn to navigate environments by optimizing a reward function. Within an environment, an agent can take certain actions and receive feedback, in the form of positive or negative rewards, with respect to their decision. As such, an agent's feedback loop is somewhat akin to the idea of "trial and error", or the manner in which a child might learn to distinguish between "good" and "bad" actions.
# In practical terms, our RL agent will interact with the environment by taking an action at each timestep, receiving a corresponding reward, and updating its state according to what it has "learned".
# 
# While the ultimate goal of reinforcement learning is to teach agents to act in the real, physical world, games provide a convenient proving ground for developing RL algorithms and agents. Games have some properties that make them particularly well suited for RL:
# 1. In many cases, games have perfectly describable environments. For example, all rules of chess can be formally written and programmed into a chess game simulator;
# 2. Games are massively parallelizable. Since they do not require running in the real world, simultaneous environments can be run on large data clusters;
# 3. Simpler scenarios in games enable fast prototyping. This speeds up the development of algorithms that could eventually run in the real-world; and
# 4. ... Games are fun!
# In previous labs, we have explored both supervised (with LSTMs, CNNs) and unsupervised / semi-supervised (with VAEs) learning tasks. Reinforcement learning is fundamentally different, in that we are training a deep learning algorithm to govern the actions of our RL agent, that is trying, within its environment, to find the optimal way to achieve a goal. The goal of training an RL agent is to determine the best next step to take to earn the greatest final payoff or return. In this lab, we focus on building a reinforcement learning algorithm to master two different environments with varying complexity.
# 1. **Cartpole**: Balance a pole, protruding from a cart, in an upright position by only moving the base left or right. Environment with a low-dimensional observation space.
# 2. [**Pong**](https://en.wikipedia.org/wiki/Pong): Beat your competitors (whether other AI or humans!) at the game of Pong. Environment with a high-dimensional observation space -- learning directly from raw pixels.
# Let's get started! First we'll import TensorFlow, the course package, and some dependencies.
#
#!pip install 'gym[box2d]'
# %tensorflow_version 2.x
import tensorflow as tf
import numpy as np
import base64, io, time, gym
import IPython, functools
import matplotlib.pyplot as plt
from tqdm import tqdm
import mitdeeplearning as mdl
# Before we dive in, let's take a step back and outline our approach, which is applicable to reinforcement learning problems in general:
# 1. **Initialize our environment and our agent**: here we will describe the different observations and actions the agent can make in the environment.
# 2. **Define our agent's memory**: this will enable the agent to remember its past actions, observations, and rewards.
# 3. **Define a reward function**: describes the reward associated with an action or sequence of actions.
# 4. **Define the learning algorithm**: this will be used to reinforce the agent's good behaviors and discourage bad behaviors.
# # Part 1: Cartpole
# ## 3.1 Define the Cartpole environment and agent
# ### Environment
# In order to model the environment for both the Cartpole and Pong tasks, we'll be using a toolkit developed by OpenAI called [OpenAI Gym](https://gym.openai.com/). It provides several pre-defined environments for training and testing reinforcement learning agents, including those for classic physics control tasks, Atari video games, and robotic simulations. To access the Cartpole environment, we can use `env = gym.make("CartPole-v0")`, which we gained access to when we imported the `gym` package. We can instantiate different [environments](https://gym.openai.com/envs/#classic_control) by passing the environment name to the `make` function.
# One issue we might experience when developing RL algorithms is that many aspects of the learning process are inherently random: initializing game states, changes in the environment, and the agent's actions. As such, it can be helpful to set an initial "seed" for the environment to ensure some level of reproducibility. Much like you might use `numpy.random.seed`, we can call the comparable function in gym, `seed`, with our defined environment to ensure the environment's random variables are initialized the same each time.
### Instantiate the Cartpole environment ###
env = gym.make("CartPole-v0")
env.seed(1)
# In Cartpole, a pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pole starts upright, and the goal is to prevent it from falling over. The system is controlled by applying a force of +1 or -1 to the cart. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center of the track. A visual summary of the cartpole environment is depicted below:
# Given this setup for the environment and the objective of the game, we can think about: 1) what observations help define the environment's state; 2) what actions the agent can take.
# First, let's consider the observation space. In this Cartpole environment our observations are:
# 1. Cart position
# 2. Cart velocity
# 3. Pole angle
# 4. Pole rotation rate
# We can confirm the size of the space by querying the environment's observation space:
#
n_observations = env.observation_space
print("Environment has observation space =", n_observations)
# Second, we consider the action space. At every time step, the agent can move either right or left. Again we can confirm the size of the action space by querying the environment:
n_actions = env.action_space.n
print("Number of possible actions that the agent can choose from =", n_actions)
# ### Cartpole agent
# Now that we have instantiated the environment and understood the dimensionality of the observation and action spaces, we are ready to define our agent. In deep reinforcement learning, a deep neural network defines the agent. This network will take as input an observation of the environment and output the probability of taking each of the possible actions. Since Cartpole is defined by a low-dimensional observation space, a simple feed-forward neural network should work well for our agent. We will define this using the `Sequential` API.
#
### Define the Cartpole agent ###
# Defines a feed-forward neural network
def create_cartpole_model():
model = tf.keras.models.Sequential(
[
# First Dense layer
tf.keras.layers.Dense(units=32, activation="relu"),
# TODO: Define the last Dense layer, which will provide the network's output.
# Think about the space the agent needs to act in!
tf.keras.layers.Dense(units=n_actions, activation=None),
]
)
return model
cartpole_model = create_cartpole_model()
# Now that we have defined the core network architecture, we will define an *action function* that executes a forward pass through the network, given a set of observations, and samples from the output. This sampling from the output probabilities will be used to select the next action for the agent.
# **Critically, this action function is totally general -- we will use this function for both Cartpole and Pong, and it is applicable to other RL tasks, as well!**
### Define the agent's action function ###
# Function that takes observations as input, executes a forward pass through model,
# and outputs a sampled action.
# Arguments:
# model: the network that defines our agent
# observation: observation which is fed as input to the model
# Returns:
# action: choice of agent action
def choose_action(model, observation):
# add batch dimension to the observation
observation = np.expand_dims(observation, axis=0)
"""TODO: feed the observations through the model to predict the log probabilities of each possible action."""
logits = model.predict(observation)
# pass the log probabilities through a softmax to compute true probabilities
prob_weights = tf.nn.softmax(logits).numpy()
"""TODO: randomly sample from the prob_weights to pick an action.
Hint: carefully consider the dimensionality of the input probabilities (vector) and the output action (scalar)"""
action = np.random.choice(n_actions, size=1, p=prob_weights.flatten())[0]
return action
observation = env.reset()
action = choose_action(cartpole_model, observation)
print("Random action from first observation: ", action)
# ## 3.2 Define the agent's memory
# Now that we have instantiated the environment and defined the agent network architecture and action function, we are ready to move on to the next step in our RL workflow:
# 1. **Initialize our environment and our agent**: here we will describe the different observations and actions the agent can make in the environment.
# 2. **Define our agent's memory**: this will enable the agent to remember its past actions, observations, and rewards.
# 3. **Define a reward function**: describes the reward associated with an action or sequence of actions.
# 4. **Define the learning algorithm**: this will be used to reinforce the agent's good behaviors and discourage bad behaviors.
# In reinforcement learning, training occurs alongside the agent's acting in the environment; an *episode* refers to a sequence of actions that ends in some terminal state, such as the pole falling down or the cart crashing. The agent will need to remember all of its observations and actions, such that once an episode ends, it can learn to "reinforce" the good actions and punish the undesirable actions via training. Our first step is to define a simple memory buffer that contains the agent's observations, actions, and received rewards from a given episode.
# **Once again, note the modularity of this memory buffer -- it can and will be applied to other RL tasks as well!**
### Agent Memory ###
class Memory:
def __init__(self):
self.clear()
# Resets/restarts the memory buffer
def clear(self):
self.observations = []
self.actions = []
self.rewards = []
# Add observations, actions, rewards to memory
def add_to_memory(self, new_observation, new_action, new_reward):
self.observations.append(new_observation)
self.actions.append(new_action)
self.rewards.append(new_reward)
def print(self):
print(f"The memory observations {self.observations}")
print(f"The memory actions {self.actions}")
print(f"The memory rewards {self.rewards}")
memory = Memory()
observation = env.reset()
action = choose_action(cartpole_model, observation)
next_observation, reward, done, info = env.step(action)
memory.add_to_memory(next_observation, action, reward)
memory.print()
# ## 3.3 Reward function
# We're almost ready to begin the learning algorithm for our agent! The next step is to compute the rewards of our agent as it acts in the environment. Since we (and the agent) are uncertain about if and when the game or task will end (i.e., when the pole will fall), it is useful to emphasize getting rewards **now** rather than later in the future -- this is the idea of discounting. Recall from lecture that discounting rewards is similar to discounting money in the case of interest.
# To compute the expected cumulative reward, known as the **return**, at a given timestep in a learning episode, we sum the discounted rewards expected from that time step $t$ onward, projecting into the future. We define the return (cumulative reward) at a time step $t$, $R_{t}$, as:
# >$R_{t}=\sum_{k=0}^\infty\gamma^kr_{t+k}$
# where $0 < \gamma < 1$ is the discount factor, $r_{t}$ is the reward at time step $t$, and the index $k$ increments the projection into the future within a single learning episode. Intuitively, you can think of this function as depreciating any rewards received at later time steps, which will force the agent to prioritize getting rewards now. Since we can't extend episodes to infinity, in practice the computation will be limited to the number of timesteps in an episode -- after that the reward is assumed to be zero.
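# For example, with $\gamma = 0.95$ and episode rewards $r = [1, 1, 1]$, the returns are $R_2 = 1$, $R_1 = 1 + 0.95 \cdot 1 = 1.95$, and $R_0 = 1 + 0.95 \cdot 1.95 \approx 2.85$ -- earlier timesteps receive (discounted) credit for everything that follows.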
# Take note of the form of this sum -- we'll have to be clever about how we implement this function. Specifically, we'll need to initialize an array of zeros, with length equal to the number of time steps, and fill it with the real discounted reward values as we loop through the rewards from the episode, which will have been saved in the agent's memory. What we ultimately care about is which actions were better relative to the other actions taken in that episode -- so we'll normalize the computed rewards, using the mean and standard deviation of the rewards across the learning episode.
#
### Reward function ###
# Helper function that normalizes an np.array x
def normalize(x):
x -= np.mean(x)
x /= np.std(x)
return x.astype(np.float32)
# Compute normalized, discounted, cumulative rewards (i.e., return)
# Arguments:
# rewards: reward at timesteps in episode
# gamma: discounting factor
# Returns:
# normalized discounted reward
def discount_rewards(rewards, gamma=0.95):
discounted_rewards = np.zeros_like(rewards)
R = 0
for t in reversed(range(0, len(rewards))):
# update the total discounted reward
R = R * gamma + rewards[t]
discounted_rewards[t] = R
return normalize(discounted_rewards)
observation = env.reset()
memory.clear()
for i in range(10):
action = choose_action(cartpole_model, observation)
next_observation, reward, done, info = env.step(action)
memory.add_to_memory(next_observation, action, reward)
memory.print()
discounted_rewards = discount_rewards(memory.rewards)
print("Rewards: ", memory.rewards)
print("Discounted rewards: ", discounted_rewards)
# ## 3.4 Learning algorithm
# Now we can start to define the learning algorithm which will be used to reinforce good behaviors of the agent and discourage bad behaviors. In this lab, we will focus on *policy gradient* methods which aim to **maximize** the likelihood of actions that result in large rewards. Equivalently, this means that we want to **minimize** the negative likelihood of these same actions. We achieve this by simply **scaling** the probabilities by their associated rewards -- effectively amplifying the likelihood of actions that result in large rewards.
# Since the log function is monotonically increasing, this means that minimizing **negative likelihood** is equivalent to minimizing **negative log-likelihood**. Recall that we can easily compute the negative log-likelihood of a discrete action by evaluating its [softmax cross entropy](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits). Like in supervised learning, we can use stochastic gradient descent methods to achieve the desired minimization.
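# As a quick illustration (a toy example, assuming TensorFlow 2.x eager execution),
# the sparse softmax cross entropy below returns exactly the negative
# log-probability of the chosen action: logits [2.0, 0.0] put ~0.88 probability
# on action 0, so its negative log-likelihood is about 0.13.
import tensorflow as tf

demo_logits = tf.constant([[2.0, 0.0]])  # one observation, two candidate actions
demo_actions = tf.constant([0])  # the action that was actually taken
demo_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=demo_logits, labels=demo_actions
)
print(demo_nll.numpy())  # ~[0.127], i.e. -log(softmax([2.0, 0.0])[0])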
# Let's begin by defining the loss function.
### Loss function ###
# Arguments:
# logits: network's predictions for actions to take
# actions: the actions the agent took in an episode
# rewards: the rewards the agent received in an episode
# Returns:
# loss
def compute_loss(logits, actions, rewards):
"""TODO: complete the function call to compute the negative log probabilities"""
neg_logprob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=actions
)
"""TODO: scale the negative log probability by the rewards"""
loss = tf.reduce_mean(neg_logprob * rewards)
return loss
observation = env.reset()
memory.clear()
for i in range(10):
action = choose_action(cartpole_model, observation)
next_observation, reward, done, info = env.step(action)
memory.add_to_memory(next_observation, action, reward)
memory.print()
logits = cartpole_model.predict(np.asarray(memory.observations))
print("Logits: ", logits)
loss = compute_loss(logits, memory.actions, memory.rewards)
print("Tensor loss:", loss)
# Now let's use the loss function to define a training step of our learning algorithm:
### Training step (forward and backpropagation) ###
def train_step(model, optimizer, observations, actions, discounted_rewards):
with tf.GradientTape() as tape:
# Forward propagate through the agent network
logits = model(observations)
"""TODO: call the compute_loss function to compute the loss"""
loss = compute_loss(logits, actions, discounted_rewards)
"""TODO: run backpropagation to minimize the loss using the tape.gradient method.
Use `model.trainable_variables`"""
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
learning_rate = 1e-3
optimizer = tf.keras.optimizers.Adam(learning_rate)
observation = env.reset()
memory.clear()
for i in range(10):
action = choose_action(cartpole_model, observation)
next_observation, reward, done, info = env.step(action)
memory.add_to_memory(next_observation, action, reward)
memory.print()
train_step(
cartpole_model,
optimizer,
np.asarray(memory.observations),
np.asarray(memory.actions),
discount_rewards(memory.rewards),
)
# ## 3.5 Run cartpole!
# Having had no prior knowledge of the environment, the agent will begin to learn how to balance the pole on the cart based only on the feedback received from the environment! Having defined how our agent can move, how it takes in new observations, and how it updates its state, we'll see how it gradually learns a policy of actions to optimize balancing the pole as long as possible. To do this, we'll track how the rewards evolve as a function of training -- how should the rewards change as training progresses?
### Cartpole training! ###
# Learning rate and optimizer
learning_rate = 1e-3
optimizer = tf.keras.optimizers.Adam(learning_rate)
# instantiate cartpole agent
cartpole_model = create_cartpole_model()
# to track our progress
smoothed_reward = mdl.util.LossHistory(smoothing_factor=0.9)
plotter = mdl.util.PeriodicPlotter(sec=2, xlabel="Iterations", ylabel="Rewards")
if hasattr(tqdm, "_instances"):
tqdm._instances.clear() # clear if it exists
for i_episode in range(500):
plotter.plot(smoothed_reward.get())
# Restart the environment
observation = env.reset()
memory.clear()
while True:
# using our observation, choose an action and take it in the environment
action = choose_action(cartpole_model, observation)
next_observation, reward, done, info = env.step(action)
# add to memory
memory.add_to_memory(observation, action, reward)
# is the episode over? did you crash or do so well that you're done?
if done:
# determine total reward and keep a record of this
total_reward = sum(memory.rewards)
smoothed_reward.append(total_reward)
# initiate training - remember we don't know anything about how the
# agent is doing until it has crashed!
train_step(
cartpole_model,
optimizer,
observations=np.vstack(memory.observations),
actions=np.array(memory.actions),
discounted_rewards=discount_rewards(memory.rewards),
)
# reset the memory
memory.clear()
break
        # update our observations
observation = next_observation
# To get a sense of how our agent did, we can save a video of the trained model working on balancing the pole. Realize that this is a brand new environment that the agent has not seen before!
# Let's display the saved video to watch how our agent did!
#
saved_cartpole = mdl.lab3.save_video_of_model(cartpole_model, "CartPole-v0")
mdl.lab3.play_video(saved_cartpole)
# How does the agent perform? Could you train it for shorter amounts of time and still perform well? Do you think that training longer would help even more?
# # Part 2: Pong
# In Cartpole, we dealt with an environment that was static -- in other words, it didn't change over time. What happens if our environment is dynamic and unpredictable? Well that's exactly the case in [Pong](https://en.wikipedia.org/wiki/Pong), since part of the environment is the opposing player. We don't know how our opponent will act or react to our actions, so the complexity of our problem increases. It also becomes much more interesting, since we can compete to beat our opponent. RL provides a powerful framework for training AI systems with the ability to handle and interact with dynamic, unpredictable environments. In this part of the lab, we'll use the tools and workflow we explored in Part 1 to build an RL agent capable of playing the game of Pong.
# ## 3.6 Define and inspect the Pong environment
# As with Cartpole, we'll instantiate the Pong environment in the OpenAI gym, using a seed of 1.
env = gym.make("Pong-v0", frameskip=5)
env.seed(1)  # for reproducibility
# Let's next consider the observation space for the Pong environment. Instead of four physical descriptors of the cart-pole setup, in the case of Pong our observations are the individual video frames (i.e., images) that depict the state of the board. Thus, the observations are 210x160 RGB images (arrays of shape (210,160,3)).
# We can again confirm the size of the observation space by query:
print("Environment has observation space =", env.observation_space)
# In Pong, at every time step, the agent (which controls the paddle) has six actions to choose from: no-op (no operation), move right, move left, fire, fire right, and fire left. Let's confirm the size of the action space by querying the environment:
n_actions = env.action_space.n
print("Number of possible actions that the agent can choose from =", n_actions)
# ## 3.7 Define the Pong agent
# As before, we'll use a neural network to define our agent. What network architecture do you think would be especially well suited to this game? Since our observations are now in the form of images, we'll add convolutional layers to the network to increase the learning capacity of our network.
### Define the Pong agent ###
# Functionally define layers for convenience
# All convolutional layers will have ReLu activation
Conv2D = functools.partial(tf.keras.layers.Conv2D, padding="same", activation="relu")
Flatten = tf.keras.layers.Flatten
Dense = tf.keras.layers.Dense
# Defines a CNN for the Pong agent
def create_pong_model():
model = tf.keras.models.Sequential(
[
# Convolutional layers
# First, 16 7x7 filters with 4x4 stride
Conv2D(filters=16, kernel_size=7, strides=4),
# TODO: define convolutional layers with 32 5x5 filters and 2x2 stride
Conv2D(filters=32, kernel_size=5, strides=2),
# TODO: define convolutional layers with 48 3x3 filters and 2x2 stride
Conv2D(filters=48, kernel_size=3, strides=2),
Flatten(),
# Fully connected layer and output
Dense(units=64, activation="relu"),
# TODO: define the output dimension of the last Dense layer.
# Pay attention to the space the agent needs to act in
Dense(units=n_actions, activation=None),
]
)
return model
pong_model = create_pong_model()
# Since we've already defined the action function, `choose_action(model, observation)`, we don't need to define it again. Instead, we'll be able to reuse it later on by passing in our new model we've just created, `pong_model`. This is awesome because our action function provides a modular and generalizable method for all sorts of RL agents!
# ## 3.8 Pong-specific functions
# In Part 1 (Cartpole), we implemented some key functions and classes to build and train our RL agent -- `choose_action(model, observation)` and the `Memory` class, for example. However, in getting ready to apply these to a new game like Pong, we might need to make some slight modifications.
# Namely, we need to think about what happens when a game ends. In Pong, we know a game has ended if the reward is +1 (we won!) or -1 (we lost unfortunately). Otherwise, we expect the reward at a timestep to be zero -- the players (or agents) are just playing each other. So, when a game ends (i.e., when the reward is nonzero), we will need to reset the running reward sum to zero. This will result in a modified reward function.
### Pong reward function ###
# Compute normalized, discounted rewards for Pong (i.e., return)
# Arguments:
# rewards: reward at timesteps in episode
# gamma: discounting factor. Note increase to 0.99 -- rate of depreciation will be slower.
# Returns:
# normalized discounted reward
def discount_rewards(rewards, gamma=0.99):
discounted_rewards = np.zeros_like(rewards)
R = 0
for t in reversed(range(0, len(rewards))):
# NEW: Reset the sum if the reward is not 0 (the game has ended!)
if rewards[t] != 0:
R = 0
# update the total discounted reward as before
R = R * gamma + rewards[t]
discounted_rewards[t] = R
return normalize(discounted_rewards)
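# A quick sanity check of the reset behaviour on a toy reward trace (an
# illustrative example, not competition data): a point is won (+1) at step 2
# and lost (-1) at step 5. After normalization, the steps of the first rally
# receive positive credit while the steps of the losing rally receive negative
# credit, instead of the two rallies being mixed together.
toy_rewards = [0.0, 0.0, 1.0, 0.0, 0.0, -1.0]
print(discount_rewards(toy_rewards))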
# Additionally, we have to consider the nature of the observations in the Pong environment, and how they will be fed into our network. Our observations in this case are images. Before we input an image into our network, we'll do a bit of pre-processing to crop and scale, clean up the background colors to a single color, and set the important game elements to a single color. Let's use this function to visualize what an observation might look like before and after pre-processing.
observation = env.reset()
for i in range(30):
observation, _, _, _ = env.step(0)
observation_pp = mdl.lab3.preprocess_pong(observation)
f = plt.figure(figsize=(10, 3))
ax = f.add_subplot(121)
ax2 = f.add_subplot(122)
ax.imshow(observation)
ax.grid(False)
ax2.imshow(np.squeeze(observation_pp))
ax2.grid(False)
plt.title("Preprocessed Observation")
# What do you notice? How might these changes be important for training our RL algorithm?
# ## 3.9 Training Pong
# We're now all set up to start training our RL algorithm and agent for the game of Pong! We've already defined our loss function with `compute_loss`, which employs policy gradient learning, as well as our backpropagation step with `train_step` which is beautiful! We will use these functions to execute training the Pong agent. Let's walk through the training block.
# In Pong, rather than feeding our network one image at a time, it can actually improve performance to input the difference between two consecutive observations, which really gives us information about the movement between frames -- how the game is changing. We'll first pre-process the raw observation, `x`, and then we'll compute the difference with the image frame we saw one timestep before.
# This observation change will be forward propagated through our Pong agent, the CNN network model, which will then predict the next action to take based on this observation. The raw reward will be computed, and the observation, action, and reward will be recorded into memory. This will continue until a training episode, i.e., a game, ends.
# Then, we will compute the discounted rewards, and use this information to execute a training step. Memory will be cleared, and we will do it all over again!
# Let's run the code block to train our Pong agent. Note that completing training will take quite a bit of time (estimated at least a couple of hours). We will again visualize the evolution of the total reward as a function of training to get a sense of how the agent is learning.
### Training Pong ###
# Hyperparameters
learning_rate = 1e-4
MAX_ITERS = (
10000 # increase the maximum number of episodes, since Pong is more complex!
)
# Model and optimizer
pong_model = create_pong_model()
optimizer = tf.keras.optimizers.Adam(learning_rate)
# plotting
smoothed_reward = mdl.util.LossHistory(smoothing_factor=0.9)
plotter = mdl.util.PeriodicPlotter(sec=5, xlabel="Iterations", ylabel="Rewards")
memory = Memory()
for i_episode in range(MAX_ITERS):
plotter.plot(smoothed_reward.get())
# Restart the environment
observation = env.reset()
previous_frame = mdl.lab3.preprocess_pong(observation)
while True:
# Pre-process image
current_frame = mdl.lab3.preprocess_pong(observation)
"""TODO: determine the observation change
Hint: this is the difference between the past two frames"""
obs_change = current_frame - previous_frame
"""TODO: choose an action for the pong model, using the frame difference, and evaluate"""
action = choose_action(pong_model, obs_change)
# Take the chosen action
next_observation, reward, done, info = env.step(action)
"""TODO: save the observed frame difference, the action that was taken, and the resulting reward!"""
memory.add_to_memory(obs_change, action, reward)
# is the episode over? did you crash or do so well that you're done?
if done:
# determine total reward and keep a record of this
total_reward = sum(memory.rewards)
smoothed_reward.append(total_reward)
# begin training
train_step(
pong_model,
optimizer,
observations=np.stack(memory.observations, 0),
actions=np.array(memory.actions),
discounted_rewards=discount_rewards(memory.rewards),
)
memory.clear()
break
observation = next_observation
previous_frame = current_frame
# Finally we can put our trained agent to the test! It will play in a newly instantiated Pong environment against the "computer", a base AI system for Pong. Your agent plays as the green paddle. Let's watch the match instant replay!
saved_pong = mdl.lab3.save_video_of_model(
pong_model, "Pong-v0", obs_diff=True, pp_fn=mdl.lab3.preprocess_pong
)
mdl.lab3.play_video(saved_pong)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
train_df.head()
train_df.tail()
# **Missing values** Define a function that finds the missing values and plots their frequency. With this information we can impute the values accordingly or drop the feature (as in the case of the Cabin feature)
def missing_data(data):
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum() / data.isnull().count() * 100).sort_values(
ascending=False
)
ms = pd.concat([total, percent], axis=1, keys=["Total", "Percent"])
ms = ms[ms["Percent"] > 0]
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.barplot(ms.index, ms["Percent"], color="blue", alpha=0.8)
plt.xlabel("Independent variables", fontsize=15)
plt.ylabel("Percent of missing values", fontsize=15)
plt.title("NaN exploration", fontsize=15)
return ms
missing_data(train_df)
missing_data(test_df)
# **Data imputation**
test_df["Age"].fillna(test_df["Age"].median(), inplace=True)
train_df["Age"].fillna(train_df["Age"].median(), inplace=True)
drop_column = ["Cabin"]
train_df.drop(drop_column, axis=1, inplace=True)
test_df.drop(drop_column, axis=1, inplace=True)
test_df["Fare"].fillna(test_df["Fare"].median(), inplace=True)
train_df["Embarked"].fillna(train_df["Embarked"].mode()[0], inplace=True)
print(train_df.isnull().sum())
print(test_df.isnull().sum())
# **Feature Engineering**
# Character column Name: extract titles like Mrs, Miss, Mr
all_data = [train_df, test_df]
import re
# extract the second word from every name and assign it to a new column
def title_parser(name):
    # Check whether a title exists; if so, return it, otherwise return ""
    title_search = re.search(r"([A-Za-z]+)\.", name)
if title_search:
return title_search.group(1)
return ""
# Create new column Title
for dataset in all_data:
dataset["Title"] = dataset["Name"].apply(title_parser)
for dataset in all_data:
dataset["Title"] = dataset["Title"].replace(
[
"Lady",
"Countess",
"Capt",
"Col",
"Don",
"Dr",
"Major",
"Rev",
"Sir",
"Jonkheer",
"Dona",
],
"irrelevant",
)
dataset["Title"] = dataset["Title"].replace("Mlle", "Miss")
dataset["Title"] = dataset["Title"].replace("Ms", "Miss")
dataset["Title"] = dataset["Title"].replace("Mme", "Mrs")
# Before proceeding let us have a look at how our variables interact with each other
g = sns.pairplot(
data=train_df,
hue="Survived",
palette="seismic",
size=1.2,
diag_kind="kde",
diag_kws=dict(shade=True),
plot_kws=dict(s=10),
)
g.set(xticklabels=[])
# create new feature FamilySize as a combination of SibSp and Parch
for dataset in all_data:
dataset["FamilySize"] = dataset["SibSp"] + dataset["Parch"] + 1
# create bin for age features
for dataset in all_data:
dataset["Age_bin"] = pd.cut(
dataset["Age"],
bins=[0, 12, 20, 40, 120],
labels=["Children", "Teenager", "Adult", "Elder"],
)
# create bin for fare features
for dataset in all_data:
dataset["Fare_bin"] = pd.cut(
dataset["Fare"],
bins=[0, 7.91, 14.45, 31, 120],
labels=["Low_fare", "median_fare", "Average_fare", "high_fare"],
)
train_df.head()
for dataset in all_data:
drop_column = ["Name", "Ticket"]
dataset.drop(drop_column, axis=1, inplace=True)
train_df.head()
# **Hot encoding**
train_df = pd.get_dummies(
train_df,
columns=["Sex", "Embarked", "Title", "Age_bin", "Fare_bin"],
prefix=["Sex", "Embarked", "Title", "Age_bin", "Fare_bin"],
)
test_df = pd.get_dummies(
test_df,
columns=["Sex", "Embarked", "Title", "Age_bin", "Fare_bin"],
prefix=["Sex", "Embarked", "Title", "Age_bin", "Fare_bin"],
)
train_df.head()
# **Correlation between variables**
sns.heatmap(train_df.corr(), annot=True, cmap="RdYlGn", linewidths=0.2)
fig = plt.gcf()
fig.set_size_inches(20, 12)
plt.show()
# **Scaling and data-transformation**
from sklearn.preprocessing import MinMaxScaler
train_df[["Age", "Fare"]] = train_df[["Age", "Fare"]].apply(pd.to_numeric)
scaler = MinMaxScaler()
train_df[["Age", "Fare"]] = scaler.fit_transform(train_df[["Age", "Fare"]])
drop_column = ["PassengerId"]
train_df.drop(drop_column, axis=1, inplace=True)
train_X = train_df.drop("Survived", axis=1)
train_Y = train_df["Survived"]
test_X = test_df.drop("PassengerId", axis=1).copy()
train_df.head()
# **Modeling**
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
kfold = KFold(n_splits=10, shuffle=True, random_state=22)
all_features = train_df.drop("Survived", axis=1)
targeted_feature = train_df["Survived"]
X_train, X_test, y_train, y_test = train_test_split(
all_features, targeted_feature, test_size=0.3, random_state=42
)
# **eXtreme Gradient Boosting**
train_X = train_df.drop("Survived", axis=1)
train_Y = train_df["Survived"]
test_X = test_df.drop("PassengerId", axis=1).copy()
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier()
param_grid = {
"loss": ["deviance"],
"n_estimators": [100, 200, 300, 400],
"learning_rate": [0.1, 0.05, 0.01, 0.001],
"max_depth": [4, 8],
"min_samples_leaf": [100, 150],
"max_features": [0.3, 0.2, 0.1],
}
modelf = GridSearchCV(
model, param_grid=param_grid, cv=kfold, scoring="accuracy", n_jobs=4, verbose=1
)
modelf.fit(train_X, train_Y)
modelf.best_estimator_
modelf.best_score_
# **Random Forest parameter tuning and other models:**
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
n_estim = range(100, 1000, 100)
param_grid = {"n_estimators": n_estim}
model_rf = GridSearchCV(
model, param_grid=param_grid, cv=5, scoring="accuracy", n_jobs=4, verbose=1
)
model_rf.fit(train_X, train_Y)
model_rf.best_estimator_
model_rf.best_score_
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model = LinearDiscriminantAnalysis()
param_grid = {"tol": [0.001, 0.01, 0.1, 0.2]}
modell = GridSearchCV(
model, param_grid=param_grid, cv=5, scoring="accuracy", n_jobs=4, verbose=1
)
modell.fit(train_X, train_Y)
modell.best_estimator_
modell.best_score_
# **Logistic Regression**
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
model = LogisticRegression()
model.fit(X_train, y_train)
prediction_lr = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_lr, y_test) * 100, 2))
result_lr = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_lr.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.svm import SVC, LinearSVC
model = SVC()
model.fit(X_train, y_train)
prediction_svm = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_svm, y_test) * 100, 2))
result_svm = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_svm.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(X_train, y_train)
prediction_knn = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_knn, y_test) * 100, 2))
result_knn = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_knn.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train, y_train)
prediction_gnb = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_gnb, y_test) * 100, 2))
result_gnb = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_gnb.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(
criterion="gini", min_samples_split=10, min_samples_leaf=1, max_features="auto"
)
model.fit(X_train, y_train)
prediction_tree = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_tree, y_test) * 100, 2))
result_tree = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_tree.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier()
model.fit(X_train, y_train)
prediction_adb = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_adb, y_test) * 100, 2))
result_adb = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_adb.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model = LinearDiscriminantAnalysis()
model.fit(X_train, y_train)
prediction_lda = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_lda, y_test) * 100, 2))
result_lda = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_lda.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier()
model.fit(X_train, y_train)
prediction_gbc = model.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_gbc, y_test) * 100, 2))
result_gbc = cross_val_score(
model, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_gbc.mean() * 100, 2))
y_pred = cross_val_predict(model, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.ensemble import RandomForestClassifier
model_rf = RandomForestClassifier(
criterion="gini",
n_estimators=700,
min_samples_split=10,
min_samples_leaf=1,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
)
model_rf.fit(X_train, y_train)
prediction_rm = model_rf.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_rm, y_test) * 100, 2))
result_rm = cross_val_score(
    model_rf, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_rm.mean() * 100, 2))
y_pred = cross_val_predict(model_rf, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
from sklearn.ensemble import RandomForestClassifier
model1 = RandomForestClassifier(
bootstrap=True,
class_weight=None,
max_depth=None,
max_leaf_nodes=None,
min_weight_fraction_leaf=0.0,
criterion="gini",
n_estimators=100,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
oob_score=False,
random_state=None,
n_jobs=None,
verbose=0,
warm_start=False,
)
model1.fit(X_train, y_train)
prediction_rm1 = model1.predict(X_test)
print("Accuracy", round(accuracy_score(prediction_rm1, y_test) * 100, 2))
result_rm1 = cross_val_score(
model1, all_features, targeted_feature, cv=10, scoring="accuracy"
)
print("The cross validated score ", round(result_rm1.mean() * 100, 2))
y_pred = cross_val_predict(model1, all_features, targeted_feature, cv=10)
sns.heatmap(
confusion_matrix(targeted_feature, y_pred), annot=True, fmt="3.0f", cmap="cool"
)
plt.title("Confusion matrix", y=1.05, size=15)
models = pd.DataFrame(
{
"Model": [
"support vector machine",
"KNN",
"Logistic Regression",
"Random Forest",
"Naive Bayes",
"AdaBoostClassifier",
"Gradient Decent",
"Linear Discriminant Analysis",
"Decision Tree",
"Tuned RF",
],
"Score": [
result_svm.mean(),
result_knn.mean(),
result_lr.mean(),
result_rm.mean(),
result_gnb.mean(),
result_adb.mean(),
result_gbc.mean(),
result_lda.mean(),
result_tree.mean(),
result_rm1.mean(),
],
}
)
models.sort_values(by="Score", ascending=False)
# Now we use our best model to create the output (submission) set
from sklearn.ensemble import RandomForestClassifier
random_forest = RandomForestClassifier(
bootstrap=True,
class_weight=None,
max_depth=None,
max_leaf_nodes=None,
min_weight_fraction_leaf=0.0,
criterion="gini",
n_estimators=100,
min_samples_split=2,
min_samples_leaf=1,
max_features="auto",
oob_score=False,
random_state=None,
n_jobs=None,
verbose=0,
warm_start=False,
)
random_forest.fit(train_X, train_Y)
Y_pred_rf = random_forest.predict(test_X)
random_forest.score(train_X, train_Y)
acc_random_forest = round(random_forest.score(train_X, train_Y) * 100, 2)
print(acc_random_forest)
print("Feature selection")
pd.Series(random_forest.feature_importances_, X_train.columns).sort_values(
ascending=True
).plot.barh(width=0.8)
submission = pd.DataFrame(
{"PassengerId": test_df["PassengerId"], "Survived": Y_pred_rf}
)
submission.head()
submission.to_csv("submission.csv", index=False)
|
# Content
# Import Libraries
# Load data
# Data Preparation
# Missing values imputation
# Feature Engineering
# Modeling
# Build the model
# Evaluation
# Model performance
# Feature importance
# Who gets the best performing model?
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
# Modelling Algorithms
from sklearn.svm import SVC, LinearSVC
from sklearn import linear_model
# Modelling Helpers
from sklearn.preprocessing import Imputer, Normalizer, scale
from sklearn.feature_selection import RFECV
# Visualisation
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
# get TMDB Box Office Prediction train & test csv files as a DataFrame
train = pd.read_csv("/kaggle/input/tmdb-box-office-prediction/train.csv")
test = pd.read_csv("/kaggle/input/tmdb-box-office-prediction/test.csv")
# Shuffle data
tr_shuffle = shuffle(train, random_state=43).reset_index(drop=True)
# Select features
selected_features = ["budget", "popularity"]  # use a list (ordered) rather than a set for column selection
# Split into training and validation data set
data_train = tr_shuffle[0:2500]
data_validate = tr_shuffle[2500:]
# Create input and out for training and validation set
data_tr_x = data_train[selected_features]
data_tr_y = data_train[["revenue"]]
data_val_x = data_validate[selected_features]
data_val_y = data_validate[["revenue"]]
def plot_correlation_map(df):
    corr = df.corr()
_, ax = plt.subplots(figsize=(23, 22))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
_ = sns.heatmap(
corr,
cmap=cmap,
square=True,
cbar_kws={"shrink": 0.9},
ax=ax,
annot=True,
annot_kws={"fontsize": 12},
)
def plot_distribution(df, var, target, **kwargs):
row = kwargs.get("row", None)
col = kwargs.get("col", None)
facet = sns.FacetGrid(df, hue=target, aspect=4, row=row, col=col)
facet.map(sns.kdeplot, var, shade=True)
facet.set(xlim=(0, df[var].max()))
facet.add_legend()
# **Visualization**
train.corr()
# Parse the cast column: show the first listed cast member's ID and name for each movie
print(train.cast.shape[0])
for index in train.cast:
print(int((index.split("'id':"))[1].split(",")[0]))
print((index.split("'name': '"))[1].split("',")[0])
print(int((train.cast[1].split("'id':"))[1].split(",")[0]))
print((train.cast[1].split("'name': '"))[1].split("',")[0])
np.count_nonzero(train.budget)
train.describe()
data = pd.concat([train["budget"], train["revenue"]], axis=1)
data.plot.scatter(x="budget", y="revenue", xlim=(0, 1e7), ylim=(0, 1e8))
# Test plot for popularity
data = pd.concat([train["popularity"], train["revenue"]], axis=1)
data.plot.scatter(x="popularity", y="revenue", xlim=(0, 400))
# Test plot for runtime
data = pd.concat([train["runtime"], train["revenue"]], axis=1)
data.plot.scatter(x="runtime", y="revenue")
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "hacker_news" dataset
dataset_ref = client.dataset("hacker_news", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# Construct a reference to the "comments" table
table_ref = dataset_ref.table("comments")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "comments" table
client.list_rows(table, max_results=5).to_dataframe()
query = """
SELECT COUNT(id)
FROM `train`
HAVING 'budget'<10.0
"""
# Set up the query (cancel the query if it would use too much of
# your quota, with the limit set to 10 GB)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
query_job = client.query(query, job_config=safe_config)
# API request - run the query, and convert the results to a pandas DataFrame
unknown_budget = query_job.to_dataframe()
# Print the first five rows of the DataFrame
unknown_budget.head()
# plot_correlation_map(train)
# **Training**
## Splitting into Test and validation data and feature selection
#
## Selecting features Budget and Popularity
# train_mod = train[{"budget","popularity"}]
#
## Selecting the first 2001 indices of the training data for training
# train_train = train_mod[0:2000]
## Selecting the rest of the training data for validation
# train_val= train_mod[2001:2999]
#
## Obtain labels
# train_mod_y = train[{"revenue"}]
# train_train_y = train_mod_y[0:2000]
# train_val_y= train_mod_y[2001:2999]
# train_val_title = train["original_title"][2001:2999]
# Initialize and train a linear regression (Lasso) model
model = linear_model.Lasso(alpha=0.1)
model.fit(data_tr_x, data_tr_y.values.ravel())
# Predict on the validation data
res = model.predict(data_val_x)
# Obtain R2 score (ordinary least square)
print("R2 score on validation data = ", model.score(data_val_x, data_val_y))
# Obtain R2 score (ordinary least square)
print("R2 score on training data = ", model.score(data_tr_x, data_tr_y))
# Create the table for comparing predictions with labels
evaluation = pd.DataFrame(
{
"Title": data_validate["original_title"].values.ravel(),
"Prediction": res.round(),
"Actual revenue": data_val_y.values.ravel(),
"Relative error": res / data_val_y.values.ravel(),
}
)
evaluation
|
import os, cv2, random
import keras
import tensorflow as tf
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
np.random.seed(42)
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
train_zip_dir = "/kaggle/input/dogs-vs-cats-redux-kernels-edition/train.zip"
test_zip_dir = "/kaggle/input/dogs-vs-cats-redux-kernels-edition/test.zip"
train_dir = "/kaggle/working/train"
test_dir = "/kaggle/working/test"
catagories = {
"cat": np.array([1, 0], dtype=np.int8),
"dog": np.array([0, 1], dtype=np.int8),
}
def images_to_array(data_dir, img_size=224, train=True):
images_names = os.listdir(data_dir)
data_size = len(images_names)
    # store pixels as uint8 so values above 127 do not overflow
    X = np.zeros([data_size, img_size, img_size, 3], dtype=np.uint8)
y = np.zeros([data_size, 2], dtype=np.int8)
print("Images Size: ", len(images_names))
for i, image_name in enumerate(images_names):
img_dir = os.path.join(data_dir, image_name)
img_pixels = cv2.imread(img_dir, cv2.IMREAD_COLOR)
img_pixels = cv2.resize(img_pixels, (img_size, img_size))
X[i] = img_pixels
if train:
y[i] = catagories[image_name.split(".")[0]]
    # shuffle only the training data; the test images keep their listing order
    if train:
        ind = np.random.permutation(data_size)
        X = X[ind]
        y = y[ind]
    print("Output Data Size: ", X.shape)
    print("Output Label Size: ", y.shape)
if train:
return X, y
else:
return X
X_train, y_train = images_to_array(train_dir)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.2, random_state=42
)
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape)
# prepare model.
resnet50 = keras.applications.resnet50.ResNet50(include_top=False, weights="imagenet")
avg = keras.layers.GlobalAveragePooling2D()(resnet50.output)
output_layer = keras.layers.Dense(2, activation="softmax")(avg)
model = keras.models.Model(inputs=[resnet50.input], outputs=[output_layer])
optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(
X_train,
y_train,
validation_data=(X_val, y_val),
epochs=15,
batch_size=32,
verbose=True,
)
model.evaluate(X_val, y_val)
del X_train
del X_val
del y_val
del y_train
X_test = images_to_array(test_dir, train=False)
y_pred = model.predict(X_test)
y_pred = np.argmax(y_pred, axis=1)
results = pd.Series(y_pred, name="label")
Id = pd.Series(range(1, 12501), name="Id")
submission = pd.concat([Id, results], axis=1)
submission.to_csv("MySubmission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import folium # plotting library
from folium import plugins
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df = pd.read_csv("../input/us-weather-events/US_WeatherEvents_2016-2019.csv")
df["StartTime(UTC)"] = pd.to_datetime(df["StartTime(UTC)"])
df["EndTime(UTC)"] = pd.to_datetime(df["EndTime(UTC)"])
df.head()
df["Type"].value_counts()
df["Severity"].value_counts()
# ## Data prep for k-Means clustering
df = df[(df["Severity"] != "UNK") & (df["Severity"] != "Other")]
df.head()
df_types = df[["AirportCode", "Type"]]
df_types.head()
types = pd.get_dummies(df_types["Type"])
types["AirportCode"] = df_types["AirportCode"]
types = types.groupby("AirportCode").sum().reset_index()
types.head()
# ## k-Means Clustering
codes = types[["AirportCode"]].copy()  # copy so cluster labels can be assigned later without a warning
types.drop("AirportCode", axis=1, inplace=True)
distortions = []
K = range(1, 20)
for k in K:
kmean = KMeans(n_clusters=k, random_state=0, n_init=50, max_iter=500)
kmean.fit(types)
distortions.append(kmean.inertia_)
plt.figure(figsize=(10, 5))
plt.plot(K, distortions, "bx-")
plt.xlabel("k")
plt.ylabel("Distortion")
plt.title("The Elbow Method")
plt.show()
# run k-means clustering
kmeans = KMeans(n_clusters=4, random_state=0).fit(types)
codes["cluster"] = kmeans.labels_
codes.head()
pca = PCA().fit(types)
pca_types = pca.transform(types)
print("Variance explained by each component (%): ")
for i in range(len(pca.explained_variance_ratio_)):
print("\n", i + 1, "º:", pca.explained_variance_ratio_[i] * 100)
print("Total sum (%): ", sum(pca.explained_variance_ratio_) * 100)
c0 = []
c1 = []
c2 = []
c3 = []
for i in range(len(pca_types)):
if kmeans.labels_[i] == 0:
c0.append(pca_types[i])
if kmeans.labels_[i] == 1:
c1.append(pca_types[i])
if kmeans.labels_[i] == 2:
c2.append(pca_types[i])
if kmeans.labels_[i] == 3:
c3.append(pca_types[i])
c0 = np.array(c0)
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
plt.figure(figsize=(7, 7))
plt.scatter(c0[:, 0], c0[:, 1], c="red", label="Cluster 0")
plt.scatter(c1[:, 0], c1[:, 1], c="blue", label="Cluster 1")
plt.scatter(c2[:, 0], c2[:, 1], c="green", label="Cluster 2")
plt.scatter(c3[:, 0], c3[:, 1], c="black", label="Cluster 3")
plt.legend()
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.title("Low dimensional visualization (PCA) - Airports")
# ## Folium Maps Visualisation by Number of Occurrences and Clustering
latitude = 38.500000
longitude = -95.665
map_USA = folium.Map(location=[latitude, longitude], zoom_start=4)
map_USA
airports = df[["AirportCode", "LocationLat", "LocationLng", "City", "State"]]
airports.head()
number_of_occurences = pd.DataFrame(airports["AirportCode"].value_counts())
number_of_occurences.reset_index(inplace=True)
number_of_occurences.columns = ["AirportCode", "Count"]
number_of_occurences.head()
number_of_occurences = number_of_occurences.merge(airports.drop_duplicates())
number_of_occurences = number_of_occurences.merge(codes)
number_of_occurences.head()
occurences = folium.map.FeatureGroup()
n_mean = number_of_occurences["Count"].mean()
for lat, lng, number, city, state in zip(
number_of_occurences["LocationLat"],
number_of_occurences["LocationLng"],
number_of_occurences["Count"],
number_of_occurences["City"],
number_of_occurences["State"],
):
occurences.add_child(
folium.vector_layers.CircleMarker(
[lat, lng],
radius=number
/ n_mean
* 5, # define how big you want the circle markers to be
color="yellow",
fill=True,
fill_color="blue",
fill_opacity=0.6,
tooltip=str(number) + "," + str(city) + "," + str(state),
)
)
map_USA.add_child(occurences)
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=4)  # zoomed out to show airports across the US
# set color scheme for the clusters
x = np.arange(4)
ys = [i + x + (i * x) ** 2 for i in range(4)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lng, cluster, city, state in zip(
number_of_occurences["LocationLat"],
number_of_occurences["LocationLng"],
number_of_occurences["cluster"],
number_of_occurences["City"],
number_of_occurences["State"],
):
# label = folium.Popup(str(city)+ ','+str(state) + '- Cluster ' + str(cluster), parse_html=True)
folium.vector_layers.CircleMarker(
[lat, lng],
radius=5,
# popup=label,
tooltip=str(city) + "," + str(state) + "- Cluster " + str(cluster),
color=rainbow[cluster - 1],
fill=True,
fill_color=rainbow[cluster - 1],
fill_opacity=0.9,
).add_to(map_clusters)
map_clusters
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import (
train_test_split,
cross_val_score,
cross_val_predict,
GridSearchCV,
)
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
import category_encoders as ce
import xgboost as xgb
df_train = pd.read_csv("../input/indian-liver-patient-records/indian_liver_patient.csv")
pd.set_option("display.max_columns", 90)
pd.set_option("display.max_rows", 90)
# ===================================________Data Exploration________==================================================
def data_exploration(data):
"""
Understanding data to make better feature engineering
:param data: Data to be explored
:return: None
"""
# ============______Basic FAMILIARIZATION________==================
print("______________DATA HEAD__________ \n", data.head())
print("______________DATA DESCRIBE______ \n", data.describe())
print("______________DATA INFO__________ \n", data.info())
# ===========_______DATA FREQUENT TERM___________===================
print("_____________Total unique values in data_______ \n", data.nunique())
print("___________________ DATA UNIQUE VALUES_____________ \n")
print("\n", [pd.value_counts(data[cols]) for cols in data.columns], "\n")
# ===========_______DATA CORRELATION_____________====================
corr_mat_graph(data, "EDA MATRIX")
# =================____________DISTRIBUTION VISUALIZATION_________=================
dist_plot(data)
# ======================___________ Outliers__________________======================
box_plot(data)
# ================================___________GRAPHS FUNCTIONS____________==============================================
def corr_mat_graph(data, title):
"""
function to plot correlation matrix for better understanding of data
:param data: correlation matrix
:param title: Title of the graph
:return: None
"""
print("\n \n ____________________CORRELATION MATRIX_______________ \n \n")
corr_matrix = data.corr()
corr_matrix_salePrice = corr_matrix["Dataset"].sort_values(ascending=False)
print("________CORRELATION MATRIX BY DATA SET________ \n", corr_matrix_salePrice)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(
corr_matrix,
square=False,
linewidths=0.5,
ax=ax,
vmax=0.8,
vmin=0.42,
annot=True,
)
ax.title.set_text(title)
def dist_plot(data):
"""
Function to plot subplots of distribution for numerical data
:param data: data which needs to be plotted
:return: None
"""
print("\n \n ________________________DISTRIBUTION PLOT___________________ \n \n")
# Plotting numerical graph
data = data.select_dtypes(exclude="object")
data_filed = data.dropna(axis=0)
for cols in data.columns:
fig, ax = plt.subplots()
sns.distplot(data_filed[cols])
ax.title.set_text(cols)
def box_plot(data):
"""
    To find outliers in the data
    :param data: data to be plotted
:return:
"""
print("\n \n ________________________BOX PLOT___________________ \n \n")
data = data.select_dtypes(exclude="object")
for cols in data.columns:
fig, ax = plt.subplots()
sns.boxplot(data[cols])
ax.title.set_text(cols)
# =============================_____________________FEATURE ENGINEERING_________________===============================
def feature_engineering(data):
"""
To clean and add features in dataset
:param data: Dataset to be cleaned
:return: cleaned dataset
"""
print("\n \n ________________FEATURE ENGINEERING_________________ \n \n")
# =====================__________________OUTLIERS________________==========================
# We need to deal with outliers
    # Too many outliers in Total_Bilirubin; the whole column could be dropped after calculating indirect Bilirubin
    # Direct_Bilirubin has many outliers above the 85th percentile; eliminating them
data = data.drop(
data[data.Direct_Bilirubin > data.Direct_Bilirubin.quantile(0.85)].index
)
    # Alkaline Phosphotase has many outliers above the 82nd percentile; eliminating them
data = data.drop(
data[data.Alkaline_Phosphotase > data.Alkaline_Phosphotase.quantile(0.82)].index
)
# Alamine Aminotransferase has heavy outliers after 93% quantile, eliminating such outliers
data = data.drop(
data[
data.Alamine_Aminotransferase > data.Alamine_Aminotransferase.quantile(0.93)
].index
)
    # Aspartate Aminotransferase has heavy outliers above the 92% quantile, eliminating such outliers
data = data.drop(
data[
data.Aspartate_Aminotransferase
> data.Aspartate_Aminotransferase.quantile(0.92)
].index
)
    # All the major outliers are taken care of, but Total and Direct Bilirubin are still heavily right skewed.
# Further removal of data will decrease data size
# =============================____________________IMPUTING MISSING VALUES_________________=================
    # Since all features are numerical except Gender, drop rows where Gender is missing.
    # Fill NA of numerical data with the median, as the dataset has too many outliers for the mean to be robust
    data.dropna(subset=["Gender"], inplace=True)
    data.fillna(data.median(numeric_only=True), inplace=True)
# ===========================_____________________ADDING NEW FEATURES_______________________================
# Indirect Bilirubin is calculated not tested
data["Indirect_Bilirubin"] = data["Total_Bilirubin"] - data["Direct_Bilirubin"]
# Normal and high Bilirubin level in Total Bilirubin can be grouped together
data["TotalBilirubinGroup"] = data["Total_Bilirubin"].apply(
lambda x: "Normal" if x <= 1.2 else "High"
)
# Normal and high Bilirubin level in Direct Bilirubin can be grouped together
data["DirectBilirubinGroup"] = data["Direct_Bilirubin"].apply(
lambda x: "Normal" if x <= 0.3 else "High"
)
# Low, normal and high Bilirubin level in Indirect Bilirubin can be grouped together
data["IndirectBilirubinGroup"] = data["Indirect_Bilirubin"].apply(
lambda x: "Low" if x < 0.3 else ("Normal" if 0.3 <= x <= 1.0 else "High")
)
# Alkaline phosphotase levels in high and low bins
data["Alkaline_PhosphotaseGroup"] = data["Alkaline_Phosphotase"].apply(
lambda x: "Low" if x < 20.0 else ("Normal" if 20.0 <= x <= 140.0 else "High")
)
# Alamine Aminotransferase levels in high and low bins
data["Alamine_AminotransferaseGroup"] = data["Alamine_Aminotransferase"].apply(
lambda x: "Low" if x < 20.0 else ("Normal" if 20.0 <= x <= 60.0 else "High")
)
# Aspartate Aminotransferase (Male) levels
data.loc[(data["Gender"] == "Male"), "AspartateLevel"] = data[
"Aspartate_Aminotransferase"
].apply(lambda x: "Low" if x < 6 else ("Normal" if 6 <= x <= 34 else "High"))
# Aspartate Aminotransferase (FEMALE)
data.loc[(data["Gender"] == "Female"), "AspartateLevel"] = data[
"Aspartate_Aminotransferase"
].apply(lambda x: "Low" if x < 8 else ("Normal" if 8 <= x <= 40 else "High"))
# Total protein levels
data["Total_Protiens_Level"] = data["Total_Protiens"].apply(
lambda x: "Low" if x < 6.0 else ("Normal" if 6.0 <= x <= 8.3 else "High")
)
# Albumin levels
data["Albumin_Level"] = data["Albumin"].apply(
lambda x: "Low" if x < 3.4 else ("Normal" if 3.4 <= x <= 5.4 else "High")
)
# ===================___________________REDUCING SKEWNESS BY LOG____________====================
numeric_cols = data.select_dtypes(exclude="object").columns
for cols in numeric_cols:
if cols not in ["Dataset"]:
data[cols] = np.log1p(data[cols])
# ==================___________________VISUALIZING TRANSFORMED DATA____________==================
dist_plot(data)
corr_mat_graph(data, "Feature Engineering")
return data
# Calling the data exploration function
data_exploration(df_train)
# ===================================_________________SPLITTING DATA______________=======================
print("___________________SPLITTING DATA________________")
x_train, x_test = train_test_split(df_train, random_state=42, test_size=0.25)
x_train = feature_engineering(x_train)
x_test = feature_engineering(x_test)
y_train = x_train["Dataset"]
x_train.drop("Dataset", axis=1, inplace=True)
y_test = x_test["Dataset"]
x_test.drop("Dataset", axis=1, inplace=True)
print("train data size", x_train.shape, y_train.shape)
print("Test data size", x_test.shape, y_test.shape)
# =========================__________________SCALING DATA____________====================
sc = StandardScaler()
enc = ce.OrdinalEncoder()
pipe = Pipeline(steps=[("enc", enc), ("sc", sc)])
X_train = pipe.fit_transform(x_train)
X_test = pipe.transform(x_test)
# ===========================________________Model____________________==============================
xgboost = xgb.XGBClassifier(n_jobs=-1)
grid_param = {
"n_estimators": [500, 1000, 1500, 2000],
"max_depth": [9, 10, 11],
"learning_rate": [0.1, 0.07, 0.03, 0.01],
"subsample": [0.5, 1.0],
"booster": ["dart", "gbtree"],
}
# GridSearchCv and Cross Validation
grid = GridSearchCV(xgboost, grid_param, cv=2, scoring="roc_auc")
grid.fit(X_train, y_train)
print("Best Params", grid.best_params_)
model = grid.best_estimator_
# Predicting
predict = model.predict(X_test)
predictions = cross_val_predict(model, X_test, y_test, cv=2)
print(confusion_matrix(y_test, predictions))
score = np.mean(cross_val_score(model, X_test, y_test, cv=2, scoring="roc_auc"))
print(np.around(score, decimals=4))
plt.show()
|
# # 1.0 Computer Vision for Crop Disease Detection
# ## Defining the question
# ### Specifying the Question
# Create an image recognition model to detect wheat rust. Model to recognize if:
# - Wheat is healthy
# - Has stem rust
# - Leaf rust.
# Classifier algorithm used is Artificial Neural Networks.
# The model will facilitate monitoring of wheat crops and help detect the presence of wheat rust on stems or leaves through avenues such as smartphone images.
# This will be a major breakthrough in the ability to monitor and control plant diseases like wheat rust that affect African livelihoods.
# Currently, farmers depend on agricultural professionals such as extension officers or agrovets for advice. These professionals are not easily accessible due to cost or the lack of government extension services.
# ### Metric for success
# - Log Loss
# - AUC - area under the curve (a short sketch of computing both metrics follows below)
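# A minimal sketch of computing both success metrics with scikit-learn on toy
# labels and predicted probabilities (illustrative only -- the exact evaluation
# protocol used by Zindi may differ; the multi-class AUC below uses
# one-vs-rest averaging).
import numpy as np
from sklearn.metrics import log_loss, roc_auc_score

toy_true = np.array([0, 1, 2, 1])  # class indices for 4 toy images
toy_probs = np.array(
    [
        [0.8, 0.1, 0.1],
        [0.2, 0.6, 0.2],
        [0.1, 0.2, 0.7],
        [0.3, 0.5, 0.2],
    ]
)  # predicted class probabilities
print("Log loss:", log_loss(toy_true, toy_probs))
print("AUC (one-vs-rest):", roc_auc_score(toy_true, toy_probs, multi_class="ovr"))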
# ### Understanding the context
# What is wheat rust?
# It is a plant disease that affects wheat. It attacks above-ground plant parts, mainly the stem, leaf sheaths, glumes, awns and even seed, leading to the production of pustules that contain thousands of dry yellow-orange to reddish-brown or black spores. These pustules give the appearance of “rust” on the plant.
# What causes wheat rust?
# It is caused by rust fungi. Water on the leaf surface from intermittent rains or heavy dews, and temperatures conducive for germination and growth of the pathogen, are required for disease development.
# What is the impact of wheat rust?
# It reduces crop yields and hence affects the livelihoods of farmers, decreasing food security across the continent.
# How to Manage Rust?
# - Spray fungicides
# - Destroy infected crop
# - Field Scouting
# Prevention
# - Plant rust resistant crops
# - Destroy infected crops to avoid spread.
# Preventing the disease totally is not possible in all scenarios because:
# - Constant changes in strains (races) of the pathogens
# - In many situations, the varieties remained resistant for only three to four years before showing signs of susceptibility
# ### Recording the experimental design
# The CRISP-DM methodology will be applied. The steps below will be undertaken to create the classifier.
# - Business understanding - understanding the background on wheat rust and its impact
# - Data understanding
# - Exploratory data analysis
# - Feature engineering
# - Data modelling
# - Model interpretation
# ### Data relevance
# Data is sourced from Zindi, a data science competition platform. In turn, the data was made available to Zindi by the organizations described below.
# The bulk of the data was collected in-field by CIMMYT (International Maize and Wheat Improvement Center), a non-profit research and training institution dedicated to the development of improved varieties of wheat and maize, together with CIMMYT partners in Ethiopia and Tanzania.
# The remainder of the data was sourced from public images found on Google Images.
# ## 2.0 Libraries Importation
# Data Manipulation Libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import re # regular expressions
# Progress bar
from tqdm import tqdm
# Read Images
import os
from skimage import io
from PIL import Image
# import cv2 # When open cv was used, there was an error in getting array from image. Using Pillow eliminated the error.
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Model Pre-processing
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# Modelling
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from sklearn.metrics import (
r2_score,
roc_auc_score,
f1_score,
recall_score,
precision_score,
classification_report,
confusion_matrix,
log_loss,
)
# Increase rows and columns visible on the notebook
pd.set_option("display.max_rows", 5000)
pd.set_option("display.max_columns", 50)
# import required libraries
import warnings
warnings.filterwarnings("ignore")
# ## 3.0 Import Images
# Import the train images from the 3 image directories: healthy_wheat, stem_rust and leaf_rust. Each directory contains images that are healthy or have rust on the stem or leaf, so the directory name is used as the label of each image.
# Function to upload the Raw training images
def upload_raw_train_images(image_path, wheat_categories):
images = []
labels = []
# Loop across the three directories having wheat images.
for category in wheat_categories:
# Append the wheat category directory into the main path
full_image_path = image_path + category + "/"
        # Retrieve the filenames from all three wheat directories using the os package.
image_file_names = [
os.path.join(full_image_path, f) for f in os.listdir(full_image_path)
]
        # Read the pixels of a small sample (first 5 files per class) for a quick check
        for file in image_file_names[0:5]:
# image= cv2.imread(file)
image = io.imread(file) # io package from SKimage package
# Append image into list
images.append(np.array(image))
labels.append(category)
return images, labels
wheat_categories = ["healthy_wheat", "stem_rust", "leaf_rust"]
raw_train_images, raw_train_labels = upload_raw_train_images(
"/kaggle/input/cgiar-computer-vision-for-crop-disease/ICLR/train/train/",
wheat_categories,
)
# Function to upload the training images
def upload_train_images(image_path, wheat_categories, height, width):
images = []
labels = []
# Loop across the three directories having wheat images.
for category in wheat_categories:
# Append the wheat category directory into the main path
full_image_path = image_path + category + "/"
        # Retrieve the filenames from all three wheat directories using the os package.
image_file_names = [
os.path.join(full_image_path, f) for f in os.listdir(full_image_path)
]
# Read the image pixels
for file in image_file_names:
# image= cv2.imread(file)
image = io.imread(file) # io package from SKimage package
# Append image into list
image_from_array = Image.fromarray(image, "RGB")
# Resize image
size_image = image_from_array.resize((height, width))
# Append image into list
images.append(np.array(size_image))
# Label for each image as per directory
labels.append(category)
return images, labels
## Invoke the function
# Image resize parameters
height = 256
width = 256
# Get number of classes
wheat_categories = ["healthy_wheat", "stem_rust", "leaf_rust"]
train_images, train_labels = upload_train_images(
"/kaggle/input/cgiar-computer-vision-for-crop-disease/ICLR/train/train/",
wheat_categories,
height,
width,
)
# Size and dimension of output image and labels
train_images = np.array(train_images)
train_labels = np.array(train_labels)
# Check the count and size of images uploaded
print("Shape of test images is " + str(train_images.shape))
print("Shape of test labels is " + str(train_labels.shape))
# ### Display training images
# a) Individual images
def show_train_images(images, labels, images_count):
for i in range(images_count):
index = int(random.random() * len(images))
plt.axis("off")
plt.imshow(images[index])
plt.show()
print("Size of this image is " + str(images[index].shape))
print("Class of the image is " + str(labels[index]))
# Execute the function
print("Train images, sizes and cass labels")
show_train_images(train_images, train_labels, 3)
# a function to show the image batch
def show_batch_train_images(images, labels):
plt.figure(figsize=(15, 15))
for n in range(20):
ax = plt.subplot(5, 5, n + 1)
index = int(random.random() * len(images))
plt.imshow(images[index])
plt.title(labels[index])
# plt.title(CLASS_NAMES[labels[n]==1][0].title())
# print("Size of this image is " + str(images[index].shape))
plt.axis("off")
show_batch_train_images(train_images, train_labels)
# ### Categories of Training Images
# Categories of Images
pd.Series(train_labels).value_counts().reset_index().values.tolist()
# Visualize the images distribution per label
# Plot chart
sns.countplot(train_labels)
plt.show()
# The chart above shows that the data is imbalanced, since it is not evenly distributed across the classes: healthy wheat has the fewest photos while stem rust has the most.
# As the data is imbalanced, class weights will be calculated and passed as an argument when fitting the model
# Class Weights
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(
    class_weight="balanced", classes=np.unique(train_labels), y=train_labels
)
print(np.unique(train_labels))
class_weights
# 2.05: healthy wheat
# 0.81: leaf rust
# 0.77: stem rust
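# Optional sanity check (not in the original notebook): pair each label with its computed weight,
# so the numbers above are easier to read. np.unique returns the labels in the same sorted order
# that compute_class_weight used.
print(dict(zip(np.unique(train_labels), class_weights)))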
# ## 4.0 Images Preprocessing
# #### Label Encoding.
# The train labels are string variables of three types, i.e. healthy_wheat, stem_rust and leaf_rust. These are encoded to convert them to numerical form. Encoding will facilitate converting the labels to categorical variables.
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
train_labels_enc = label_encoder.fit_transform(train_labels)
# Convert the encoded labels to a categorical (one-hot) representation, as expected by the categorical cross-entropy loss used later
# Convert the predicted labels to categorical type
train_labels_cat = to_categorical(train_labels_enc)
# Display the categorical training labels
train_labels_cat
# #### Normalization
# Benefits of normalization
# 1. Reduce the effect of illumination's differences.
# 2. CNN converges faster on [0..1] data than on [0..255].
# Normalize the image pixels
train_images = train_images.astype("float32") / 255
# #### Split the test and validation.
# The validation set will be used to check for overfitting in our model. The test images cannot be used for this as they do not have labels.
# Split test and validation.
# Hold out 10 percent for validation so that we have ample training images
X_train, X_valid, Y_train, Y_valid = train_test_split(
train_images, train_labels_cat, test_size=0.1, random_state=None
)
print(
"X Train count is ",
len(X_train),
"Shape",
X_train.shape,
" and Y train count ",
len(Y_train),
"Shape",
Y_train.shape,
)
print(
"X validation count is ",
len(X_valid),
"Shape",
X_valid.shape,
" and Y validation count ",
len(Y_valid),
"Shape",
Y_valid.shape,
)
# ## 5.0 Baseline Model
# ### Define the CNN model
# Define the CNN Model
# Sequential API to add one layer at a time starting from the input.
model = Sequential()
# Convolution layer with 32 filters first Conv2D layer.
# Each filter transforms a part of the image using the kernel filter. The kernel filter matrix is applied on the whole image.
# Relu activation function used to add non linearity to the network.
model.add(
Conv2D(
filters=32, kernel_size=(5, 5), activation="relu", input_shape=X_train.shape[1:]
)
)
# Convolution layer with 64 filters second Conv2D layer
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
# Max pooling applied. Reduces the size of the image by half. It is a downsampling filter which looks at each 2x2 neighbourhood of pixels and picks the maximal value
model.add(MaxPool2D(pool_size=(2, 2)))
# Dropout applied as a regularization method, where a proportion of nodes in the layer are randomly ignored by setting their weights to zero for each training sample.
# This randomly drops a proportion of the network and forces the network to learn features in a distributed way. This improves generalization and reduces overfitting.
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
# Flatten to convert the final feature maps into a one single 1D vector. Needed so as to make use of fully connected layers after some convolutional/maxpool layers.
# It combines all the found local features of the previous convolutional layers.
model.add(Flatten())
# Dense layer applied to create a fully-connected artificial neural networks classifier.
model.add(Dense(256, activation="relu"))
model.add(Dropout(rate=0.5))
# Neural net outputs distribution of probability of each class.
model.add(Dense(3, activation="softmax"))
model.summary()
# ### Optimize and compile the model
# Categorical crossentropy loss function used as we are predicting the probability per class
# Compilation of the model
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=0.01),
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_accuracy],
)
# train for the chosen number of epochs and save the accuracy for each epoch
epochs = 12
history = model.fit(
    X_train,
    Y_train,
    batch_size=32,
    epochs=epochs,
    validation_data=(X_valid, Y_valid),
    class_weight={i: w for i, w in enumerate(class_weights)},  # Keras expects a dict of class index -> weight
)
# Display of the accuracy and the loss values
plt.figure(0)
plt.plot(history.history["categorical_accuracy"], label="training accuracy")
plt.plot(history.history["val_categorical_accuracy"], label="val accuracy")
plt.title("Accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.legend()
plt.figure(1)
plt.plot(history.history["loss"], label="training loss")
plt.plot(history.history["val_loss"], label="val loss")
plt.title("Loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
plt.show()
# Baseline Model Accuracy
# Create dictionary and dataframe to hold results for various models
results_dict = {
    "Model": ["Baseline CNN", "Mobile Net V2", "Data Augmentation"],
    "AUC": [0, 0, 0],
    "Log Loss": [0, 0, 0],
    "F1 score": [0, 0, 0],
    "Recall": [0, 0, 0],
    "Precision": [0, 0, 0],
}
df_results = pd.DataFrame(
    results_dict, columns=["Model", "Log Loss", "AUC", "F1 score", "Recall", "Precision"]
)
# Function to calculate Results for each model
def model_results(
model_type, y_test_data, y_prediction_data, y_test_class, y_pred_class
):
index_val = df_results[df_results["Model"] == model_type].index
    # Assign scores to the results dataframe
    df_results.loc[index_val, "AUC"] = roc_auc_score(y_test_data, y_prediction_data)
    df_results.loc[index_val, "Log Loss"] = log_loss(y_test_data, y_prediction_data)
df_results.loc[index_val, "F1 score"] = f1_score(
y_test_class, y_pred_class, average="weighted"
)
df_results.loc[index_val, "Recall"] = recall_score(
y_test_class, y_pred_class, average="weighted"
)
df_results.loc[index_val, "Precision"] = precision_score(
y_test_class, y_pred_class, average="weighted"
)
return df_results
# Baseline Prediction
y_prediction = model.predict(X_valid) # make predictions
# Baseline Results
dominant_y_valid = np.argmax(Y_valid, axis=1)
dominant_y_predict = np.argmax(y_prediction, axis=1)
model_results(
"Baseline CNN", Y_valid, y_prediction, dominant_y_valid, dominant_y_predict
)
# Confusion Matrix
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(
cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=75)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
class_names = range(3)
# cm = confusion_matrix(rounded_Y_valid , rounded_Y_predict_trf)
cm = confusion_matrix(dominant_y_valid, dominant_y_predict)
plt.figure(2)
plt.figure(figsize=(5, 5))
plot_confusion_matrix(cm, classes=class_names, title="Confusion matrix")
# ## 6.0 Transfer Learning : Model to use is MobileNetV2
# More about MobileNetV2 here - > https://ai.googleblog.com/2018/04/mobilenetv2-next-generation-of-on.html
# a) Import the MobileNetV2 from keras
# Create the base model from the pre-trained model MobileNet V2
base_model = tf.keras.applications.MobileNetV2(
input_shape=X_train.shape[1:], include_top=False, weights="imagenet"
)
# b) Train The model
# Freeze the pre-trained weights so that only the newly added layers are trained
base_model.trainable = False
# Define the pre-trained model
pretrained_model = tf.keras.Sequential(
[
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(3, activation="softmax"),
]
)
pretrained_model.compile(
optimizer=tf.keras.optimizers.Adam(lr=0.01),
loss=tf.keras.losses.categorical_crossentropy,
metrics=[tf.keras.metrics.categorical_accuracy],
)
pretrained_model.summary()
# c) Fitting
# Fit the pretrained model to the data
history_trf = pretrained_model.fit(
    X_train,
    Y_train,
    epochs=5,
    batch_size=32,
    validation_data=(X_valid, Y_valid),
    class_weight={i: w for i, w in enumerate(class_weights)},
)
# Graph of accuracy and loss for training and validation
# Display of the accuracy and the loss values
plt.figure(0)
plt.plot(history_trf.history["categorical_accuracy"], label="training accuracy")
plt.plot(history_trf.history["val_categorical_accuracy"], label="val accuracy")
plt.title("Accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.legend()
plt.figure(1)
plt.plot(history_trf.history["loss"], label="training loss")
plt.plot(history_trf.history["val_loss"], label="val loss")
plt.title("Loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
plt.show()
# ### Mobile Net V2 Transfer Learning Results
# #### a) AUC and Log Loss
# Mobile Net V2 Prediction
y_prediction_trf = pretrained_model.predict(X_valid) # make predictions
# Baseline Results
dominant_y_valid = np.argmax(Y_valid, axis=1)
dominant_y_predict = np.argmax(y_prediction_trf, axis=1)
model_results(
    "Mobile Net V2", Y_valid, y_prediction_trf, dominant_y_valid, dominant_y_predict
)
# #### b) Classification Report
print(classification_report(dominant_y_valid, dominant_y_predict))
# #### Confusion Matrix
# Confusion Matrix
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(
cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=75)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
class_names = range(3)
# cm = confusion_matrix(rounded_Y_valid , rounded_Y_predict_trf)
cm = confusion_matrix(dominant_y_valid, dominant_y_predict)
plt.figure(2)
plt.figure(figsize=(5, 5))
plot_confusion_matrix(cm, classes=class_names, title="Mobile Net V2 Confusion matrix")
# ## 7.0 Image Data Augmentation
# We will generate more image data using ImageDataGenerator. The ImageDataGenerator package artificially creates training images through different processing steps, or combinations of them, such as random rotations, shifts, shears and flips.
# Generate more image data
from keras.preprocessing.image import ImageDataGenerator
train_generator = ImageDataGenerator(
    zoom_range=0.3, horizontal_flip=True, rotation_range=30
)  # no rescaling here: the images were already scaled to [0, 1] above
val_generator = ImageDataGenerator()
train_generator = train_generator.flow(
np.array(X_train), Y_train, batch_size=32, shuffle=False
)
val_generator = val_generator.flow(
np.array(X_valid), Y_valid, batch_size=32, shuffle=False
)
# Train and test the model
history_idg = pretrained_model.fit(
    train_generator,
    epochs=10,
    shuffle=False,
    class_weight={i: w for i, w in enumerate(class_weights)},
)
# Display of the accuracy and the loss values
plt.figure(0)
plt.plot(history_idg.history["categorical_accuracy"], label="training accuracy")
# plt.plot(history_idg.history['val_categorical_accuracy'], label='val accuracy')
plt.title("Accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.legend()
plt.figure(1)
plt.plot(history_idg.history["loss"], label="training loss")
# plt.plot(history_idg.history['val_loss'], label='val loss')
plt.title("Loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
plt.show()
# ## Subject the model to test data
# a) Import the test data from test directory
# Function to upload the test images
def upload_test_images(image_path, height, width):
test_images = []
test_image_paths = []
# Retrieve the filenames from the all the test directory
test_image_file_names = [
os.path.join(image_path, f) for f in os.listdir(image_path)
]
# Read the image pixels
for file in test_image_file_names:
test_image = io.imread(file)
# Append image into list
test_image_from_array = Image.fromarray(test_image, "RGB")
# Resize image
test_size_image = test_image_from_array.resize((height, width))
# Append image into list
test_images.append(np.array(test_size_image))
test_image_paths.append(file)
return test_images, test_image_paths
## Invoke the function
# Image resize parameters
height = 256
width = 256
test_images, test_image_paths = upload_test_images(
"/kaggle/input/cgiar-computer-vision-for-crop-disease/ICLR/test/test/",
height,
width,
)
test_images = np.array(test_images)
# Size and dimension of test image
print("Shape of test images is " + str(test_images.shape))
# Check image paths
test_image_paths[0:5]
# Image name is part of the full image path as above. We will separate the name from the image path as below
# use regular expressions to extract the name of image
image_names = []
for i in test_image_paths:
# name = i
i = re.sub("[^A-Z0-9]", "", str(i))
i = i.replace("JPG", "")
i = i.replace("PNG", "")
i = i.replace("JPEG", "")
i = i.replace("JFIF", "")
i = i.replace("JFIF", "")
i.strip()
image_names.append(i)
# View images
image_names[0:5]
# Prediction (class probabilities) for all test images
y_prediction = model.predict(test_images)  # make predictions
y_prediction[400:500]
# Prediction for all images per test image
test_images = np.array(test_images)
preds = []
for img in tqdm(test_images):
img = img[np.newaxis, :] # add a new dimension
    prediction = pretrained_model.predict(img)  # predicted class probabilities
preds.append(prediction)
preds
# healthy_wheat = 0, leaf_rust = 1, stem_rust = 2 (LabelEncoder assigns indices in alphabetical order)
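# Optional sanity check (not part of the original flow): confirm the encoder's class order matches
# the mapping noted above. LabelEncoder assigns indices to classes in sorted (alphabetical) order.
print(dict(zip(label_encoder.classes_, range(len(label_encoder.classes_)))))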
# create a placeholder dataframe with one probability column per class
healthy_wheat = pd.Series(range(610), name="healthy_wheat", dtype=np.float32)
stem_rust = pd.Series(range(610), name="stem_rust", dtype=np.float32)
leaf_rust = pd.Series(range(610), name="leaf_rust", dtype=np.float32)
submission = pd.concat([healthy_wheat, stem_rust, leaf_rust], axis=1)
for i in range(0, len(preds)):
    # the model outputs probabilities in encoder order (healthy_wheat, leaf_rust, stem_rust),
    # so map them explicitly onto the column order of the submission frame
    p = preds[i].flatten()
    submission.loc[i] = [p[0], p[2], p[1]]
# Append the image names to the result output
submission["ID"] = image_names
submission
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# ### Feature Engineering
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train.head()
train = train[["text", "target"]]
test = test[["id", "text"]]
from nltk.stem import PorterStemmer
import nltk
from nltk.corpus import stopwords
stopwords = set(stopwords.words("english"))
import re
# Remove stopwords (or, are, is etc) from data
train["text"] = train["text"].apply(
lambda x: " ".join([word for word in x.split() if word not in (stopwords)])
)
test["text"] = test["text"].apply(
lambda x: " ".join([word for word in x.split() if word not in (stopwords)])
)
corpus_train = train["text"]
corpus_test = test["text"]
def replace(text):
    text = text.str.replace(r"^.+@[^\.].*\.[a-z]{2,}$", " ", regex=True)  # remove email addresses
    text = text.str.replace(r"\W+", " ", regex=True)  # remove symbols and punctuation
    text = text.str.replace(r"\s+", " ", regex=True)  # collapse repeated whitespace
    text = text.str.replace(r"\d+", " ", regex=True)  # remove numbers
    text = text.str.lower()  # lowercase, since capitalisation does not change the meaning here
    return text
corpus_train = replace(corpus_train)
corpus_test = replace(corpus_test)
import nltk
nltk.download("wordnet")
from textblob import Word
# Remove rare words, i.e. those that occur very infrequently in the training corpus
freq = pd.Series(" ".join(corpus_train).split()).value_counts()[-19500:]
corpus_train = corpus_train.apply(
lambda x: " ".join(x for x in x.split() if x not in freq)
)
freq.head()
freq = pd.Series(" ".join(corpus_test).split()).value_counts()[-10000:]
corpus_test = corpus_test.apply(
lambda x: " ".join(x for x in x.split() if x not in freq)
)
# Visualise the most frequently occurring words from the training corpus
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def wordcloud(text):
wordcloud = WordCloud(
background_color="white",
max_words=500,
max_font_size=30,
scale=3,
random_state=5,
    ).generate(str(text))
fig = plt.figure(figsize=(15, 12))
plt.axis("off")
plt.imshow(wordcloud)
plt.show()
wordcloud(corpus_train)
import seaborn as sns
target = train["target"]
sns.countplot(target)
# Unlike humans, machines cannot understand raw text, so we need to convert the text into a corresponding numerical form.
# TfidfVectorizer scores each word by its frequency in a document, down-weighted by how common the word is across all documents
from sklearn.feature_extraction.text import TfidfVectorizer
Tfidf_vect = TfidfVectorizer(max_features=7000)
Tfidf_vect.fit(corpus_train)
X_train = Tfidf_vect.transform(corpus_train)
X_test = Tfidf_vect.transform(corpus_test)
# ### Hyperparameter Tuning
# Hyperparameters are settings of a model that are chosen before training. Adjusting them to suit the data is called *Hyperparameter Tuning*.
# The default parameters of an algorithm are rarely the best ones for every dataset,
# so hyperparameter tuning is used to choose better ones.
# Two common approaches are *Grid SearchCV* and *Random SearchCV*.
#
# *Grid Search* tries every combination of parameter values from a specified grid and keeps the one that performs best. We will use the Grid Search approach in this problem.
# *Random Search*, in contrast, samples parameter combinations at random from the specified ranges; a sketch of this alternative is shown below, before the grid search itself.
# Grid Search evaluates parameter combinations at fixed, evenly spaced points of the grid, while Random Search samples combinations with no such fixed pattern.
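# For contrast with the Grid Search used below, a minimal Random Search sketch is shown here.
# This is only an illustration: n_iter, cv and the sampling ranges are assumed values, not tuned ones,
# and the final results in this notebook come from the Grid Search.
from scipy.stats import loguniform
from sklearn.svm import SVC
from sklearn.model_selection import RandomizedSearchCV

random_search = RandomizedSearchCV(
    SVC(),
    param_distributions={
        "C": loguniform(1e-3, 1e1),  # sampled on a log scale instead of a fixed grid
        "gamma": loguniform(1e-3, 1e0),
        "kernel": ["rbf", "linear"],
    },
    n_iter=20,
    cv=5,
    n_jobs=-1,
    random_state=0,
)
# random_search.fit(X_train, target)  # same data as the Grid Search below; uncomment to run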
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
parameters = {
"gamma": [
0.001,
0.01,
0.1,
0.4,
0.6,
0.7,
"auto",
    ],  # for a more complex decision boundary (mainly used with the rbf kernel)
"kernel": ["rbf", "linear"], # used for different type of data
# linear - when data is easy to classify
# rbf - when data is too complex
"C": [
0.001,
0.01,
0.1,
1,
1.5,
2,
3,
10,
    ],  # C is the inverse of the regularization strength
    # (larger C fits the training data more closely; smaller C increases regularization and helps prevent overfitting)
}
model = GridSearchCV(SVC(), parameters, cv=10, n_jobs=-1).fit(X_train, target)
model.cv_results_["params"][model.best_index_]
y_val_pred = model.predict(X_test)
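# For reference, the best parameter set and its mean cross-validated score can also be read
# directly from the fitted GridSearchCV object.
print(model.best_params_)
print(model.best_score_)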
# The hyperparameter tuning above is time consuming, so below we use its best parameters directly.
# Here we will use **SVC (Support Vector Classifier)**.
# SVC fits the training data by finding the best separating hyperplane between the classes;
# prediction then assigns new samples to a class according to which side of the hyperplane they fall on.
from sklearn.svm import SVC
SVM = SVC(C=1.0, kernel="linear", gamma="auto")
SVM.fit(X_train, target)
SVM_predictions = SVM.predict(X_test)
# ### Prediction
file_submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
file_submission.target = SVM_predictions
file_submission.to_csv("submission.csv", index=False)
|
# # Introduction
# ## Predicting penguin species - modelling with binary classification
# The data for this activity comes from a study of three different species of Antarctic penguin: Adelie, Chinstrap and Gentoo.
# In this notebook you will explore which features can be used to predict the species of a penguin.
# You will build a model to predict whether a particular penguin is a Gentoo penguin, or not. So this is an example of **binary classification**.
# ## Importing libraries and data
# > Run the code boxes below to import the libraries and the data.
# import pandas and seaborn
import pandas as pd
import seaborn as sns
# import modelling functions for decision trees from sklearn
from sklearn.tree import plot_tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# import pyplot from matplotlib for displaying decision trees
import matplotlib.pyplot as plt
# create a plot_decision_tree command for displaying the decision tree
def plot_decision_tree(model, size=14):
fig, ax = plt.subplots(figsize=(18, 8))
plot_tree(
model,
filled=True,
impurity=False,
label="root",
feature_names=input_features,
proportion=True,
class_names=["No", "Yes"],
ax=ax,
fontsize=size,
)
plt.show()
# import the data - the .dropna() function removes any rows with missing data
penguin_data = pd.read_csv("/kaggle/input/penguins/penguins.csv").dropna()
# display the data to check it has imported
penguin_data
# This data set contains information about penguins observed on the islands of the Palmer Archipelago, Antarctica between 2007 and 2009.
# There are 344 individual records, each representing a single penguin.
# Features:
# * **Species**: *Adélie, Chinstrap or Gentoo*
# * **Island**: *Where the penguin was observed (Biscoe, Dream or Torgersen)*
# * **bill_length_mm**: *Bill length (mm)*
# * **bill_depth_mm**: *Bill depth (mm)*
# * **flipper_length_mm**: *Flipper length (mm)*
# * **body_mass_g**: *Body mass (g)*
# > Run the code below to explore the data types for the features.
# display the data types
penguin_data.info()
# # Pre-processing the data
# ## Adding a binary feature
# In this activity you are going to create a model that predicts whether a penguin is a Gentoo penguin. To do this you will need a *binary* feature that is 0 or 1 depending on whether the penguin is a Gentoo.
# > Run the code below to create a new feature `gentoo` that is 1 for all the Gentoo penguins and 0 otherwise.
# add a binary feature to identify Gentoo penguins
penguin_data["gentoo"] = penguin_data["species"].replace(
{"Adelie": 0, "Chinstrap": 0, "Gentoo": 1}
)
# check the data
penguin_data
# # Exploratory data analysis
# ## Statistics
# You can use `groupby` to see if there are any obvious differences in the measurements for the different species.
# > Run the code below to explore the statistics for `bill_length_mm` for the different species.
# display the statistics for bill_length_mm grouped by species
penguin_data.groupby("species")["bill_length_mm"].describe().round(2)
# > Add code in the boxes below to explore the statistics for `bill_depth_mm`, `flipper_length_mm` and `body_mass_g` grouped by species.
# display the statistics for bill_depth_mm grouped by species
# display the statistics for flipper_length_mm grouped by species
# display the statistics for body_mass_g grouped by species
# ## Visualisations
# KDE plots grouped by species will help compare the distributions.
# > Run the code in the box below to create a KDE plot for `bill_length_mm` grouped by species.
# KDE plot of bill length grouped by species
sns.displot(data=penguin_data, kind="kde", x="bill_length_mm", hue="species", aspect=2)
# > Add code in the boxes below to create KDE plots for `bill_depth_mm`, `flipper_length_mm` and `body_mass_g` grouped by species.
# KDE plot of bill_depth_mm grouped by species
# KDE plot of flipper_length_mm grouped by species
# KDE plot of body_mass_g grouped by species
# ## Checkpoint 1
# > * Use your statistics and visualisations to describe the differences between the three species of penguin.
# > * Explain which feature you think would be the most useful to distinguish a Gentoo penguin from the other two species.
# # Building a model
# In this activity you are going to generate some binary classification models. These will split the data into two groups depending on some of the values of the numerical features. For example, a simple model might be to classify penguins with bill lengths greater than 40mm as Gentoo penguins and those with bill lengths less than 40mm as not Gentoo penguins.
# You will use an 80:20 training-testing split for your data as you saw in lesson 5. Your model will be built using 80% of the data and the remaining 20% will be used to measure how well the model performs on unseen data.
# * To create a model you will send your training data to a machine learning algorithm, `DecisionTreeClassifier`, which will find the best split for your choice of input features.
# * You will then measure your model by finding the *accuracy* using the testing data, i.e. the percentage of penguins it has correctly identified.
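# As a concrete illustration of the simple 40mm rule described above, the code below applies that hand-made rule to the whole data set and checks its accuracy. This is only an optional sketch, not part of the modelling workflow that follows.
# apply the hand-made 40mm bill length rule and measure its accuracy
rule_prediction = (penguin_data["bill_length_mm"] > 40).astype(int)
rule_accuracy = (rule_prediction == penguin_data["gentoo"]).mean()
print("Accuracy of the 40mm rule:", round(100 * rule_accuracy, 1), "%")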
# ## Building a model based on bill length
# The code for building and measuring a binary classification model has a very similar structure to the code you met for building a linear regression model in lesson 5. The main differences are that it will display the model using a *decision tree* and the model will be measured using *accuracy*, the percentage of items correctly identified when the model is applied to the testing data.
# The code block builds and measures the model using five stages:
# * Define the input features and the target feature. The input can be a single feature or a list of features. The target feature has the values 0 or 1.
# * Perform a training-testing split. This creates four objects: a training set of inputs and training list of outputs for building the model; a testing set of inputs and testing list of outputs for measuring the model.
# * Find the best model for the given input features using the machine learning algorithm `DecisionTreeClassifier`.
# * Display the model using a decision tree.
# * Create a list of target predictions using the testing data inputs and compare these with the actual values in the testing target list. The percentage correct is given as the accuracy.
# > Run the code in the box below to create a binary classification model using `bill_length` as the input.
# define the input feature(s) and output (or target) feature
input_features = ["bill_length_mm"]
input_data = penguin_data[input_features]
target_data = penguin_data["gentoo"]
# use the train_test_split command to create training and testing testing data
input_train, input_test, target_train, target_test = train_test_split(
input_data, target_data, train_size=0.8, random_state=1
)
# create the model
tree_model = DecisionTreeClassifier(max_depth=1).fit(input_train, target_train)
# display the model
plot_decision_tree(tree_model)
# create a list of the predictions, display a two-way-table of predictions/actual values for the testing data, calculate the accuracy for the testing data
target_pred = tree_model.predict(input_test)
print("Accuracy: ", (100 * accuracy_score(target_test, target_pred)).round(1), "%")
# ## Interpreting a decision tree
# Decision trees are used to visualise binary classification models. The top line of the first box, or *node*, tells you how to split the data: you follow the left arrow if this statement is true and the right arrow otherwise. The final line in the bottom row of boxes tells you whether the model has classified the groups as 1/Yes or 0/No.
# The model shown here suggests that penguins with a bill length less than or equal to 41.6mm should be classified as not Gentoo and those with a bill length greater than 41.6mm should be classified as Gentoo. When this model is applied to the testing data it has accurately predicted whether 71.6% of the penguins are Gentoo or not. It has therefore misclassified 28.4% of penguins.
# The *samples* and *value* lines in the boxes show what proportion of the testing data is being considered at each node and the fraction of these that are 0 or 1. This is useful information for more detailed analysis of decision trees but is beyond the scope of this course.
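# The split point described above can also be read directly from the fitted tree object rather than from the diagram. This is an optional sketch, not part of the original activity; `tree_model` here is the bill-length model fitted earlier.
# read the feature and threshold used at the root node of the fitted tree
root_feature = input_features[tree_model.tree_.feature[0]]
root_threshold = tree_model.tree_.threshold[0]
print("Root split:", root_feature, "<=", round(float(root_threshold), 1))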
# ## Building a model based on other features
# > Add code to the boxes below to build models based on `bill_depth_mm`, `flipper_length_mm` and `body_mass_g`.
# Build a model based on bill depth
# Build a model based on flipper length
# Build a model based on body mass
# ## Checkpoint 2
# > * Which input feature gives the best model and which input feature gives the worst model?
# > * Explain how this is consistent with your responses to Checkpoint 1.
# # Exploration 1
# ## Building a model based on other features
# > Copy and paste code below to build and test a model on `flipper_length_mm`.
# >
# > Use the confusion matrix to calculate the precision and recall of your model.
# calculate the precision
# calculate the recall
# You should find that this model, which splits at a flipper length of 207.5mm, has a precision of 95% and a recall of 95%.
# > In the cells below build some models based on other features.
# Build a model based on another feature
# calculate the precision
# calculate the recall
# Build a model based on another feature
# calculate the precision
# calculate the recall
# # Building a model based on two input features
# Just as you did with your regression models, you can give more than one input feature. You'll need to increase the `max_depth` of your tree beyond 1.
# Define the input features
input_features = ["body_mass_g", "flipper_length_mm"]
input_data = penguin_data[input_features]
# re-do the training-testing split with the new input features
input_train, input_test, target_train, target_test = train_test_split(
    input_data, target_data, train_size=0.8, random_state=1
)
# create the model
tree_model = DecisionTreeClassifier(max_depth=3).fit(input_train, target_train)
# display the model
plot_decision_tree(tree_model)
# create a list of the predictions
target_pred = tree_model.predict(input_test)
# display a two-way-table of predictions and actual values for the testing data
print(
    pd.crosstab(
        target_test, target_pred, rownames=["Actual"], colnames=["Predicted"], margins=True
    )
)
# display accuracy score
print(accuracy_score(target_test, target_pred))
# calculate the precision
25 / 26 * 100
# calculate the recall
25 / 26 * 100
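# As an alternative to the manual calculation above, precision and recall can also be computed
# directly with scikit-learn. This is an optional sketch; it uses the target_test and target_pred
# variables from the two-feature model built above.
from sklearn.metrics import precision_score, recall_score

print("Precision: ", (100 * precision_score(target_test, target_pred)).round(1), "%")
print("Recall: ", (100 * recall_score(target_test, target_pred)).round(1), "%")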
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
glass = pd.read_csv("../input/glass/glass.csv")
glass
sns.pairplot(glass, hue="Type")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(glass.drop("Type", axis=1))
scaled_features = scaler.transform(glass.drop("Type", axis=1))
scaled_df = pd.DataFrame(data=scaled_features, columns=glass.columns[:-1])
scaled_df.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
scaled_df, glass["Type"], test_size=0.33
)
from sklearn.neighbors import KNeighborsClassifier
error_rate = []
for i in range(1, 50):
kn = KNeighborsClassifier(n_neighbors=i)
kn.fit(X_train, y_train)
pred_i = kn.predict(X_test)
error_rate.append(np.mean(y_test != pred_i))
plt.figure(figsize=(11, 5))
plt.plot(range(1, 50), error_rate, marker="o", markerfacecolor="red")
# It seems the best n_neighbors to use is either 1 or 2. As it goes higher, the number of errors rises too.
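# Optional check (not in the original notebook): pick the k with the lowest error rate programmatically.
# np.argmin returns the position of the first minimum, and +1 maps it back to n_neighbors.
best_k = int(np.argmin(error_rate)) + 1
print("Lowest error rate at n_neighbors =", best_k)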
kn = KNeighborsClassifier(n_neighbors=2)
kn.fit(X_train, y_train)
pred = kn.predict(X_test)
from sklearn.metrics import confusion_matrix, classification_report
print(confusion_matrix(y_test, pred))
print("\n")
print(classification_report(y_test, pred))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Importing All Dependencies
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# ## Data Preparation & Cleaning
# Load csv file
# Creating the dataframe and understand the data present in the dataset
# Dealing with the missing data and the incorrect records
#
df = pd.read_csv("/kaggle/input/google-playstore-apps/Google-Playstore.csv")
# View the first 5 records
df.head()
# View the last 5 rows
df.tail()
# View the columns names in one row
# # Observations
df.columns
# View the count of rows and columns
print(df.shape)
print("Total rows of the data : 2312944")
print("Total columns of the data : 24")
# View the composition
df.info()
# # Descriptive Statistics
df.describe()
# Observations
# We have 5 numerical columns
# Minimum Rating, Rating Count & Price are 0
# Maximum Price of any app is 400 (4.0e+02)
# # Missing Values
df.isnull().sum().sort_values(ascending=False)
# ### Percentage of missing values
missing_percentage = df.isnull().sum().sort_values(ascending=False) / len(df) * 100
missing_percentage
# # Plot the missing/null values
import missingno as msno
msno.matrix(df)
# # Plot the percentage of the missing/null values
missing_percentage = missing_percentage[missing_percentage != 0]
import matplotlib
matplotlib.rcParams["figure.figsize"] = (20, 6)
missing_percentage.plot(kind="barh")
plt.title("Missing percentage of null values")
# # Observation
# Columns having the highest percentage of missing/null values are:
# * Developer Website
# * Privacy Policy
# These two columns are not very helpful, so we can drop them
# when summarising the data.
# * We can drop the rows with small numbers of null values for columns like
# Size,
# Currency,
# Installs,
# Minimum Installs,
# Developer Id,
# Developer Email.
# * Rating, Rating Count and Released are important columns, so it is better to fill their null values (a sketch of one way to do this is shown after the null check below).
#
df.dropna(
subset=[
"App Name",
"Installs",
"Minimum Installs",
"Size",
"Currency",
"Developer Id",
"Developer Email",
],
inplace=True,
)
df.isnull().sum()
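# A minimal sketch of one way the remaining important columns could be filled, as mentioned in the
# observations above. The exact column names ("Rating", "Rating Count", "Released") and the fill
# values chosen here are assumptions for illustration, not part of the original analysis.
df["Rating"] = df["Rating"].fillna(0)  # treat missing ratings as "no ratings yet"
df["Rating Count"] = df["Rating Count"].fillna(0)
df["Released"] = df["Released"].fillna("Unknown")  # keep a marker rather than guessing a date
df.isnull().sum().head(10)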
# # Cleaning Each Row
# checking any duplicates
df.duplicated().sum()
# # Observation
# There are no duplicated values in the dataset
# # Unique values
df.nunique()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
data.head()
test_data.head()
data.describe().sum()
data.describe(include=["object"])
data.isnull().sum()
# removing the column cabin
data = data.drop("Cabin", axis=1)
data.isnull().sum()
# filling the missing age values
data["Age"] = data["Age"].fillna(0)
# filling the missing embarked sections
temp = data.Embarked.describe()["top"]  # most frequent embarkation port
data["Embarked"] = data["Embarked"].fillna(temp)
data.isnull().sum()
data[["Pclass", "Survived"]].groupby(by="Pclass").mean()
data.Pclass.unique()
data[["Sex", "Survived"]].groupby(by="Sex").mean()
data[["Embarked", "Survived"]].groupby(by="Embarked").mean()
# # Data Cleansing and Feature Extractions
# # train data
data.head()
data["Title"] = data.Name.str.extract(" ([A-Za-z]+)\.", expand=False)
pd.crosstab(data["Title"], data["Sex"])
data["Title"] = data["Title"].replace(["Miss", "Mrs"], "Ladies")
data.head()
data["Age_range"] = pd.cut(data.Age, 5)
data[["Age_range", "Survived"]].groupby(by="Age_range").sum()
# classifying the data set on age ranges
data.loc[data["Age"] < 16.0, "Age"] = 0
data.loc[(data["Age"] >= 16.0) & (data["Age"] < 32.0), "Age"] = 1
data.loc[(data["Age"] >= 32.0) & (data["Age"] < 48.0), "Age"] = 2
data.loc[(data["Age"] >= 48.0) & (data["Age"] < 64.0), "Age"] = 3
data.loc[(data["Age"] >= 64.0) & (data["Age"] < 80.0), "Age"] = 4
data.loc[data["Age"] >= 80.0, "Age"] = 5
data.Age = data.Age.astype(int)
data.head()
# creating a new column relation
data["Relation"] = data.SibSp + data.Parch
data[["Relation", "Survived"]].groupby(by="Relation").mean()
# people having no relationships
data["No_one"] = 0
data.loc[data["Relation"] >= 1, "No_one"] = 1
data.head()
data[["No_one", "Survived"]].groupby(by="No_one").mean()
data = data.drop(["Title", "Name", "Age_range", "Relation"], axis=1)
data.head()
data["Fare_cut"] = pd.cut(data.Fare, 5)
data[["Fare_cut", "Survived"]].groupby(by="Fare_cut").mean()
data.loc[data["Fare"] < 102.0, "Fare"] = 0
data.loc[(data["Fare"] >= 102.0) & (data["Fare"] < 204.0), "Fare"] = 1
data.loc[(data["Fare"] >= 204.0) & (data["Fare"] < 307.0), "Fare"] = 2
data.loc[(data["Fare"] >= 307.0) & (data["Fare"] < 409.0), "Fare"] = 3
data.loc[(data["Fare"] >= 409.0) & (data["Fare"] < 512.0), "Fare"] = 4
data.loc[data["Fare"] >= 512.0, "Fare"] = 5
data.Fare = data.Fare.astype("int")
data.head()
data["Embarked"].unique()
data.Embarked = data.Embarked.replace({"S": 0, "C": 1, "Q": 2})
data.Sex = data.Sex.replace({"male": 0, "female": 1}).astype("int")
data.head()
# replacing the values of embarked with 0 1 2
data.Embarked = data.Embarked.replace({"S": 0, "C": 1, "Q": 2})
data.head()
data = data.drop("Ticket", axis=1)
data.head()
# # test data
test_data.head()
test_data["Age"] = test_data["Age"].fillna(0)
test_data["Embarked"] = test_data["Embarked"].fillna(temp)
test_data["age_range"] = pd.cut(test_data.Age, 5)
test_data.loc[test_data["Age"] < 16.0, "Age"] = 0
test_data.loc[(test_data["Age"] >= 16.0) & (test_data["Age"] < 32.0), "Age"] = 1
test_data.loc[(test_data["Age"] >= 32.0) & (test_data["Age"] < 48.0), "Age"] = 2
test_data.loc[(test_data["Age"] >= 48.0) & (test_data["Age"] < 64.0), "Age"] = 3
test_data.loc[(test_data["Age"] >= 64.0) & (test_data["Age"] < 80.0), "Age"] = 4
test_data.loc[test_data["Age"] >= 80.0, "Age"] = 5
test_data["Relation"] = test_data.SibSp + test_data.Parch
test_data["No_one"] = 0
test_data.loc[test_data["Relation"] >= 1, "No_one"] = 1
test_data = test_data.drop(["age_range", "Relation", "Name"], axis=1)
test_data = test_data.drop(["Ticket"], axis=1)
test_data.head()
test_data["fare_cut"] = pd.cut(test_data.Fare, 5)
test_data.loc[test_data["Fare"] < 102.0, "Fare"] = 0
test_data.loc[(test_data["Fare"] >= 102.0) & (test_data["Fare"] < 204.0), "Fare"] = 1
test_data.loc[(test_data["Fare"] >= 204.0) & (test_data["Fare"] < 307.0), "Fare"] = 2
test_data.loc[(test_data["Fare"] >= 307.0) & (test_data["Fare"] < 409.0), "Fare"] = 3
test_data.loc[(test_data["Fare"] >= 409.0) & (test_data["Fare"] < 512.0), "Fare"] = 4
test_data.loc[test_data["Fare"] >= 512.0, "Fare"] = 5
test_data.Fare = test_data.Fare.fillna(0)
test_data.Fare = test_data.Fare.astype("int")
# test_data.Embarked=test_data.Embarked.replace({'S':0,'C':1,'Q':2})
test_data = test_data.drop(["SibSp", "Parch", "fare_cut"], axis=1)
test_data.head()
Id = test_data["PassengerId"]
test_data = test_data.drop("PassengerId", axis=1)
test_data.head()
test_data.Sex = test_data.Sex.replace({"male": 0, "female": 1}).astype("int")
test_data.Age = test_data.Age.astype(int)
test_data = test_data.drop(["Cabin", "Fare"], axis=1)
test_data.head()
y = data.Survived
data = data.drop("Survived", axis=1)
data = data.drop(["PassengerId", "SibSp", "Parch", "Fare", "Fare_cut"], axis=1)
data.head()
# # Predictions
# Logistic Regression
logistic = LogisticRegression().fit(data, y)
logistic_score = logistic.score(data, y)
logistic_score
# SVC
vector = SVC().fit(data, y)
vector_score = vector.score(data, y)
vector_score
# decision tree
tree = DecisionTreeClassifier().fit(data, y)
tree_score = tree.score(data, y)
tree_score
# random forest
random = RandomForestClassifier(n_estimators=100).fit(data, y)
random_score = random.score(data, y)
random_score
# KNN
knn = KNeighborsClassifier(n_neighbors=3).fit(data, y)
knn_score = knn.score(data, y)
knn_score
# final opting for Random forest classifier
predict_y = random.predict(test_data)
prediction = pd.DataFrame({"PassengerId": Id, "Survived": predict_y})
prediction
my_submission = pd.DataFrame(prediction)
my_submission.to_csv("submission.csv", index=False)
my_submission
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from sklearn.neighbors import NearestNeighbors
from keras import applications
import math
import seaborn as sn
import random
from scipy import ndarray
import lightgbm as lgb
from sklearn.metrics import confusion_matrix
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import accuracy_score
from sklearn.manifold import TSNE
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from PIL import Image
import keras
from keras.layers import Dense, GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
import tensorflow as tf
os.chdir("/kaggle/input")
print(os.getcwd())  # confirm the current working directory
with open("V_all_np.npy", "rb") as f:
v_all = np.load(f)
with open("NVT_all_np.npy", "rb") as f:
nvt_all = np.load(f)
with open("NT_all_np.npy", "rb") as f:
nt_all = np.load(f)
# Any results you write to the current directory are saved as output.
# tf.image.resize(v_all, [256, 256])
data = np.concatenate((v_all, nvt_all, nt_all))
type(data[0][0][0][0])
labels = np.zeros((len(data), 3))
for i in range(len(labels)):
    if i < len(v_all):
        labels[i][0] = 1
    elif i < len(v_all) + len(nvt_all):
        labels[i][1] = 1
    else:
        labels[i][2] = 1
labels
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
data, labels, test_size=0.33, random_state=42
)
del v_all, nvt_all, nt_all
del data
X_train.shape
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.applications.mobilenet import MobileNet
HEIGHT = 1024
WIDTH = 1024
base_model = applications.MobileNet(
weights=None, include_top=False, input_shape=(HEIGHT, WIDTH, 3)
)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(750, activation="relu")(
x
) # we add dense layers so that the model can learn more complex functions and classify for better results.
x = Dense(750, activation="relu")(x) # dense layer 2
x = Dense(250, activation="relu")(x) # dense layer 3
preds = Dense(3, activation="softmax")(x) # final layer with softmax activation
model = Model(inputs=base_model.input, outputs=preds)
# specify the inputs
# specify the outputs
# now a model has been created based on our architecture
for i, layer in enumerate(model.layers):
print(i, layer.name)
for layer in model.layers[:12]:
layer.trainable = False
for layer in model.layers[12:]:
layer.trainable = True
model.compile(
optimizer=Adam(lr=0.0001), loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(
X_train,
y_train,
epochs=30,
batch_size=3,
verbose=1,
shuffle=True,
validation_split=0.2,
)
model.predict(X_test)
y_test
labels
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
nRowsRead = 1000 # specify 'None' if want to read whole file
df = pd.read_csv(
"../input/cusersmarildownloadsbiodiversitycsv/biodiversity.csv",
delimiter=";",
encoding="ISO-8859-1",
nrows=nRowsRead,
)
df.dataframeName = "biodiversity.csv"
nRow, nCol = df.shape
print(f"There are {nRow} rows and {nCol} columns")
df.head()
df.tail()
df.dtypes
df["ZoneID"].plot.hist()
plt.show()
# Data Cleaning
plt.figure(figsize=(10, 5))
sns.heatmap(df.isnull(), cbar=False, yticklabels="", cmap="viridis")
clean_df = df.fillna(value=0)
plt.figure(figsize=(10, 5))
sns.heatmap(clean_df.isnull(), cbar=False, yticklabels="", cmap="viridis")
corrs = df.corr()
corrs
plt.figure(figsize=(20, 8))
# Heatmap of correlations
sns.heatmap(corrs, cmap=plt.cm.RdYlBu_r, vmin=-0.25, annot=True, vmax=0.6)
plt.title("BIODIVERSITY")
import plotly.express as px
# Grouping it by Zone ID and RCAC Score I
plot_data = df.groupby(["ZoneID", "RCACScoreI"], as_index=False).DescID.sum()
fig = px.bar(plot_data, x="ZoneID", y="DescID", color="RCACScoreI")
fig.update_layout(title_text="Biodiversity", height=500, width=1000)
fig.show()
import plotly.express as px
# Grouping it by Zone ID and Desc ID, summing RCAC Score I
plot_data = df.groupby(["ZoneID", "DescID"], as_index=False).RCACScoreI.sum()
fig = px.line_polar(plot_data, theta="ZoneID", r="RCACScoreI", color="DescID")
fig.update_layout(title_text="Biodiversity", height=500, width=1000)
fig.show()
# sample codes from Mikey_Mtk @motokinakamura https://www.kaggle.com/motokinakamura/treemap-with-plotly
fig = go.Figure(
go.Treemap(
labels=[
"Eve",
"Cain",
"Seth",
"Enos",
"Noam",
"Abel",
"Awan",
"Enoch",
"Azura",
],
parents=["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve"],
)
)
fig.show()
# codes from Mikey_Mtk @motokinakamura https://www.kaggle.com/motokinakamura/treemap-with-plotly
# make a df that is grouped by "ZoneID"
gb_ZoneID = df.groupby("ZoneID").sum()
gb_ZoneID.head()
# codes from Mikey_Mtk @motokinakamura https://www.kaggle.com/motokinakamura/treemap-with-plotly
ZoneID = list(gb_ZoneID.index)
biodiver = list(gb_ZoneID.DescID)
print(ZoneID)
print(biodiver)
# codes from Mikey_Mtk @motokinakamura https://www.kaggle.com/motokinakamura/treemap-with-plotly
# first treemap
test_tree = go.Figure(
go.Treemap(
labels=ZoneID,
parents=[""] * len(ZoneID),
values=biodiver,
textinfo="label+value",
)
)
test_tree.show()
# codes from Mikey_Mtk @motokinakamura https://www.kaggle.com/motokinakamura/treemap-with-plotly
# second treemap
test_tree_blue = go.Figure(
go.Treemap(
labels=ZoneID,
parents=[""] * len(ZoneID),
values=biodiver,
textinfo="label+value",
marker_colorscale="greens",
)
)
test_tree_blue.show()
df.plot(subplots=True, figsize=(10, 10), sharex=False, sharey=False)
plt.show()
df.plot.area(y=["DescID", "Version"], alpha=0.4, figsize=(12, 6))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import json
meta = pd.DataFrame(
json.load(
open(
"/kaggle/input/deepfake-detection-challenge/train_sample_videos/metadata.json"
)
)
).T
print(meta.head())
# Any results you write to the current directory are saved as output.
meta["label"].value_counts()
meta[meta["label"] == "REAL"].head()
# read a random real video in memory and analyse
import random
try:
import imageio
import pylab
except Exception as e:
import pylab
import imageio
real_vids = meta[(meta["label"] == "REAL") & (meta["split"] == "train")]
rand_real_vid = real_vids.index[random.randint(0, len(real_vids) - 1)]
filename = (
"/kaggle/input/deepfake-detection-challenge/train_sample_videos/" + rand_real_vid
)
print(filename)
vid = imageio.get_reader(filename, "ffmpeg")
type(vid)
vid.count_frames()
image = vid.get_data(1)
pylab.imshow(image)
# Find number of frames in each video
import tqdm
frames_per_video = list()
for file in tqdm.tqdm(meta.index):
filename = "/kaggle/input/deepfake-detection-challenge/train_sample_videos/" + file
vid = imageio.get_reader(filename, "ffmpeg")
frames_per_video.append(vid.count_frames())
import matplotlib.pyplot as plt
plt.plot(frames_per_video)
plt.show()
# ### not all videos have the same number of frames
# read meta data info about each video
import tqdm
meta_per_video = list()
for file in tqdm.tqdm(meta.index):
filename = "/kaggle/input/deepfake-detection-challenge/train_sample_videos/" + file
vid = imageio.get_reader(filename, "ffmpeg")
vid_meta = vid._meta
# append other info from meta file
vid_meta["num_frames"] = vid.count_frames()
vid_meta["filename"] = file
vid_meta["label"] = meta.loc[file]["label"]
vid_meta["split"] = meta.loc[file]["split"]
vid_meta["original"] = meta.loc[file]["original"]
meta_per_video.append(vid_meta)
# convert list of dict to pandas dataframe for easy analysis
meta_videos = pd.DataFrame(meta_per_video)
meta_videos.head()
list(
filter(
lambda x: x.split(".")[1] != "mp4",
os.listdir("../input/deepfake-detection-challenge/test_videos"),
)
)
# ### there is no metadata file in the test directory
meta_per_video = list()
for file in tqdm.tqdm(os.listdir("../input/deepfake-detection-challenge/test_videos")):
filename = "/kaggle/input/deepfake-detection-challenge/test_videos/" + file
vid = imageio.get_reader(filename, "ffmpeg")
vid_meta = vid._meta
# append other info from meta file
vid_meta["num_frames"] = vid.count_frames()
vid_meta["filename"] = file
meta_per_video.append(vid_meta)
# convert list of dict to pandas dataframe for easy analysis
meta_test_videos = pd.DataFrame(meta_per_video)
meta_test_videos.head()
# ### test videos num_frames distribution
plt.plot(meta_test_videos["num_frames"])
plt.show()
# ## Reading the entire data
# import the libraries
import PIL.Image
import PIL.ImageDraw
try:
import face_recognition
except:
import face_recognition
# load a video
vid = imageio.get_reader(
"/kaggle/input/deepfake-detection-challenge/train_sample_videos/aagfhgtpmv.mp4",
"ffmpeg",
)
# get a random frame of the video
image = vid.get_data(random.randint(0, vid.count_frames() - 1))
# Load the jpg file into a NumPy array
# image = face_recognition.load_image_file(image)
# Find all the faces in the image
face_locations = face_recognition.face_locations(image)
number_of_faces = len(face_locations)
print("I found {} face(s) in this photograph.".format(number_of_faces))
# Load the image into a Python Image Library object so that we can draw on top of it and display it
pil_image = PIL.Image.fromarray(image)
for face_location in face_locations:
# Print the location of each face in this image. Each face is a list of co-ordinates in (top, right, bottom, left) order.
top, right, bottom, left = face_location
print(
"A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(
top, left, bottom, right
)
)
# Let's draw a box around the face
draw = PIL.ImageDraw.Draw(pil_image)
draw.rectangle([left, top, right, bottom], outline="red")
# Display the image on screen
pil_image.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import math
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import keras.backend as K
from keras.layers import (
Input,
Dense,
Reshape,
Flatten,
Dropout,
Activation,
BatchNormalization,
)
from keras.models import Sequential, Model
from keras.constraints import Constraint
from keras.optimizers import RMSprop, Adam
from keras.initializers import GlorotNormal
from keras.losses import BinaryCrossentropy
from pathlib import Path
np.random.seed(2023)
def genGauss(p, n=1, r=1):
    # Generate a 2-D mixture of n Gaussians arranged on a circle (p points per component)
x = []
y = []
for k in range(n):
x_t, y_t = np.random.multivariate_normal(
[math.sin(2 * k * math.pi / n), math.cos(2 * k * math.pi / n)],
[[0.0125, 0], [0, 0.0125]],
p,
).T
x.append(x_t)
y.append(y_t)
x = np.array(x).flatten()[:, None]
y = np.array(y).flatten()[:, None]
x -= np.mean(x)
y -= np.mean(y)
train = np.concatenate((x, y), axis=1)
return train / (np.max(train) * r)
def wasserstein_loss(y_true, y_pred):
return K.mean(y_true * y_pred)
def compute_grads(model, X, y, loss_fn=None):
    # Probe the average gradient of the loss w.r.t. the trainable weights
    # without applying any update.
    with tf.GradientTape() as tape:
        logits = model(X, training=False)
        if loss_fn is None:
            loss = model.compiled_loss(y, logits)
        else:
            logits = tf.reshape(logits, tf.shape(y))
            loss = loss_fn(y, logits)
    grads = tape.gradient(loss, model.trainable_weights)
    return avg_grads(grads)
def avg_grads(grads):
mean_grad = tf.zeros(())
for grad in grads:
mean_grad += tf.reduce_mean(grad)
mean_grad /= len(grads)
return mean_grad
class ClipConstraint(Constraint):
# set clip value when initialized
def __init__(self, clip_value):
self.clip_value = clip_value
# clip model weights to hypercube
def __call__(self, weights):
return K.clip(weights, -self.clip_value, self.clip_value)
# get the config
def get_config(self):
return {"clip_value": self.clip_value}
def generate_real_samples(dataset, n_samples):
# choose random instances
ix = np.random.randint(0, dataset.shape[0], n_samples)
# select images
X = dataset[ix]
return X
def build_generator(z_dim):
init = GlorotNormal(seed=0)
model = Sequential()
model.add(
Dense(512, input_dim=z_dim, kernel_initializer=init, activation="LeakyReLU")
)
model.add(Dense(512, kernel_initializer=init, activation="LeakyReLU"))
model.add(Dense(512, kernel_initializer=init, activation="LeakyReLU"))
model.add(Dense(2, kernel_initializer=init, activation="linear"))
return model
def build_discriminator():
init = GlorotNormal(seed=0)
model = Sequential()
model.add(Dense(512, input_dim=2, kernel_initializer=init, activation="LeakyReLU"))
model.add(Dense(512, kernel_initializer=init, activation="LeakyReLU"))
model.add(Dense(512, kernel_initializer=init, activation="LeakyReLU"))
model.add(Dense(1, kernel_initializer=init, activation="sigmoid"))
model.compile(optimizer=Adam(learning_rate=lr), loss="binary_crossentropy")
return model
def build_critic():
init = GlorotNormal(seed=0)
const = ClipConstraint(0.01)
model = Sequential()
model.add(
Dense(
512,
input_dim=2,
kernel_initializer=init,
kernel_constraint=const,
activation="LeakyReLU",
)
)
model.add(
Dense(
512,
kernel_initializer=init,
kernel_constraint=const,
activation="LeakyReLU",
)
)
model.add(
Dense(
512,
kernel_initializer=init,
kernel_constraint=const,
activation="LeakyReLU",
)
)
model.add(
Dense(1, kernel_initializer=init, kernel_constraint=const, activation="linear")
)
model.compile(optimizer=RMSprop(learning_rate=lr), loss=wasserstein_loss)
return model
def build_gan(gen, dis):
for layer in dis.layers:
if not isinstance(layer, BatchNormalization):
layer.trainable = False
model = Sequential()
model.add(gen)
model.add(dis)
model.compile(optimizer=Adam(learning_rate=lr), loss="binary_crossentropy")
return model
def build_wgan(gen, critic):
for layer in critic.layers:
if not isinstance(layer, BatchNormalization):
layer.trainable = False
model = Sequential()
model.add(gen)
model.add(critic)
model.compile(optimizer=RMSprop(learning_rate=lr), loss=wasserstein_loss)
return model
def init_graphs():
fig, axarr = plt.subplots(1, 3, figsize=(20, 5))
for ax in axarr:
ax.set_xlim([-1.5, 1.5])
ax.set_ylim([-1.0, 1.0])
return fig, axarr
Path("/kaggle/working/data").mkdir(parents=True, exist_ok=True)
fig, axarr = init_graphs()
save_dir = "/kaggle/working/data"
epochs = 5000
batch_size = 64
z_dim = 2
lr = 5e-5
critic = build_critic()
dis = build_discriminator()
gen_gan, gen_wgan = build_generator(z_dim), build_generator(z_dim)
combined_gan = build_gan(gen_gan, dis)
combined_wgan = build_wgan(gen_wgan, critic)
X_train = genGauss(100, 5, 1)
np.random.shuffle(X_train)
plot_dis_loss = []
plot_critic_loss = []
plot_gan_loss = []
plot_wgan_loss = []
epochs = 200
for epoch in range(epochs):
print("Epoch :", epoch)
n_batches = int(X_train.shape[0] / batch_size)
# record loss for each epoch
critic_loss_epo, critic_grad_epo = list(), list()
dis_loss_epo, dis_grad_epo = list(), list()
gan_loss_epo, gan_grad_epo = list(), list()
wgan_loss_epo, wgan_grad_epo = list(), list()
# for each batch
for _ in range(n_batches):
dis_iter = 5
it = 0
critic_loss, critic_grads = list(), list()
dis_loss, dis_grads = list(), list()
# train critic and dis
for _ in range(dis_iter):
sample_batch = generate_real_samples(X_train, batch_size)
Z = np.random.uniform(-1, 1, (batch_size, z_dim))
generated_samples_gan, generated_samples_wgan = gen_gan.predict(
Z
), gen_wgan.predict(Z)
X_gan = np.concatenate((sample_batch, generated_samples_gan))
X_wgan = np.concatenate((sample_batch, generated_samples_wgan))
d_y = tf.cast(
np.array([1] * len(sample_batch) + [0] * batch_size), tf.float32
)
c_y = tf.cast(
np.array([1] * len(sample_batch) + [-1] * batch_size), tf.float32
)
d_loss = dis.train_on_batch(X_gan, d_y)
c_loss = critic.train_on_batch(X_wgan, c_y)
critic_loss.append(c_loss)
critic_grads.append(compute_grads(critic, X_wgan, c_y))
dis_loss.append(d_loss)
dis_grads.append(compute_grads(dis, X_gan, d_y))
critic_loss_epo.append(np.mean(critic_loss))
critic_grad_epo.append(np.mean(critic_grads))
dis_loss_epo.append(np.mean(dis_loss))
dis_grad_epo.append(np.mean(dis_grads))
Z = np.random.uniform(-1, 1, (batch_size, z_dim))
target = np.ones(batch_size)
gan_loss = combined_gan.train_on_batch(Z, target)
wgan_loss = combined_wgan.train_on_batch(Z, target)
gan_loss_epo.append(gan_loss)
gan_grad_epo.append(
compute_grads(combined_gan, Z, target, loss_fn=BinaryCrossentropy())
)
wgan_loss_epo.append(wgan_loss)
wgan_grad_epo.append(
compute_grads(combined_wgan, Z, target, loss_fn=wasserstein_loss)
)
print(
"\n Vanilla GAN [Loss_D: {:.6f}, Loss_G: {:.6f}]".format(
np.mean(dis_loss_epo), np.mean(gan_loss_epo)
)
)
print(
"\n w-GAN [Loss_D: {:.6f}, Loss_G: {:.6f}]".format(
np.mean(critic_loss_epo), np.mean(wgan_loss_epo)
)
)
plot_dis_loss.append(np.mean(dis_loss_epo))
plot_critic_loss.append(np.mean(critic_loss_epo))
plot_gan_loss.append(np.mean(gan_loss_epo))
plot_wgan_loss.append(np.mean(wgan_loss_epo))
Z = np.random.uniform(-1, 1, (100, z_dim))
gan_images = gen_gan.predict(Z)
wgan_images = gen_wgan.predict(Z)
fig.suptitle("Epoch: {}".format(epoch))
axarr[0].set_title("GAN")
axarr[0].scatter(
X_train[:, 0], X_train[:, 1], c="red", label="Real data", marker="."
)
axarr[0].scatter(
gan_images[:, 0], gan_images[:, 1], c="green", label="Fake data", marker="."
)
axarr[0].legend(loc="upper left")
axarr[1].set_title("w-GAN")
axarr[1].scatter(
X_train[:, 0], X_train[:, 1], c="red", label="Real data", marker="."
)
axarr[1].scatter(
wgan_images[:, 0], wgan_images[:, 1], c="green", label="Fake data", marker="."
)
axarr[1].legend(loc="upper left")
fig.savefig(f"/kaggle/working/data/dist_epoch{epoch}.png")
axarr[0].clear()
axarr[1].clear()
import imageio
frames = list()
for epoch in range(epochs):
image = imageio.v2.imread(f"/kaggle/working/data/dist_epoch{epoch}.png")
frames.append(image)
imageio.mimsave(
    "/kaggle/working/dist.gif",  # output gif path
    frames,  # list of input frames
    fps=5,  # frames per second
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import math
#!pip install bar-chart-race
#!pip install ffmpeg-python
import bar_chart_race as bcr
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Reading the Historical Public Debt Database
path = "/kaggle/input/government-financial-statistics-dataset/"
HPDD = pd.read_csv(path + "Historical Public Debt Database.csv")
HPDD.columns.values
HPDD.head()
# Dropping unwanted variables
HPDD = HPDD.drop(
["Indicator Name", "Indicator Code", "Attribute", "Unnamed: 221"], axis=1
)
# Flattening the time series data
years = list(HPDD.columns[2:].values)
HPDD = pd.melt(
HPDD,
id_vars=["Country Name", "Country Code"],
value_vars=years,
var_name="Year",
value_name="Debt to GDP Ratio",
)
HPDD.dtypes
HPDD["Year"] = HPDD["Year"].astype(str)
df_pivot = pd.pivot_table(
HPDD,
values="Debt to GDP Ratio",
index=["Year"],
columns=["Country Name"],
aggfunc="sum",
fill_value=0,
)
df_pivot
# ## Highest & Lowest
# Find the country with the highest and lowest debt-to-GDP ratio for each year
max_df = HPDD.loc[HPDD.groupby("Year")["Debt to GDP Ratio"].idxmax()]
min_df = HPDD.loc[HPDD.groupby("Year")["Debt to GDP Ratio"].idxmin()]
# Create the interactive subplots
fig = make_subplots(
rows=2,
cols=1,
shared_xaxes=True,
subplot_titles=(
"Country with highest debt-to-GDP ratio",
"Country with lowest debt-to-GDP ratio",
),
)
# Add the scatter plots for the highest and lowest ratios
fig.add_trace(
go.Scatter(
x=max_df["Year"],
y=max_df["Debt to GDP Ratio"],
mode="markers",
marker=dict(size=10, color="red"),
hovertext=max_df["Country Name"],
name="Highest",
),
row=1,
col=1,
)
fig.add_trace(
go.Scatter(
x=min_df["Year"],
y=min_df["Debt to GDP Ratio"],
mode="markers",
marker=dict(size=10, color="blue"),
hovertext=min_df["Country Name"],
name="Lowest",
),
row=2,
col=1,
)
# Update the axis and layout properties
fig.update_layout(
height=800,
title_text="Debt-to-GDP Ratio by Country and Year",
xaxis=dict(title="Year"),
yaxis=dict(title="Debt to GDP Ratio"),
)
# Show the plot
fig.show()
# ***We have a total of 191 countries to visualize, which makes the plot hard to read. We will only visualize the G20 nations.***
G20_Nations = HPDD[
HPDD["Country Name"].isin(
[
"Argentina",
"Australia",
"Brazil",
"Canada",
"China",
"Euro area",
"France",
"Germany",
"India",
"Indonesia",
"Italy",
"Japan",
"Korea",
"Mexico",
"Russia",
"Saudi Arabia",
"South Africa",
"Turkey",
"United Kingdom",
"United States",
]
)
]
G20_pivot = pd.pivot_table(
G20_Nations,
values="Debt to GDP Ratio",
index=["Year"],
columns=["Country Name"],
aggfunc="sum",
fill_value=0,
)
# ## Animated Plot
bcr.bar_chart_race(
df=G20_pivot,
n_bars=20,
sort="desc",
title="Debt-To-GDP Ratio",
dpi=100,
steps_per_period=1,
period_length=1000,
)
# ## Summary Statistics
# Summary statistics for Debt-to-GDP Ratio by year
G20_Nations.groupby("Year")["Debt to GDP Ratio"].describe()
# Summary statistics for Debt-to-GDP Ratio by country
G20_Nations.groupby("Country Name")["Debt to GDP Ratio"].describe()
# ## Line plots
grouped_data = G20_Nations.groupby("Country Name")
nrows = math.ceil(len(grouped_data) / 4)
fig, axs = plt.subplots(nrows=nrows, ncols=4, figsize=(16, 10))
for i, (country, data) in enumerate(grouped_data):
row = i // 4
col = i % 4
axs[row, col].plot(data["Year"], data["Debt to GDP Ratio"])
axs[row, col].set_title(f"{country}: Debt-to-GDP Ratio over Time")
axs[row, col].set_xlabel("Year")
axs[row, col].set_ylabel("Debt to GDP Ratio")
# hide any remaining empty subplots
for j in range(i + 1, nrows * 4):
    axs[j // 4, j % 4].axis("off")
plt.tight_layout()
plt.show()
country = "India"
df = HPDD[HPDD["Country Name"] == country]
fig = px.line(
df, x="Year", y="Debt to GDP Ratio", title=f"{country}: Debt-to-GDP Ratio over Time"
)
fig.update_layout(xaxis_title="Year", yaxis_title="Debt to GDP Ratio")
fig.show()
# ## Scatter Plot
fig = px.scatter(
G20_Nations,
x="Year",
y="Debt to GDP Ratio",
color="Country Name",
hover_data=["Country Name"],
)
fig.update_layout(
title="Debt to GDP Ratio vs Year",
xaxis_title="Year",
yaxis_title="Debt to GDP Ratio",
)
fig.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_profiling
test1 = pd.read_csv("../input/test1.csv")
train1 = pd.read_csv("../input/train1.csv")
train1.info()
test1.info()
train1.describe()
test1.describe()
train1.drop(train1.columns[[0]], axis=1, inplace=True)
test1.drop(test1.columns[[0]], axis=1, inplace=True)
train1[train1.isnull().any(axis=1)].count()
test1[test1.isnull().any(axis=1)].count()
test1.isna().sum()
train1.isna().sum()
train1["PROD_CD"].unique()
import re
p = re.compile(r"\D")
train1["TARGET_IN_EA"] = [p.sub("", x) for x in train1["TARGET_IN_EA"]]
train1["ACH_IN_EA"] = [p.sub("", x) for x in train1["ACH_IN_EA"]]
test1["TARGET_IN_EA"] = [p.sub("", x) for x in test1["TARGET_IN_EA"]]
train1.head()
train1["TARGET_IN_EA"] = pd.to_numeric(train1["TARGET_IN_EA"])
train1["ACH_IN_EA"] = pd.to_numeric(train1["ACH_IN_EA"])
test1["TARGET_IN_EA"] = pd.to_numeric(test1["TARGET_IN_EA"])
train1.info()
test1.info()
pandas_profiling.ProfileReport(train1)
plt.scatter(x="TARGET_IN_EA", y="ACH_IN_EA")
sns.pairplot(train1.iloc[:, :])
sns.boxplot(x="TARGET_IN_", y="ACH_IN_EA", data=train1)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
train_df = pd.read_csv(
"/kaggle/input/cs985-987-Emotion-Recognition-Project/my_emotion_train.csv"
)
train_df["pixels"] = train_df["pixels"].apply(
lambda x: np.fromstring(x, dtype=int, sep=" ")
)
X_train, X_val, y_train, y_val = train_test_split(
train_df["pixels"], train_df["emotion"], test_size=0.2, random_state=42
)
rfc = RandomForestClassifier(n_estimators=100, random_state=42)
rfc.fit(X_train.tolist(), y_train)
emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", " Neutral"]
accuracy = rfc.score(X_val.tolist(), y_val)
print(f"Validation accuracy: {accuracy}")
y_pred = rfc.predict(X_val.tolist())
# Compute confusion matrix
conf_matrix = confusion_matrix(y_val, y_pred)
# Compute classification report
class_report = classification_report(y_val, y_pred)
print("Confusion Matrix:")
df = pd.DataFrame(conf_matrix, columns=emotions, index=emotions)
print(df)
print(f"Classification Report:\n{class_report}")
test_df = pd.read_csv(
"/kaggle/input/cs985-987-Emotion-Recognition-Project/my_emotion_test.csv"
)
test_df["pixels"] = test_df["pixels"].apply(
lambda x: np.fromstring(x, dtype=int, sep=" ")
)
predictions = rfc.predict(test_df["pixels"].tolist())
submission_df = pd.DataFrame({"id": test_df["id"], "emotion": predictions})
submission_df.to_csv("rf-submission.csv", index=False)
len(submission_df)
submission_df.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import warnings
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pandas as pd
pd.options.display.max_columns = 100
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
import pylab as plot
params = {
"axes.labelsize": "large",
"xtick.labelsize": "x-large",
"legend.fontsize": 20,
"figure.dpi": 150,
"figure.figsize": [25, 7],
}
plot.rcParams.update(params)
data = pd.read_csv("/kaggle/input/titanic/train.csv")
data.shape
data.head()
data.describe()
data["Age"] = data["Age"].fillna(data["Age"].median())
data.describe()
data["Died"] = 1 - data["Survived"]
data.describe()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# !git clone https://github.com/zaryabmakram/ml-ddp-example.git
# %cd ml-ddp-example/
# simple run
# !python train.py
# distributed run
# !torchrun --nnodes=1 --nproc_per_node=2 train.py
# # Model
import torch
import torch.nn as nn
class EncoderDecoder(nn.Module):
def __init__(self):
super(EncoderDecoder, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.decoder = nn.Sequential(
nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.Conv2d(32, 3, kernel_size=3, stride=1, padding=1),
nn.Tanh(),
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
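# Minimal smoke test (assumes CPU is fine here): the encoder halves the spatial
# size with MaxPool2d and the decoder restores it with Upsample(scale_factor=2),
# so a 3x64x64 input should come back as 3x64x64.
_model = EncoderDecoder()
_dummy = torch.randn(1, 3, 64, 64)
print(_model(_dummy).shape)  # expected: torch.Size([1, 3, 64, 64])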
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import fastai
print(fastai.__version__)
train = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/train.csv")
train.head()
train.columns
from fastai.tabular.all import *
dls = TabularDataLoaders.from_df(
train,
path=".",
y_names="target",
cat_names=[
"cat0",
"cat1",
"cat2",
"cat3",
"cat4",
"cat5",
"cat6",
"cat7",
"cat8",
"cat9",
],
cont_names=[
"cont1",
"cont2",
"cont3",
"cont4",
"cont5",
"cont6",
"cont7",
"cont8",
"cont9",
"cont10",
"cont11",
"cont12",
"cont13",
],
procs=[Categorify, FillMissing, Normalize],
)
dls.show_batch()
learn = tabular_learner(dls, metrics=rmse)
learn.fit_one_cycle(2)
test = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/test.csv")
test.head()
tdl = learn.dls.test_dl(test)
preds, _ = learn.get_preds(dl=tdl)
preds.numpy().shape
ss = pd.read_csv(
"/kaggle/input/tabular-playground-series-feb-2021/sample_submission.csv"
)
ss.head()
preds
ss["target"] = preds.numpy()
ss.to_csv("submission.csv", index=False)
|
from typing import Dict, List, Union
from pathlib import Path, PosixPath
import fasttext
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import matplotlib as mpl
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn import metrics
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from xgboost import XGBClassifier, plot_importance
pylab.rcParams["figure.figsize"] = 10, 10
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
# There is no description of the dataset, but I like it since it consists of 3 datasets (human, dog, chimpanzee). Each dataset has a DNA sequence column and 7 possible classes. Each DNA sequence is described by the nucleotides A (adenine), C (cytosine), G (guanine) and T (thymine). Please have a look at the dataset introduction [here](https://www.kaggle.com/code/tarunsolanki/classifying-dna-sequence-using-ml).
human_data = pd.read_table("/kaggle/input/dna-sequence-dataset/human.txt")
chimpanzee_data = pd.read_table("/kaggle/input/dna-sequence-dataset/chimpanzee.txt")
dog_data = pd.read_table("/kaggle/input/dna-sequence-dataset/dog.txt")
print(f"n rows: {human_data.shape[0]} - n classes: {human_data['class'].nunique()}")
human_data.head()
print(
f"n rows: {chimpanzee_data.shape[0]} - n classes: {chimpanzee_data['class'].nunique()}"
)
chimpanzee_data.head()
print(f"n rows: {dog_data.shape[0]} - n classes: {dog_data['class'].nunique()}")
dog_data.head()
human_data["class"].value_counts().sort_index().plot.bar()
chimpanzee_data["class"].value_counts().sort_index().plot.bar()
dog_data["class"].value_counts().sort_index().plot.bar()
human_data["n_seq"] = [len(i) for i in human_data["sequence"]]
chimpanzee_data["n_seq"] = [len(i) for i in chimpanzee_data["sequence"]]
dog_data["n_seq"] = [len(i) for i in dog_data["sequence"]]
human_data["n_seq"].describe()
chimpanzee_data["n_seq"].describe()
dog_data["n_seq"].describe()
# k-mers are substrings of length k contained within a biological sequence. Primarily used within the context of computational genomics and sequence analysis, in which k-mers are composed of nucleotides (i.e. A, T, G, and C), k-mers are used to assemble DNA sequences, improve heterologous gene expression, identify species in metagenomic samples, and create attenuated vaccines.
def getKmers(sequence, size=6):
return [sequence[x : x + size].lower() for x in range(len(sequence) - size + 1)]
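# Quick illustration of getKmers on a made-up 10-base sequence: a sequence of
# length L yields L - k + 1 overlapping, lower-cased k-mers.
print(getKmers("ATGCATGCAT", size=6))
# -> ['atgcat', 'tgcatg', 'gcatgc', 'catgca', 'atgcat']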
human_data["kmers"] = human_data.apply(lambda x: getKmers(x["sequence"]), axis=1)
chimpanzee_data["kmers"] = chimpanzee_data.apply(
lambda x: getKmers(x["sequence"]), axis=1
)
dog_data["kmers"] = dog_data.apply(lambda x: getKmers(x["sequence"]), axis=1)
human_data["text"] = [" ".join(i) for i in human_data["kmers"]]
chimpanzee_data["text"] = [" ".join(i) for i in chimpanzee_data["kmers"]]
dog_data["text"] = [" ".join(i) for i in dog_data["kmers"]]
# ## Dataset split
seed = 83110
splits = [0.7, 0.15, 0.15]
datasets = {}
for animal, data in [
("human", human_data),
("chimpanzee", chimpanzee_data),
("dog", dog_data),
]:
test_size = int(data.shape[0] * splits[1])
X_train, X_test, y_train, y_test = train_test_split(
data["text"],
data["class"],
test_size=test_size,
train_size=data.shape[0] - test_size,
random_state=seed,
shuffle=True,
stratify=data["class"],
)
datasets[animal] = {}
datasets[animal]["train"] = (X_train, y_train)
datasets[animal]["test"] = (X_test, y_test)
for animal, split in datasets.items():
print(animal)
for k, v in split.items():
print(k)
print(v[0].shape, v[1].shape)
# ### Model prediction
def get_metrics(y_test, y_predicted):
accuracy = metrics.accuracy_score(y_test, y_predicted)
precision = metrics.precision_score(y_test, y_predicted, average="weighted")
recall = metrics.recall_score(y_test, y_predicted, average="weighted")
f1 = metrics.f1_score(y_test, y_predicted, average="weighted")
return accuracy, precision, recall, f1
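# Toy call (made-up labels) just to show the return order:
# (accuracy, weighted precision, weighted recall, weighted F1).
print(get_metrics([0, 1, 1, 2], [0, 1, 0, 2]))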
# ## Baseline
cv = CountVectorizer()
classifier = MultinomialNB()
pipe = Pipeline(steps=[("vect", cv), ("clf", classifier)])
param_grid = {
"vect__ngram_range": [(5, 5), (6, 6)],
"clf__alpha": [0.1, 0.3],
}
for animal in ["human", "chimpanzee", "dog"]:
print(f"\n{animal}")
X_train, y_train = datasets[animal]["train"]
X_test, y_test = datasets[animal]["test"]
search = GridSearchCV(pipe, param_grid, n_jobs=2)
search.fit(X_train, y_train)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
y_hat = search.predict(X_test)
# X_train = cv.fit_transform(X_train)
# X_test = cv.transform(X_test)
# classifier.fit(X_train, y_train)
# y_hat = classifier.predict(X_test)
print("Confusion matrix\n")
print(metrics.classification_report(y_test, y_hat))
accuracy, precision, recall, f1 = get_metrics(y_test, y_hat)
print(
"accuracy = %.3f \nprecision = %.3f \nrecall = %.3f \nf1 = %.3f"
% (accuracy, precision, recall, f1)
)
cm = metrics.confusion_matrix(y_test, y_hat)
disp = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=list(range(human_data["class"].nunique()))
)
disp.plot(xticks_rotation="vertical")
print("-----------")
# ## SVC
for animal in ["human", "chimpanzee", "dog"]:
print(f"\n{animal}")
cv = CountVectorizer()
tfidf = TfidfTransformer()
classifier = SVC()
X_train, y_train = datasets[animal]["train"]
X_test, y_test = datasets[animal]["test"]
pipe = Pipeline(steps=[("vect", cv), ("tfidf", tfidf), ("clf", classifier)])
pipe.fit(X_train, y_train)
y_hat = pipe.predict(X_test)
print("Confusion matrix\n")
print(metrics.classification_report(y_test, y_hat))
accuracy, precision, recall, f1 = get_metrics(y_test, y_hat)
print(
"accuracy = %.3f \nprecision = %.3f \nrecall = %.3f \nf1 = %.3f"
% (accuracy, precision, recall, f1)
)
cm = metrics.confusion_matrix(y_test, y_hat)
disp = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=list(range(human_data["class"].nunique()))
)
disp.plot(xticks_rotation="vertical")
print("-----------")
# ## XGBoost
for animal in ["human", "chimpanzee", "dog"]:
print(f"\n{animal}")
cv = CountVectorizer()
tfidf = TfidfTransformer()
classifier = XGBClassifier(
learning_rate=0.1, n_estimators=100, max_depth=6, gamma=0.1
)
X_train, y_train = datasets[animal]["train"]
X_test, y_test = datasets[animal]["test"]
pipe = Pipeline(steps=[("vect", cv), ("tfidf", tfidf), ("clf", classifier)])
pipe.fit(X_train, y_train)
y_hat = pipe.predict(X_test)
print("Confusion matrix\n")
print(metrics.classification_report(y_test, y_hat))
accuracy, precision, recall, f1 = get_metrics(y_test, y_hat)
print(
"accuracy = %.3f \nprecision = %.3f \nrecall = %.3f \nf1 = %.3f"
% (accuracy, precision, recall, f1)
)
cm = metrics.confusion_matrix(y_test, y_hat)
disp = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=list(range(human_data["class"].nunique()))
)
disp.plot(xticks_rotation="vertical")
# plot_importance(classifier)
# pyplot.show()
print("-----------")
# ## Fasttext
def write_fasttext_text(path: PosixPath, data: List[str], labels: List[str]) -> None:
""" """
# if file exists, remove it to make sure. Prevents duplicates by the "a" mode
# path.unlink(missing_ok=True)
with open(path, "a") as output:
for x, y in zip(data, labels):
label = f"__label__{y}"
output.writelines(" ".join([label, x, "\n"]))
return None
for animal, data in datasets.items():
for split, (X, y) in data.items():
write_fasttext_text(Path(f"baseline_{animal}_{split}.txt"), X, y)
for animal in ["human", "chimpanzee", "dog"]:
print(animal)
model = fasttext.train_supervised(
input=f"baseline_{animal}_train.txt",
minCount=1,
wordNgrams=2,
minn=3,
maxn=6,
lr=5,
dim=100,
epoch=20,
)
print("\nModel test score:")
print(model.test(f"baseline_{animal}_test.txt"))
y_hat = []
y_prob = []
X_test, y_test = datasets[animal]["test"]
for text in X_test:
result = model.predict(text)
y_hat.append(int(result[0][0].replace("__label__", "")))
y_prob.append(result[1][0])
y_test = [int(i) for i in y_test]
print("Confusion matrix\n")
print(metrics.classification_report(y_test, y_hat))
accuracy, precision, recall, f1 = get_metrics(y_test, y_hat)
print(
"accuracy = %.3f \nprecision = %.3f \nrecall = %.3f \nf1 = %.3f"
% (accuracy, precision, recall, f1)
)
cm = metrics.confusion_matrix(y_test, y_hat)
disp = metrics.ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=list(range(human_data["class"].nunique()))
)
disp.plot(xticks_rotation="vertical")
print("-----------")
|
# # What is the average PhD stipend at CU Boulder?
# *Step 1: Import Python packages and define helper functions*
import numpy as np
import pandas as pd
import plotly.express as px
def return_avg_result(dataframe, university, department, year, measurement):
new_df = pd.DataFrame()
dataframe = dataframe[dataframe["Academic Year"].isin([year])]
dataframe = dataframe[dataframe["University"].isin([university])]
smaller_dataframe = dataframe[dataframe["Department"].isin([department])]
new_df.loc[university + " All Departments" + " " + YEAR, "mean"] = dataframe.loc[
:, measurement
].mean(axis=0)
new_df.loc[university + " All Departments" + " " + YEAR, "std"] = dataframe.loc[
:, measurement
].std(axis=0)
new_df.loc[university + " All Departments" + " " + YEAR, "count"] = dataframe.loc[
:, measurement
].shape[0]
new_df.loc[
university + " " + department + " Department" + " " + YEAR, "mean"
] = smaller_dataframe.loc[:, measurement].mean(axis=0)
new_df.loc[
university + " " + department + " Department" + " " + YEAR, "std"
] = smaller_dataframe.loc[:, measurement].std(axis=0)
new_df.loc[
university + " " + department + " Department" + " " + YEAR, "count"
] = smaller_dataframe.loc[:, measurement].shape[0]
# print(measurement+' at '+university+' in '+year+':\n')
return new_df
def return_popular_universities(dataframe, number_of_values):
popular_universities = pd.DataFrame(
dataframe["University"].value_counts()[1:number_of_values]
)
# print('Number of Records Per University (Top '+str(number_of_values)+'):\n')
return popular_universities
def return_popular_departments(dataframe, university, number_of_values):
dataframe = dataframe[dataframe["University"].isin([university])]
popular_departments = pd.DataFrame(
dataframe["Department"].value_counts()[0:number_of_values]
)
# print('Number of Records Per Department at '+UNIVERSITY+' (Top '+str(number_of_values)+'): \n')
return popular_departments
# *Step 2: Load and preview the data*
PHD_STIPENDS = pd.read_csv("/kaggle/input/phd-stipends/csv") # load the data
PHD_STIPENDS["Overall Pay"].replace(
regex=True, inplace=True, to_replace=r"\D", value=r""
) # remove $ sign from column
PHD_STIPENDS["Overall Pay"] = (
PHD_STIPENDS["Overall Pay"].astype(float).fillna(0)
) # convert column to float
PHD_STIPENDS[["University", "Department", "Overall Pay", "LW Ratio"]].head(
10
) # preview the data
df = return_popular_universities(PHD_STIPENDS, number_of_values=100)
df.reset_index(level=0, inplace=True)
df.columns = ["University", "Number of Records"]
fig = px.bar(
df,
x="University",
y="Number of Records",
title="Number of Records Per University (Top 10)",
)
fig.update(
layout=dict(
xaxis_title="University",
yaxis_title="Number of Records",
legend_orientation="h",
showlegend=True,
)
)
fig.update_yaxes(range=[0, 140])
fig.show()
# *Step 3: Visualize the data for CU Boulder only*
UNIVERSITY = "University of Colorado - Boulder (UCB)"
df = return_popular_departments(PHD_STIPENDS, UNIVERSITY, number_of_values=10)
df.reset_index(level=0, inplace=True)
df.columns = ["Department", "Number of Records"]
fig = px.bar(
df,
x="Department",
y="Number of Records",
title="Number of Records Per Department at " + UNIVERSITY + " (Top 10)",
)
fig.update(
layout=dict(
xaxis_title="Department",
yaxis_title="Number of Records",
legend_orientation="h",
showlegend=True,
)
)
fig.update_yaxes(range=[0, 8])
fig.show()
UNIVERSITY = "University of Colorado - Boulder (UCB)"
YEAR = "2019-2020"
MEASUREMENT = "Overall Pay"
DEPARTMENT = "Sociology"
df = return_avg_result(PHD_STIPENDS, UNIVERSITY, DEPARTMENT, YEAR, MEASUREMENT)
df.reset_index(level=0, inplace=True)
df.columns = ["Cohort", "Avg", "Std", "n"]
fig = px.bar(
df, x="Cohort", y="Avg", error_y="Std", title="Average Overall Pay at " + UNIVERSITY
)
fig.update(
layout=dict(
xaxis_title="Cohort",
yaxis_title="Average Overall Pay",
legend_orientation="h",
showlegend=True,
)
)
fig.update_yaxes(range=[0, 35000])
fig.show()
|
# # BirdClef2023 Mel torch CNN
# https://www.kaggle.com/code/stpeteishii/birdclef2023-sound-j-0-to-mel/notebook
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets, transforms, models
from torchvision.utils import make_grid
from torchvision.datasets import ImageFolder
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
train_transform = transforms.Compose(
[
transforms.RandomRotation(10), # rotate +/- 10 degrees
transforms.RandomHorizontalFlip(), # reverse 50% of images
transforms.Resize(224), # resize shortest side to 224 pixels
transforms.CenterCrop(224), # crop longest side to 224 pixels at center
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
# # Set Train and Test
# dataset=datasets.ImageFolder(root=("/kaggle/input/monkeys/Monkeys"),transform=train_transform)
# class_names=dataset.classes
# print(class_names)
# print(len(class_names))
dataset = ImageFolder(
root="/kaggle/input/birdclef2023-sound-j-0-to-mel/mel/", transform=train_transform
)
class_names = dataset.classes  # ImageFolder derives class names from the subfolder names
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)
dataset_size = len(dataset)
indices = list(range(dataset_size))
split_ratio = 0.6
split_index = int(dataset_size * split_ratio)
train_indices = indices[:split_index]
test_indices = indices[split_index:]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_loader = DataLoader(dataset, batch_size=32, sampler=train_sampler)
test_loader = DataLoader(dataset, batch_size=32, sampler=test_sampler)
# # Images and Labels
for images, labels in train_loader:
break
print("Label:", labels.numpy())
print("Class:", *np.array([class_names[i] for i in labels]))
im = make_grid(images, nrow=8)
plt.figure(figsize=(15, 10))
plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))
# inv_normalize=transforms.Normalize(mean=[-0.485/0.229,-0.456/0.224,-0.406/0.225],
# std=[1/0.229,1/0.224,1/0.225])
# im=inv_normalize(im)
# plt.figure(figsize=(15,10))
# plt.imshow(np.transpose(im.numpy(),(1,2,0)))
# # CNN Model
class CNNModel(nn.Module):
def __init__(self):
super(CNNModel, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(128 * 28 * 28, 512)
self.fc2 = nn.Linear(512, 5)
self.dropout = nn.Dropout(0.25)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x))) # Conv1 -> ReLU -> MaxPool
x = self.pool(F.relu(self.conv2(x))) # Conv2 -> ReLU -> MaxPool
x = self.pool(F.relu(self.conv3(x))) # Conv3 -> ReLU -> MaxPool
x = x.view(-1, 128 * 28 * 28) # Flatten
x = self.dropout(
F.relu(self.fc1(x))
) # Fully connected layer 1 -> ReLU -> Dropout
x = self.fc2(x) # Fully connected layer 2 (output)
return x
model = CNNModel()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
model
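# Quick shape check (illustrative): with the 224x224 inputs produced by
# train_transform, three conv+pool stages leave 128 feature maps of 28x28,
# matching fc1's 128*28*28 input size; the final layer assumes 5 classes.
with torch.no_grad():
    print(model(torch.randn(2, 3, 224, 224)).shape)  # expected: torch.Size([2, 5])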
# # Fit
import time
start_time = time.time()
train_losses = []
test_losses = []
train_correct = []
test_correct = []
epochs = 100
for i in range(epochs):
print(i)
trn_corr = 0
tst_corr = 0
for b, (X_train, y_train) in enumerate(train_loader):
b += 1
y_pred = model(X_train)
loss = criterion(y_pred, y_train)
predicted = torch.max(y_pred.data, 1)[1]
batch_corr = (predicted == y_train).sum()
trn_corr += batch_corr
optimizer.zero_grad()
loss.backward()
optimizer.step()
if b % 200 == 0:
print(
f"epoch: {i} loss: {loss.item} batch: {b} accuracy: {trn_corr.item()*100/(10*b):7.3f}%"
)
loss = loss.detach().numpy()
train_losses.append(loss)
train_correct.append(trn_corr)
with torch.no_grad():
for b, (X_test, y_test) in enumerate(test_loader):
y_val = model(X_test)
loss = criterion(y_val, y_test)
predicted = torch.max(y_val.data, 1)[1]
btach_corr = (predicted == y_test).sum()
tst_corr += btach_corr
loss = loss.detach().numpy()
test_losses.append(loss)
test_correct.append(tst_corr)
print(f"\nDuration: {time.time() - start_time:.0f} seconds")
plt.plot(train_losses, label="train_losses")
plt.plot(test_losses, label="test_losses")
plt.legend()
plt.show()
# save model
torch.save(model.state_dict(), "model.pt")
# new_model = CNNModel()
# new_model.load_state_dict(torch.load('model.pt'))
# # Predict
device = torch.device("cpu") # "cuda:0"
model.eval()
y_true = []
y_pred = []
with torch.no_grad():
for test_data in test_loader:
test_images, test_labels = test_data[0].to(device), test_data[1].to(device)
pred = model(test_images).argmax(dim=1)
for i in range(len(pred)):
y_true.append(test_labels[i].item())
y_pred.append(pred[i].item())
print(y_pred[0:5])
from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred, target_names=class_names, digits=4))
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/cancer-data/Cancer_Data.csv")
df.head(5)
df.shape
df.columns
# Note:
# - We have 33 columns of data
# - The column "diagnosis" it is our target variable
# ### Cancer Types:
# #### 1. Benign cancer (B)
# #### 2. Malignant cancer (M)
# ## Data Cleaning, Correcting, Completing and Converting
# ### Null Columns
print("Train columns with null values:\n", df.isnull().sum())
# Apart from the all-NaN "Unnamed: 32" column, we don't have any null values.
df.info()
df["Unnamed: 32"]
# We can see that column 32 represents a column full of NaNs, and it's called "Unnamed:32".
# We will need to delete that later.
# ### Transforming the target column from categorical to numerical
df["diagnosis"].replace(["B", "M"], [0, 1], inplace=True)
df.head()
df["diagnosis"].value_counts()
# ## EDA
df["diagnosis"].value_counts()
plt.figure(figsize=(10, 6))
sns.set_style("whitegrid")
plt.pie(df["diagnosis"].value_counts(), autopct="%1.2f%%", startangle=90)
plt.axis("equal")
plt.title("B x M")
plt.show()
plt.figure(figsize=(12, 6))
ax = sns.countplot(data=df, x="diagnosis")
plt.title("Total B x M cells")
# plt.ylim([0,3000])
ax.bar_label(ax.containers[0], label_type="edge")
plt.show()
M = df[df.diagnosis == 1]  # rows diagnosed as malignant (M)
B = df[df.diagnosis == 0]  # rows diagnosed as benign (B)
plt.scatter(M.radius_mean, M.texture_mean, label="Malignant", alpha=0.3)
plt.scatter(B.radius_mean, B.texture_mean, label="Benign", alpha=0.3)
plt.xlabel("radius_mean")
plt.ylabel("texture_mean")
plt.legend()
plt.show()
df.groupby("diagnosis")[["radius_mean", "texture_mean"]].mean()
# Notes:
# - Malignant cells have a higher radius
plt.figure(figsize=(20, 20))
sns.heatmap(df.corr(), cbar=True, annot=True, cmap="Blues")
plt.show()
df.corr()
# ## Building our model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import (
accuracy_score,
f1_score,
confusion_matrix,
plot_confusion_matrix,
)
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV as gscv
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, ExtraTreesClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import VotingClassifier
from scipy.stats import expon, uniform
df.shape
df = df.drop(labels="Unnamed: 32", axis=1)
df.shape
X, y = df.drop("diagnosis", axis=1), df[["diagnosis"]]
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.33, random_state=43, stratify=y
)
model_dict = {}
# Naive Bayes
classifer = GaussianNB()
predictor = classifer.fit(X_train, y_train)
y_pred = predictor.predict(X_val)
accuracy_naive_bayes = accuracy_score(y_val, y_pred)
model_dict["naive_bayes"] = accuracy_naive_bayes
print(accuracy_naive_bayes)
# Linear Discriminant Analysis
classifer = LinearDiscriminantAnalysis()
predictor = classifer.fit(X_train, y_train)
y_pred = predictor.predict(X_val)
accuracy_lda = accuracy_score(y_val, y_pred)
model_dict["linear_discriminant_analysis"] = accuracy_lda
print(accuracy_lda)
# Logistic Regression
classifier = LogisticRegression(random_state=42)
predictor = classifier.fit(X_train, y_train)
y_pred = predictor.predict(X_val)
accuracy_log_reg = accuracy_score(y_val, y_pred)
model_dict["logistic_regression"] = accuracy_log_reg
print(accuracy_log_reg)
# Support Vector Classifier
classifier = SVC(random_state=42)
predictor_svc = classifier.fit(X_train, y_train)
y_pred = predictor_svc.predict(X_val)
accuracy_svc = accuracy_score(y_val, y_pred)
model_dict["SVC"] = accuracy_svc
print(accuracy_svc)
# Kneighbors
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier()
predictor = classifier.fit(X_train, y_train)
y_pred = predictor.predict(X_val)
accuracy_knn = accuracy_score(y_val, y_pred)
model_dict["kneighbors_classifier"] = accuracy_knn
print(accuracy_knn)
# Stochastic Gradient Descent Classifier
classifier = SGDClassifier(random_state=42)
predictor = classifier.fit(X_train, y_train)
y_pred = predictor.predict(X_val)
accuracy_sgdc = accuracy_score(y_val, y_pred)
model_dict["sgd_classifier"] = accuracy_sgdc
print(accuracy_sgdc)
# Random Forest Classifier
classifier = RandomForestClassifier(random_state=42)
predictor = classifier.fit(X_train, y_train)
y_pred = predictor.predict(X_val)
accuracy_rfc = accuracy_score(y_val, y_pred)
model_dict["random_forest_classifier"] = accuracy_rfc
print(accuracy_rfc)
classifier = GradientBoostingClassifier(random_state=42)
predictor_gbc = classifier.fit(X_train, y_train)
y_pred = predictor_gbc.predict(X_val)
accuracy_gbc = accuracy_score(y_val, y_pred)
model_dict["gradient_boosting_classifier"] = accuracy_gbc
print(accuracy_gbc)
# XGBoost
classifier = XGBClassifier(random_state=42, eval_metric="logloss")
predictor_xgb = classifier.fit(X_train, y_train)
y_pred = predictor_xgb.predict(X_val)
accuracy_xgb = accuracy_score(y_val, y_pred)
model_dict["xgboost_classifier"] = accuracy_xgb
print(accuracy_xgb)
# AdaBoost with a decision tree base estimator
dtc = DecisionTreeClassifier(criterion="entropy", random_state=42)
classifier = AdaBoostClassifier(dtc, random_state=42)
predictor = classifier.fit(X_train, y_train)
y_pred = predictor.predict(X_val)
accuracy_ada = accuracy_score(y_val, y_pred)
model_dict["adaboost_classifier"] = accuracy_ada
print(accuracy_ada)
# lgbm
classifier = LGBMClassifier(random_state=42)
predictor_lgbm = classifier.fit(X_train, y_train)
y_pred = predictor_lgbm.predict(X_val)
accuracy_lgbm = accuracy_score(y_val, y_pred)
model_dict["lgbm_classifier"] = accuracy_lgbm
print(accuracy_lgbm)
# extra trees
classifier = ExtraTreesClassifier(random_state=42)
predictor_etc = classifier.fit(X_train, y_train)
y_pred = predictor_etc.predict(X_val)
accuracy_etc = accuracy_score(y_val, y_pred)
model_dict["etc_classifier"] = accuracy_etc
print(accuracy_etc)
model_dict
model_accuracies_df = pd.DataFrame(columns=["Model", "Accuracy"])
model_accuracies_df["Model"] = model_dict.keys()
model_accuracies_df["Accuracy"] = model_dict.values()
model_accuracies_df
model_accuracies_df.sort_values(by="Accuracy", ascending=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_path = "/kaggle/input/titanic/train.csv"
test_path = "/kaggle/input/titanic/test.csv"
train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)
train_data.isna().sum()
train_data
test_data.head()
import seaborn as sns
import matplotlib.pyplot as plt
def convert_embarked(x):
if x == "C":
return 0
if x == "Q":
return 1
if x == "S":
return 2
def convert_sex(x):
if x == "female":
return 0
else:
return 1
temp_train_data = train_data.dropna(subset=["Embarked"])
temp_train_data["Sex"] = train_data["Sex"].apply(convert_sex)
temp_train_data["Embarked"] = train_data["Embarked"].apply(convert_embarked)
temp_train_data = temp_train_data.astype({"Embarked": int})
corr_matrix = temp_train_data.corr()
sns.heatmap(corr_matrix, annot=True)
plt.show()
temp_train_data.head()
# cleaning the data
train_data = temp_train_data.drop(
["PassengerId", "Age", "Cabin", "SibSp", "Parch", "Embarked", "Ticket", "Name"],
axis=1,
)
train_data.head()
from sklearn.model_selection import train_test_split
y = train_data["Survived"]
X = train_data.drop("Survived", axis=1)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=500,
learning_rate=0.01,
)
model.fit(
X_train,
y_train,
eval_set=(X_val, y_val),
)
X_test = test_data[["Pclass", "Sex", "Fare"]]
X_test["Sex"] = X_test["Sex"].apply(convert_sex)
X_test.head()
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
|
import numpy as np
import pandas as pd
from sklearn import datasets
# Read data and store in dictionary by category.
keys = ["business", "entertainment", "politics", "sport", "tech"]
dataset = {}
for key in keys:
df = pd.read_pickle("../input/bbcwithlabel/train_df_label_" + key + ".pickle")
df_result = df[
["Text", "Summary", "text_clean", "summary_clean", "labels", "labels_idx_list"]
].copy()
dataset[key] = df_result
dataset["business"].head()
# We load a dataframe that contains all articles, not divided by category.
df_all_doc = pd.read_pickle("../input/train-df-not-shuffle/train_df_not_shuffle.pickle")
df_all_doc.tail()
# We take all texts from the dataframe, because we need them to build the dataset's vocabulary.
X_all_doc = df_all_doc["Text"]
X_all_doc = np.array(X_all_doc)
len(X_all_doc)
import tensorflow as tf
import keras
# text_vectorizer is a layer that transforms a list of sentences into vectors of integers.
# We pad every vector to the same length of 500 elements.
text_vectorizer = keras.layers.TextVectorization(
max_tokens=34500,
standardize="lower_and_strip_punctuation",
split="whitespace",
output_mode="int",
output_sequence_length=500,
)
# We adapt text_vectorizer on all texts so that it learns the dataset's vocabulary.
text_vectorizer.adapt(X_all_doc, batch_size=2225)
# The dataset's vocabulary is a mapping from tokens to integer ids, for example: 'hi': 1, 'bye': 2.
vocab = text_vectorizer.get_vocabulary()
print("Vocab : {}".format(vocab[:10]))
print("Vocab Size : {}".format(text_vectorizer.vocabulary_size()))
keys = ["business", "entertainment", "politics", "sport", "tech"]
# We vectorize the text of every category's dataset into a matrix of integers.
# Each vector in the matrix represents a sentence of the text.
# The matrix is stored in the "text_embedding" column of the dataframe.
for key in keys:
df_category = dataset[key]
vectorized_text_list = []
for i in range(len(df_category)):
vectorized_text = text_vectorizer(df_category.iloc[i]["text_clean"])
vectorized_text = np.array(vectorized_text)
vectorized_text_list.append(vectorized_text)
df_category["text_embedding"] = vectorized_text_list
# dataset["sport"].head()
from sklearn.model_selection import train_test_split
# We split each dataset into a train set and a test set with a 9:1 ratio.
train_sport, test_sport = train_test_split(dataset["sport"], test_size=0.1)
train_business, test_business = train_test_split(dataset["business"], test_size=0.1)
train_entertainment, test_entertainment = train_test_split(
dataset["entertainment"], test_size=0.1
)
train_tech, test_tech = train_test_split(dataset["tech"], test_size=0.1)
train_politics, test_politics = train_test_split(dataset["politics"], test_size=0.1)
# Then we store in dictionary by category.
train_test_sets = {}
train_test_sets["sport"] = {"train": train_sport, "test": test_sport}
train_test_sets["business"] = {"train": train_business, "test": test_business}
train_test_sets["entertainment"] = {
"train": train_entertainment,
"test": test_entertainment,
}
train_test_sets["tech"] = {"train": train_tech, "test": test_tech}
train_test_sets["politics"] = {"train": train_politics, "test": test_politics}
import pickle
# Save dataset for future comparing.
test_data_file = "test_data.pickle"
with open(test_data_file, "wb") as handle:
pickle.dump(train_test_sets, handle)
import gc
gc.collect()
import numpy as np
import pandas as pd
import re
import string
import csv
import os
from keras.models import Sequential
import torch
from tensorflow.keras import optimizers, utils
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import (
LSTM,
Dense,
Input,
Embedding,
Dropout,
Concatenate,
TimeDistributed,
Bidirectional,
GRU,
BatchNormalization,
Flatten,
)
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
from keras.utils.vis_utils import plot_model
from rouge_score import rouge_scorer
from tensorflow.keras import backend as K
# Calculate ROUGE scores (recall, precision, F1) for evaluation.
def calc_rouge_scores(
pred_summaries, gold_summaries, keys=["rouge1", "rougeL"], use_stemmer=True
):
# Calculate rouge scores
scorer = rouge_scorer.RougeScorer(keys, use_stemmer=use_stemmer)
n = len(pred_summaries)
# Calculate ROUGE score for every test in testset.
scores = [scorer.score(pred_summaries[j], gold_summaries[j]) for j in range(n)]
dict_scores = {}
for key in keys:
dict_scores.update({key: {}})
for key in keys:
# Get precision for every test in testset.
precision_list = [scores[j][key][0] for j in range(len(scores))]
# Get recall for every test in testset.
recall_list = [scores[j][key][1] for j in range(len(scores))]
# Get F1 for every test in testset.
f1_list = [scores[j][key][2] for j in range(len(scores))]
# Calculate mean ROUGE score of all test.
precision = np.mean(precision_list)
recall = np.mean(recall_list)
f1 = np.mean(f1_list)
dict_results = {"recall": recall, "precision": precision, "f1": f1}
dict_scores[key] = dict_results
return dict_scores
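# Toy usage (made-up strings) to show the structure of the returned dictionary:
# one entry per ROUGE key with mean recall/precision/f1 over all pairs.
print(calc_rouge_scores(["the cat sat on the mat"], ["the cat is on the mat"]))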
# Standardize every text_embedding to a matrix of shape (246, 500)
# by padding with zeros.
def padding_sentence(X, Y):
max_number_sentence = 246
padding_X = np.empty(500)
padding_X.fill(0)
for i in range(len(X)):
while len(X[i]) < max_number_sentence:
X[i] = np.append(X[i], [padding_X], axis=0)
Y[i] = np.append(Y[i], [0], axis=0)
return X, Y
# Divide each dataset into batches.
def get_batch(tasks_key, batch_size, number_of_shot=None):
batch_sets = {}
test_sets = {}
for key in tasks_key:
        # Few-shot learning scenario: train on only `number_of_shot` examples.
if number_of_shot:
X_train = np.array(train_test_sets[key]["train"]["text_embedding"])[
:number_of_shot
]
y_train = np.array(train_test_sets[key]["train"]["labels"])[:number_of_shot]
        # Otherwise train on the full dataset.
else:
X_train = np.array(train_test_sets[key]["train"]["text_embedding"])
y_train = np.array(train_test_sets[key]["train"]["labels"])
X_test = np.array(train_test_sets[key]["test"]["text_embedding"])
y_test = np.array(train_test_sets[key]["test"]["labels"])
# Standardizing text_embedding.
X_train, y_train = padding_sentence(X_train, y_train)
X_test, y_test = padding_sentence(X_test, y_test)
# Calculate number of batch based on batch_size.
num_batches = (len(X_train) + batch_size - 1) // batch_size
        # batch_sets is a dictionary keyed by category.
        # The value for each category is a list named batches.
        # Each element of batches holds the data for one training batch.
batches = []
for i in range(num_batches):
if batch_size * i + batch_size <= len(X_train):
batches.append(
{
"X_train": X_train[
i * batch_size : i * batch_size + batch_size
],
"y_train": y_train[
i * batch_size : i * batch_size + batch_size
],
}
)
else:
batches.append(
{
"X_train": X_train[i * batch_size :],
"y_train": y_train[i * batch_size :],
}
)
batch_sets[key] = batches
test_sets[key] = {"X_test": X_test, "y_test": y_test}
return batch_sets, test_sets
from tensorflow.keras import layers, models, losses
import tensorflow as tf
import numpy as np
class MAML:
def __init__(self):
self.meta_model = self.get_maml_model()
def get_maml_model(self):
# define model
model = Sequential()
model.add(
Bidirectional(LSTM(128, return_sequences=True), input_shape=(246, 500))
)
model.add(Dense(1, activation="sigmoid"))
model.add(tf.keras.layers.Reshape((-1,), input_shape=(246, 1)))
return model
# Training step of each batch.
def train_on_batch(
self,
support_train_data,
query_train_data,
inner_optimizer,
outer_optimizer=None,
):
batch_acc = []
batch_loss = []
task_weights = []
support_task_key = ["tech", "politics", "entertainment", "sport"]
query_task_keys = ["business"]
        # Get the current model weights so that they can be reset at the
        # beginning of each inner loop.
meta_weights = self.meta_model.get_weights()
# Inner loops.
# Loop through all support dataset and update model weight.
for key in support_task_key:
# Get starting initialized weight.
self.meta_model.set_weights(meta_weights)
X = np.array([np.array(val) for val in support_train_data[key]["X_train"]])
y = np.array([np.array(val) for val in support_train_data[key]["y_train"]])
with tf.GradientTape() as tape:
pred = self.meta_model(X)
loss = losses.binary_crossentropy(y, pred)
# Calculate the gradients for the variables
gradients = tape.gradient(loss, self.meta_model.trainable_variables)
# Apply the gradients and update the optimizer
inner_optimizer.apply_gradients(
zip(gradients, self.meta_model.trainable_variables)
)
# Save optimized weight of each support task.
task_weights.append(self.meta_model.get_weights())
        # Compute the loss of each optimized weight set on the query training data.
with tf.GradientTape() as tape:
for i in range(len(support_task_key)):
query_task_key = query_task_keys[0]
# Get each saved optimized weight.
self.meta_model.set_weights(task_weights[i])
X = np.array(
[
np.array(val)
for val in query_train_data[query_task_key]["X_train"]
]
)
y = np.array(
[
np.array(val)
for val in query_train_data[query_task_key]["y_train"]
]
)
pred = self.meta_model(X)
loss = losses.binary_crossentropy(y, pred)
batch_loss.append(loss)
# Calculate sum loss
# Calculate mean loss only for visualizing.
sum_loss = tf.reduce_sum(batch_loss)
mean_loss = tf.reduce_mean(batch_loss)
# Get starting initialized weight.
self.meta_model.set_weights(meta_weights)
# Backpropagation of outer loop.
if outer_optimizer:
grads = tape.gradient(sum_loss, self.meta_model.trainable_variables)
outer_optimizer.apply_gradients(
zip(grads, self.meta_model.trainable_variables)
)
return mean_loss
import math
# Set parameter of training data.
# Diving training data into batches.
support_tasks_key = ["sport", "entertainment", "tech", "politics"]
query_tasks_key = ["business"]
number_of_query_batch = 12
number_of_shot = 12
query_batch_size = math.ceil(number_of_shot / number_of_query_batch)
# query_batch_size = 30
support_batch_size = 30
support_batch_sets, support_test_sets = get_batch(support_tasks_key, support_batch_size)
query_batch_sets, query_test_sets = get_batch(
query_tasks_key, query_batch_size, number_of_shot
)
# Evaluation while training.
def val_on_batch(model):
y_pred_list = []
idx_list = []
key_query_task = "business"
# Get text_embedding of test set.
X_test = np.array(
[np.array(val) for val in query_test_sets[key_query_task]["X_test"]]
)
# Prediction on test set.
# Every prediction is a vector of values between 0 and 1.
    # Each value corresponds to the sentence at the same position.
    # Each value is the probability that that sentence is picked for the summary.
y_preds = model.predict(X_test, verbose=0)
print(len(y_preds))
    # Loop through the predictions.
    # If the probability is 0.5 or higher, we store the sentence index.
    # If fewer than 5 indices are stored, we keep the 5 highest-probability indices instead.
for j in range(len(y_preds)):
idx = []
for i in range(len(y_preds[j])):
pred_percent = y_preds[j][i]
if pred_percent >= 0.5:
idx.append(i)
        if len(idx) < 5:
            # keep the indices of the 5 highest-probability sentences
            idx = np.argsort(y_preds[j])[-5:]
idx = sorted(idx)
idx_list.append(idx)
val_sets = train_test_sets[key_query_task]["test"]
# Retrieve picked sentences from source texts.
df_text_clean = val_sets["text_clean"]
pred_summaries = []
for doc in range(len(idx_list)):
pred_summary_sentences_list = []
text_clean = np.array(df_text_clean.iloc[doc])
idx_doc = idx_list[doc]
for i in range(len(text_clean)):
if i in idx_doc:
sentence = text_clean[i]
pred_summary_sentences_list.append(sentence)
pred_summary = " ".join(pred_summary_sentences_list)
pred_summaries.append(pred_summary)
# Get golden summary.
df_gold = val_sets["Summary"]
gold_summaries = [df_gold.iloc[m] for m in range(len(df_gold))]
summaries_comp = tuple(zip(pred_summaries, gold_summaries))
# calculate rouge score
scores = calc_rouge_scores(
pred_summaries, gold_summaries, keys=["rouge1", "rougeL"], use_stemmer=True
)
return scores
epochs = 50
maml = MAML()
inner_optimizer = optimizers.Adam(learning_rate=0.001)
outer_optimizer = optimizers.Adam(learning_rate=0.001)
# print(y_test)
query_key = "business"
# Find the minimum number of batches.
# Because the datasets of the different categories have different sizes,
# the number of batches differs per category.
# We need to make sure every category is represented in each training step.
training_steps = 1000
for key in support_batch_sets:
if len(support_batch_sets[key]) < training_steps:
training_steps = len(support_batch_sets[key])
for key in query_batch_sets:
if len(query_batch_sets[key]) < training_steps:
training_steps = len(query_batch_sets[key])
valuating_steps = len(query_test_sets[query_key]["X_test"])
train_progbar = utils.Progbar(training_steps)
loss_plot = []
f1_score_plot = []
precision_plot = []
recall_plot = []
# Loop by number of epochs
for epoch in range(epochs):
train_meta_loss = []
val_meta_loss = []
# In each epoch, we loop through each batch. Each batch will be sent to training step function.
for i in range(training_steps):
support_train_data = {}
query_train_data = {}
for support_key in support_batch_sets:
support_train_data[support_key] = support_batch_sets[support_key][i]
for query_key in query_batch_sets:
query_train_data[query_key] = query_batch_sets[query_key][i]
batch_train_loss = maml.train_on_batch(
support_train_data,
query_train_data,
inner_optimizer,
outer_optimizer=outer_optimizer,
)
train_meta_loss.append(batch_train_loss)
train_progbar.update(i + 1, [("loss", np.mean(train_meta_loss))])
    # Store values for plotting.
loss_plot.append(np.mean(train_meta_loss))
scores = val_on_batch(maml.meta_model)
f1_score_plot.append(scores["rouge1"]["f1"])
precision_plot.append(scores["rouge1"]["precision"])
recall_plot.append(scores["rouge1"]["recall"])
print("\n")
print(scores)
print("\n")
# Save trained model
maml.meta_model.save("./model.h5")
from matplotlib import pyplot as plt
# Plotting the training loss and ROUGE-1 F1 across epochs.
plt.title("training loss and ROUGE-1 F1")
plt.ylabel("value")
plt.xlabel("epoch")
epochs_plot = [i for i in range(epochs)]
plt.plot(epochs_plot, loss_plot, color="red", label="loss")
plt.plot(epochs_plot, f1_score_plot, color="blue", label="validation")
plt.legend(loc="upper left")
plt.show()
from matplotlib import pyplot as plt
# Plotting the recall and precision curves.
plt.title("rouge score")
plt.ylabel("score")
plt.xlabel("epoch")
epochs_plot = [i for i in range(epochs)]
plt.plot(epochs_plot, recall_plot, color="red", label="recall")
plt.plot(epochs_plot, precision_plot, color="blue", label="precision")
plt.legend(loc="upper left")
plt.show()
# This cell mirrors the "val_on_batch" function above.
# It calculates the ROUGE scores, saves the gold-summary/model-summary pairs, and stores the evaluation information.
output_file = "result.pickle"
y_pred_list = []
idx_list = []
key_query_task = "business"
X_test = np.array([np.array(val) for val in query_test_sets[key_query_task]["X_test"]])
y_preds = maml.meta_model.predict(X_test, verbose=0)
for j in range(len(y_preds)):
idx = []
for i in range(len(y_preds[j])):
pred_percent = y_preds[j][i]
if pred_percent > 0.5:
idx.append(i)
if len(idx) < 5.0:
idx = np.argsort(y_preds[j][-5:])
idx = sorted(idx)
idx_list.append(idx)
val_sets = train_test_sets[key_query_task]["test"]
# retrieve summary pairs
df_text_clean = val_sets["text_clean"]
pred_summaries = []
for doc in range(len(idx_list)):
pred_summary_sentences_list = []
text_clean = np.array(df_text_clean.iloc[doc])
idx_doc = idx_list[doc]
for i in range(len(text_clean)):
if i in idx_doc:
sentence = text_clean[i]
pred_summary_sentences_list.append(sentence)
pred_summary = " ".join(pred_summary_sentences_list)
pred_summaries.append(pred_summary)
df_gold = val_sets["Summary"]
gold_summaries = [df_gold.iloc[m] for m in range(len(df_gold))]
summaries_comp = tuple(zip(pred_summaries, gold_summaries))
# calculate rouge score
scores = calc_rouge_scores(
pred_summaries, gold_summaries, keys=["rouge1", "rougeL"], use_stemmer=True
)
results_dict = {
"summaries_comp": summaries_comp,
"sent_index_number": idx,
"Rouge": scores,
"mod_summary": maml.meta_model.summary(),
}
with open(output_file, "wb") as handle:
pickle.dump(results_dict, handle)
result = pd.read_pickle("./result.pickle")
print(result["Rouge"])
print("\nPrediction\n")
print(result["summaries_comp"][0][0])
print("\nReal summary\n")
print(result["summaries_comp"][0][1])
|
# **A regression problem is one where the output variable is a real or continuous value, such as “salary” or “weight”. Many different models can be used; the simplest is linear regression, which fits the data with the hyper-plane that best passes through the points.**
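# A minimal sketch of that idea (illustration only, not used in the rest of this notebook):
# fit a straight line to a tiny toy dataset with scikit-learn's LinearRegression.
import numpy as np
from sklearn.linear_model import LinearRegression
_toy_X = np.array([[1.0], [2.0], [3.0], [4.0]])
_toy_y = np.array([2.1, 3.9, 6.2, 8.1])
_toy_lr = LinearRegression().fit(_toy_X, _toy_y)
print("slope:", _toy_lr.coef_[0], "intercept:", _toy_lr.intercept_)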
# ### Loading Packages
import numpy as np
import pandas as pd
from sklearn.preprocessing import normalize, MinMaxScaler, StandardScaler
from sklearn.decomposition import PCA, randomized_svd
from sklearn import random_projection
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor, ExtraTreeRegressor
from lightgbm import LGBMRegressor
from sklearn.svm import SVR
from sklearn.ensemble import (
GradientBoostingRegressor,
VotingRegressor,
BaggingRegressor,
RandomForestRegressor,
)
from xgboost import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV
from catboost import CatBoostRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ### Loading Data
training_set = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
testing_set = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
sample_submission = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
print("Training Set: ", training_set.shape)
print("Testing Set ", testing_set.shape)
# ### Dependent and Independent Variable
# * The data is split roughly equally between train and test. There are 80 independent features in the dataset with one continuous dependent target variable.
# ### Dataset Description
# * Load the text file to read about the dataset's features and target variable. It gives an initial idea of whether each feature is categorical or numeric.
with open(
"/kaggle/input/house-prices-advanced-regression-techniques/data_description.txt",
"r",
) as data_desc:
for line in data_desc.readlines()[:10]:
print(line)
# **First things first: keep aside the target variable as y, but don't drop the target from the training set before analysis. Since the problem is predicting the sale price of a house, material cost (both quality and quantity) and the size of the area should correlate strongly with sale price; a quick check of this follows below.**
y = training_set["SalePrice"]
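# A quick, optional check of that claim (illustration only): the numeric features most
# strongly correlated with SalePrice.
print(
    training_set.select_dtypes(include=np.number)
    .corr()["SalePrice"]
    .abs()
    .sort_values(ascending=False)
    .head(10)
)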
# ### Find the number of null values in a feature.
def check_null(df, col):
return print(col, " ", df[col].isnull().sum())
# ### Find the mean sale price per value of a feature. It works well on categorical data, where only a limited set of values is available.
def categorical_saleprice(df, col):
return df.groupby(col)["SalePrice"].mean()
# **Identify the columns in both the training and testing sets that have more than zero but fewer than 100 missing values, and impute values in each accordingly.**
columns_impute_train = training_set.columns[
(training_set.isnull().sum() > 0) & (training_set.isnull().sum() < 100)
]
columns_impute_test = testing_set.columns[
(testing_set.isnull().sum() > 0) & (testing_set.isnull().sum() < 100)
]
# **Identify the Numeric Columns in the dataset**
# Numeric Columns
num_cols = training_set._get_numeric_data().columns
# training_set[columns_impute_train[0]].fillna(training_set[columns_impute_train[0]].mode()[0],inplace=True)
print(columns_impute_train)
print(columns_impute_test)
check_null(training_set, columns_impute_train[0])
categorical_saleprice(training_set, columns_impute_train[0])
# **Impute Value for MasVnrType based on SalePrice.**
# * Mason Veneer Type - https://www.angieslist.com/articles/how-much-does-brick-veneer-cost.htm.
# * The link above lets us know, that stone MasVnr is costlier than brick. Always verify!
MasVnrType = list(
training_set.groupby(columns_impute_train[0])["SalePrice"].mean().index
)
sale_price = list(
training_set.groupby(columns_impute_train[0])["SalePrice"].mean().values
)
# plt.bar(MasVnrType,sale_price)
# ### Records where MasVnrType is null.
training_set[training_set[columns_impute_train[0]].isnull()]
# **Based on the relationship between MasVnrType and sale price, we can impute the missing MasVnrType values.**
# > For instance, for Id=1244 the sale price is 465000, which is very high compared to other records, and above we saw that MasVnrType "Stone" has an average sale price of about 265583.63.
#
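# A hedged sketch of the same idea in a more automatic form (illustration only; the manual
# assignments below are what this notebook actually uses): report, for each record with a
# missing MasVnrType, the category whose mean SalePrice is closest to that record's SalePrice.
_type_means = training_set.groupby("MasVnrType")["SalePrice"].mean()
_missing_mask = training_set["MasVnrType"].isnull()
print(
    training_set.loc[_missing_mask, "SalePrice"].apply(
        lambda p: (_type_means - p).abs().idxmin()
    )
)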
training_set.loc[training_set["Id"] == 235, "MasVnrType"] = "BrkFace"
training_set.loc[training_set["Id"] == 530, "MasVnrType"] = "BrkFace"
training_set.loc[training_set["Id"] == 651, "MasVnrType"] = "BrkFace"
training_set.loc[training_set["Id"] == 937, "MasVnrType"] = "None"
training_set.loc[training_set["Id"] == 974, "MasVnrType"] = "None"
training_set.loc[training_set["Id"] == 978, "MasVnrType"] = "BrkFace"
training_set.loc[training_set["Id"] == 1244, "MasVnrType"] = "Stone"
training_set.loc[training_set["Id"] == 1279, "MasVnrType"] = "Stone"
# **Impute Value for MasVnrArea based on MasVnrType and Frequency count.**
# **Checking for correlation between **
# * MasVnrType vs MasVnrArea
# * MasVnrArea vs SalePrice
# **Not a surprising correlation here: as the veneer area increases, the sale price increases as well.**
print(training_set.groupby(columns_impute_train[1])["SalePrice"].mean())
print(training_set.groupby(columns_impute_train[0])["MasVnrArea"].value_counts())
print(
training_set[
(training_set["MasVnrType"] == "Stone") & (training_set["SalePrice"] >= 230000)
][["MasVnrArea", "SalePrice"]].mean()
)
# **For MasVnrArea, we can impute values based on the average sale price for each MasVnrType.**
training_set[
(training_set["MasVnrType"] == "BrkFace") & (training_set["SalePrice"] >= 200000)
][["MasVnrArea", "SalePrice"]].mean()
training_set[training_set[columns_impute_train[1]].isnull()][
["MasVnrType", "SalePrice"]
]
training_set.loc[training_set["Id"] == 937, "MasVnrArea"] = 0.0
training_set.loc[training_set["Id"] == 974, "MasVnrArea"] = 0.0
training_set.loc[training_set["Id"] == 978, "MasVnrArea"] = 332.00
training_set.loc[training_set["Id"] == 1244, "MasVnrArea"] = 280.3
training_set.loc[training_set["Id"] == 1279, "MasVnrArea"] = 280.3
training_set.loc[training_set["Id"] == 235, "MasVnrArea"] = 332.00
training_set.loc[training_set["Id"] == 530, "MasVnrArea"] = 332.00
training_set.loc[training_set["Id"] == 651, "MasVnrArea"] = 332.00
# **Impute the Basement quality based on Sale Price**
training_set[columns_impute_train[2]].value_counts()
training_set[training_set[columns_impute_train[2]].isnull()].head()
print(training_set.groupby(columns_impute_train[2])["SalePrice"].mean())
print(training_set[(training_set["BsmtQual"] == "Ex")][["SalePrice"]].mean())
print(training_set[(training_set["BsmtQual"] == "TA")][["SalePrice"]].mean())
print(training_set[(training_set["BsmtQual"] == "Fa")][["SalePrice"]].mean())
def impute_BsmtQual_train(df, column):
df.loc[(df[column].isnull()) & (df["SalePrice"] < 120000.00), column] = "Fa"
df.loc[
(df[column].isnull())
& (df["SalePrice"] >= 120000.00)
& (df["SalePrice"] < 180000.00),
column,
] = "TA"
df.loc[(df[column].isnull()) & (df["SalePrice"] >= 180000.00), column] = "TA"
return "Basement Quality's missing value is imputed"
impute_BsmtQual_train(training_set, "BsmtQual")
# **Basement Condition is imputed based on Sale Price.**
training_set[columns_impute_train[3]].value_counts()
training_set[training_set[columns_impute_train[3]].isnull()].head()
# print(training_set[(training_set["BsmtCond"]=="Ex") ][["SalePrice"]].mean())
print(training_set[(training_set["BsmtCond"] == "TA")][["SalePrice"]].mean())
print(training_set[(training_set["BsmtCond"] == "Fa")][["SalePrice"]].mean())
print(training_set[(training_set["BsmtCond"] == "Po")][["SalePrice"]].mean())
print(training_set[(training_set["BsmtCond"] == "Gd")][["SalePrice"]].mean())
print(training_set.groupby(columns_impute_train[3])["SalePrice"].mean())
training_set.loc[
(training_set[columns_impute_train[3]].isnull())
& (training_set["SalePrice"] < 75000.00),
"BsmtCond",
] = "Po"
training_set.loc[
(training_set[columns_impute_train[3]].isnull())
& (training_set["SalePrice"] >= 75000.00)
& (training_set["SalePrice"] < 140000.00),
"BsmtCond",
] = "Fa"
training_set.loc[
(training_set[columns_impute_train[3]].isnull())
& (training_set["SalePrice"] >= 140000.00)
& (training_set["SalePrice"] < 190000.00),
"BsmtCond",
] = "TA"
training_set.loc[
(training_set[columns_impute_train[3]].isnull())
& (training_set["SalePrice"] >= 190000.00),
"BsmtCond",
] = "Gd"
# **Basement Exposure values are imputed based on Sale Price.**
training_set[columns_impute_train[4]].value_counts()
training_set[training_set[columns_impute_train[4]].isnull()].shape
print(training_set.groupby(columns_impute_train[4])["SalePrice"].mean())
training_set.loc[
(training_set[columns_impute_train[4]].isnull())
& (training_set["SalePrice"] < 150000.00),
"BsmtExposure",
] = "No"
training_set.loc[
(training_set[columns_impute_train[4]].isnull())
& (training_set["SalePrice"] >= 150000.00)
& (training_set["SalePrice"] < 200000.00),
"BsmtExposure",
] = "Mn"
training_set.loc[
(training_set[columns_impute_train[4]].isnull())
& (training_set["SalePrice"] >= 200000.00)
& (training_set["SalePrice"] < 230000.00),
"BsmtExposure",
] = "Av"
training_set.loc[
(training_set[columns_impute_train[4]].isnull())
& (training_set["SalePrice"] >= 210000.00),
"BsmtExposure",
] = "Gd"
# **BsmtFinType1: Rating of basement finished area, the missing value is imputed based on the basement quality.**
#
training_set[columns_impute_train[5]].value_counts()
training_set[training_set[columns_impute_train[5]].isnull()].shape
print(training_set.groupby(columns_impute_train[5])["SalePrice"].mean())
training_set[training_set[columns_impute_train[5]].isnull()][
["BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtQual"]
].head()
training_set.loc[
(training_set[columns_impute_train[5]].isnull())
& (training_set["BsmtQual"] == "Fa"),
"BsmtFinType1",
] = "Unf"
training_set.loc[
(training_set[columns_impute_train[5]].isnull())
& (training_set["BsmtQual"] == "TA"),
"BsmtFinType1",
] = "LwQ"
training_set.loc[
(training_set[columns_impute_train[5]].isnull())
& (training_set["BsmtQual"] == "Gd"),
"BsmtFinType1",
] = "GLQ"
# **BsmtFinType2: Rating of basement finished area (if multiple types), the missing value is imputed based on the mode value.**
#
training_set[columns_impute_train[6]].value_counts()
training_set[training_set[columns_impute_train[6]].isnull()].shape
print(training_set.groupby(columns_impute_train[6])["SalePrice"].mean())
training_set[training_set["BsmtQual"] == "Ex"]["BsmtFinType2"].value_counts()
training_set.loc[training_set[columns_impute_train[6]].isnull(), "BsmtFinType2"] = "Unf"
# **Electrical, the missing value is imputed based on the mode of the column.**
training_set[columns_impute_train[7]].value_counts()
training_set[columns_impute_train[7]].fillna(value="SBrkr", inplace=True)
# **GarageType, the missing value is imputed based on Sale Price**
training_set[columns_impute_train[8]].value_counts()
training_set[training_set[columns_impute_train[8]].isnull()]
print(training_set.groupby(columns_impute_train[8])["SalePrice"].mean())
training_set.loc[
(training_set[columns_impute_train[8]].isnull())
& (training_set["SalePrice"] < 110000.00),
"GarageType",
] = "CarPort"
training_set.loc[
(training_set[columns_impute_train[8]].isnull())
& (training_set["SalePrice"] >= 110000.00)
& (training_set["SalePrice"] < 140000.00),
"GarageType",
] = "Detchd"
training_set.loc[
(training_set[columns_impute_train[8]].isnull())
& (training_set["SalePrice"] >= 140000.00)
& (training_set["SalePrice"] < 210000.00),
"GarageType",
] = "Attchd"
training_set.loc[
(training_set[columns_impute_train[8]].isnull())
& (training_set["SalePrice"] >= 210000.00),
"GarageType",
] = "BuiltIn"
# training_set[columns_impute_train[9]].value_counts()
# training_set[training_set[columns_impute_train[9]].isnull()].shape
# print(training_set.groupby(columns_impute_train[9])["SalePrice"].mean())
# pd.crosstab(training_set["GarageYrBlt"],training_set["GarageType"]).ipynb_checkpoints/
# training_set[training_set["GarageYrBlt"].isnull()]["GarageType"].value_counts()
# training_set[training_set["GarageType"]=="Detchd"]["GarageYrBlt"].value_counts()
columns_impute_train.drop(columns_impute_train[9])
# **Garage Finish, missing value is imputed based on Sale Price**
training_set[columns_impute_train[10]].value_counts()
training_set[training_set[columns_impute_train[10]].isnull()]
print(training_set.groupby(columns_impute_train[10])["SalePrice"].mean())
pd.crosstab(training_set["SalePrice"], training_set["GarageFinish"])
training_set.loc[
(training_set[columns_impute_train[10]].isnull())
& (training_set["SalePrice"] < 160000.00),
"GarageFinish",
] = "Unf"
training_set.loc[
(training_set[columns_impute_train[10]].isnull())
& (training_set["SalePrice"] >= 160000.00)
& (training_set["SalePrice"] < 220000.00),
"GarageFinish",
] = "RFn"
training_set.loc[
(training_set[columns_impute_train[10]].isnull())
& (training_set["SalePrice"] >= 220000.00),
"GarageFinish",
] = "Fin"
# **Garage Quality, missing value is imputed based on Sale Price**
training_set[columns_impute_train[11]].value_counts()
training_set[training_set[columns_impute_train[11]].isnull()].head()
print(training_set.groupby(columns_impute_train[11])["SalePrice"].mean())
training_set.loc[
(training_set[columns_impute_train[11]].isnull())
& (training_set["SalePrice"] < 100000.00),
"GarageQual",
] = "Po"
training_set.loc[
(training_set[columns_impute_train[11]].isnull())
& (training_set["SalePrice"] >= 100000.00)
& (training_set["SalePrice"] < 140000.00),
"GarageQual",
] = "Fa"
training_set.loc[
(training_set[columns_impute_train[11]].isnull())
& (training_set["SalePrice"] >= 140000.00)
& (training_set["SalePrice"] < 220000.00),
"GarageQual",
] = "TA"  # "TA" (typical/average), matching the categories used above
training_set.loc[
(training_set[columns_impute_train[11]].isnull())
& (training_set["SalePrice"] >= 220000.00),
"GarageQual",
] = "Ex"
training_set[columns_impute_train[12]].value_counts()
training_set[training_set[columns_impute_train[12]].isnull()].shape
print(training_set.groupby(columns_impute_train[12])["SalePrice"].mean())
training_set[columns_impute_train[12]].fillna(value="TA", inplace=True)
columns_too_many_missing = list(training_set.columns[training_set.isnull().any()])
list(training_set.columns[training_set.isnull().any()])
training_set.drop(columns=columns_too_many_missing, inplace=True)
testing_set.drop(columns=columns_too_many_missing, inplace=True)
training_set.select_dtypes(include="object").columns
col_to_encode = [
"MSZoning",
"Street",
"LotShape",
"LandContour",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PavedDrive",
"SaleType",
"SaleCondition",
]
encoded_categorical_df = pd.get_dummies(
training_set, columns=col_to_encode, drop_first=True
)
col_drop = list(training_set.select_dtypes(include="object").columns)
col_drop.append("Id")
training_set.drop(columns=col_drop, inplace=True)
encoded_categorical_df.drop(columns=["Id", "Utilities"], inplace=True)
# **Normalization / Standard Scaler**
min_max_scaler = MinMaxScaler()
standard_scaler = StandardScaler()
col_to_normalize = [
"MSSubClass",
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
]
encoded_categorical_df[col_to_normalize] = min_max_scaler.fit_transform(
encoded_categorical_df[col_to_normalize]
)
# encoded_categorical_df[col_to_normalize] = standard_scaler.fit_transform(encoded_categorical_df[col_to_normalize])
# encoded_categorical_df[col_to_normalize] = normalize(encoded_categorical_df[col_to_normalize])
encoded_categorical_df[
[
"MSSubClass",
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
]
]
# encoded_categorical_df.drop(columns=col_to_normalize,inplace=True)
encoded_categorical_df.drop(columns=["SalePrice"], inplace=True)
columns_impute_test
testing_set["MSZoning"].value_counts()
# ### Imputing value for test data
# **In the training data we could see how each independent variable relates to the sale price, but the test set has no SalePrice column, so we need a different analysis for imputing its missing values.**
# **MSZoning, the missing value is imputed based on the MSSubclass**
testing_set["MSSubClass"].value_counts()
testing_set[testing_set[columns_impute_test[0]].isnull()]
testing_set[testing_set[columns_impute_test[0]].isnull()][["MSSubClass", "MSZoning"]]
pd.crosstab(testing_set["MSSubClass"], testing_set["MSZoning"])
testing_set.loc[
(testing_set[columns_impute_test[0]].isnull()) & (testing_set["MSSubClass"] == 20),
"MSZoning",
] = "RL"
testing_set.loc[
(testing_set[columns_impute_test[0]].isnull()) & (testing_set["MSSubClass"] == 30),
"MSZoning",
] = "RM"
testing_set.loc[
(testing_set[columns_impute_test[0]].isnull()) & (testing_set["MSSubClass"] == 70),
"MSZoning",
] = "RM"
testing_set[columns_impute_test[1]].value_counts()
testing_set[testing_set[columns_impute_test[1]].isnull()]
testing_set[columns_impute_test[1]].fillna(value="AllPub", inplace=True)
testing_set[columns_impute_test[2]].value_counts()
testing_set[testing_set[columns_impute_test[2]].isnull()]
testing_set[columns_impute_test[2]].fillna(value="VinylSd", inplace=True)
testing_set[columns_impute_test[3]].value_counts()
testing_set[testing_set[columns_impute_test[3]].isnull()]
testing_set[columns_impute_test[3]].fillna(value="VinylSd", inplace=True)
testing_set[columns_impute_test[4]].value_counts()
testing_set[testing_set[columns_impute_test[4]].isnull()][
["MasVnrType", "ExterCond", "ExterQual"]
]
testing_set[testing_set["ExterQual"] == "Gd"]["MasVnrType"].value_counts()
pd.crosstab(testing_set["MasVnrType"], testing_set["ExterCond"])
testing_set["MasVnrType"].fillna(value="None", inplace=True)
testing_set[columns_impute_test[5]].value_counts()
testing_set[testing_set[columns_impute_test[5]].isnull()]
testing_set[columns_impute_test[5]].fillna(value=0.0, inplace=True)
testing_set[columns_impute_test[6]].value_counts()
testing_set[testing_set[columns_impute_test[6]].isnull()].head()
pd.crosstab(testing_set["BsmtQual"], testing_set["GarageQual"])
testing_set[testing_set[columns_impute_test[6]].isnull()][
["BsmtQual", "GarageQual"]
].head()
testing_set.loc[
(testing_set[columns_impute_test[6]].isnull())
    & (testing_set["GarageQual"] == "TA"),  # "TA" is the category actually present in GarageQual
"BsmtQual",
] = "Gd"
testing_set[columns_impute_test[6]].fillna(value="TA", inplace=True)
testing_set[columns_impute_test[7]].value_counts()
testing_set[columns_impute_test[7]].fillna(value="TA", inplace=True)
testing_set[columns_impute_test[8]].value_counts()
testing_set[testing_set[columns_impute_test[8]].isnull()][
["BsmtExposure", "GarageQual"]
].head()
testing_set[testing_set["GarageQual"] == "TA"]["BsmtExposure"].value_counts()
testing_set[columns_impute_test[8]].fillna(value="No", inplace=True)
testing_set[columns_impute_test[9]].value_counts()
testing_set[testing_set[columns_impute_test[9]].isnull()].head()
testing_set[testing_set[columns_impute_test[9]].isnull()][
["BsmtFinType1", "GarageCond", "GarageQual", "BldgType"]
].head()
testing_set[testing_set["BldgType"] == "1Fam"]["BsmtFinType1"].value_counts()
testing_set.loc[
(testing_set[columns_impute_test[9]].isnull())
& (testing_set["BldgType"] == "1Fam"),
"BsmtFinType1",
] = "Unf"
testing_set.loc[
(testing_set[columns_impute_test[9]].isnull())
& (testing_set["BldgType"] == "Duplex"),
"BsmtFinType1",
] = "GLQ"
testing_set.columns
testing_set[columns_impute_test[10]].value_counts()
testing_set[testing_set[columns_impute_test[10]].isnull()]
testing_set[testing_set["SaleCondition"] == "Abnorml"][
columns_impute_test[10]
].value_counts().head()
testing_set[columns_impute_test[10]].fillna(value=0.0, inplace=True)
testing_set[columns_impute_test[11]].value_counts()
testing_set[testing_set[columns_impute_test[11]].isnull()].head()
testing_set[testing_set["SaleCondition"] == "AdjLand"][
columns_impute_test[11]
].value_counts()
testing_set[columns_impute_test[11]].fillna(value="Unf", inplace=True)
testing_set[columns_impute_test[12]].fillna(value=0.0, inplace=True)
testing_set[columns_impute_test[13]].value_counts()
testing_set[testing_set[columns_impute_test[13]].isnull()][
["BsmtFinType1", "GarageCond", "GarageQual", "BldgType"]
]
testing_set[columns_impute_test[13]].fillna(value=0.0, inplace=True)
testing_set[columns_impute_test[14]].value_counts()
testing_set[testing_set[columns_impute_test[14]].isnull()][
["BsmtFinType1", "GarageCond", "GarageQual", "BldgType"]
]
testing_set[columns_impute_test[14]].fillna(value=0.0, inplace=True)
testing_set[columns_impute_test[15]].value_counts()
testing_set[testing_set[columns_impute_test[15]].isnull()][
["BsmtFinType1", "GarageCond", "GarageQual", "BldgType"]
]
testing_set[columns_impute_test[15]].fillna(value=0.0, inplace=True)
testing_set[columns_impute_test[16]].value_counts()
testing_set[columns_impute_test[16]].fillna(value=0.0, inplace=True)
testing_set[columns_impute_test[17]].value_counts()
testing_set[testing_set[columns_impute_test[17]].isnull()][
["BsmtFinType1", "GarageCond", "GarageQual", "BldgType"]
]
testing_set[columns_impute_test[17]].fillna(value="Fa", inplace=True)
testing_set[columns_impute_test[18]].value_counts()
testing_set[testing_set[columns_impute_test[18]].isnull()][
["BsmtFinType1", "GarageCond", "GarageQual", "BldgType"]
]
testing_set[columns_impute_test[18]].fillna(value="Fa", inplace=True)
testing_set[columns_impute_test[19]].value_counts()
testing_set[testing_set[columns_impute_test[19]].isnull()][["BldgType", "GarageType"]]
testing_set[testing_set["BldgType"] == "TwnhsE"][columns_impute_test[19]].value_counts()
testing_set.loc[
(testing_set[columns_impute_test[19]].isnull())
& (testing_set["BldgType"] == "1Fam"),
"GarageType",
] = "Attchd"
testing_set.loc[
(testing_set[columns_impute_test[19]].isnull())
& (testing_set["BldgType"] == "Duplex"),
"GarageType",
] = "Detchd"
testing_set.loc[
(testing_set[columns_impute_test[19]].isnull())
& (testing_set["BldgType"] == "2fmCon"),
"GarageType",
] = "Detchd"
testing_set.loc[
(testing_set[columns_impute_test[19]].isnull())
& (testing_set["BldgType"] == "Twnhs"),
"GarageType",
] = "Detchd"
testing_set.loc[
(testing_set[columns_impute_test[19]].isnull())
& (testing_set["BldgType"] == "TwnhsE"),
"GarageType",
] = "Attchd"
testing_set[columns_impute_test[21]].value_counts()
testing_set[testing_set[columns_impute_test[21]].isnull()].head()
testing_set[testing_set[columns_impute_test[21]].isnull()]["BldgType"].value_counts()
testing_set.loc[
(testing_set[columns_impute_test[21]].isnull())
& (testing_set["BldgType"] == "TwnhsE"),
"GarageFinish",
] = "Fin"
testing_set[columns_impute_test[21]].fillna(value="Unf", inplace=True)
testing_set[columns_impute_test[22]].value_counts()
testing_set[testing_set[columns_impute_test[22]].isnull()]
testing_set[testing_set["SaleCondition"] == "Alloca"][
columns_impute_test[22]
].value_counts()
testing_set[columns_impute_test[22]].fillna(value=2.0, inplace=True)
testing_set[columns_impute_test[23]].value_counts()
testing_set[testing_set[columns_impute_test[23]].isnull()]
testing_set[testing_set["SaleCondition"] == "Alloca"][columns_impute_test[23]].mean()
testing_set[columns_impute_test[23]].fillna(value=499.09, inplace=True)
testing_set[columns_impute_test[24]].value_counts()
testing_set[testing_set[columns_impute_test[24]].isnull()].head()
testing_set[columns_impute_test[24]].fillna(value="TA", inplace=True)
testing_set[columns_impute_test[25]].value_counts()
testing_set[columns_impute_test[25]].fillna(value="TA", inplace=True)
testing_set[columns_impute_test[26]].value_counts()
testing_set[testing_set[columns_impute_test[26]].isnull()]
testing_set[testing_set["SaleCondition"] == "Normal"][
columns_impute_test[26]
].value_counts()
testing_set[columns_impute_test[26]].fillna(value="WD", inplace=True)
testing_set.columns[
(testing_set.isnull().sum() > 0) & (testing_set.isnull().sum() < 100)
]
testing_set.drop(columns=["Id"], inplace=True)
testing_set.drop(columns=["Utilities"], inplace=True)
col_for_dummies = [
"MSZoning",
"Street",
"LotShape",
"LandContour",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PavedDrive",
"SaleType",
"SaleCondition",
]
testing_encoding_categorical = pd.get_dummies(
testing_set, columns=col_for_dummies, drop_first=True
)
# min_max_scaler = MinMaxScaler()
num_cols_test = testing_set._get_numeric_data().columns
num_cols_test
col_to_normalize = [
"MSSubClass",
"LotArea",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"LowQualFinSF",
"GrLivArea",
"GarageArea",
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"3SsnPorch",
"ScreenPorch",
]
testing_encoding_categorical[col_to_normalize] = min_max_scaler.transform(
testing_encoding_categorical[col_to_normalize]
)
# testing_encoding_categorical[col_to_normalize] = standard_scaler.transform(testing_encoding_categorical[col_to_normalize])
# testing_encoding_categorical[col_to_normalize] = normalize(testing_encoding_categorical[col_to_normalize])
# testing_encoding_categorical.drop(columns=col_to_normalize,inplace=True)
features_to_delete_test = []
for col in testing_encoding_categorical.columns:
if col not in encoded_categorical_df.columns:
features_to_delete_test.append(col)
features_to_delete_train = []
for col in encoded_categorical_df.columns:
if col not in testing_encoding_categorical.columns:
features_to_delete_train.append(col)
print(encoded_categorical_df.shape)
print(testing_encoding_categorical.shape)
# features_to_delete_train.remove("SalePrice")
encoded_categorical_df.drop(columns=features_to_delete_train, inplace=True)
testing_encoding_categorical.drop(columns=features_to_delete_test, inplace=True)
features_to_delete_train
# grp = PCA(n_components=)
# X = grp.fit_transform(encoded_categorical_df)
# print(type(X))
# print(type(y))
# grp = random_projection.johnson_lindenstrauss_min_dim(encoded_categorical_df,eps=0.3)
X_train, X_valid, y_train, y_valid = train_test_split(
encoded_categorical_df, y, test_size=0.25
)
cbr = CatBoostRegressor()
cbr.fit(X_train, y_train)
y_pred = cbr.predict(X_valid)
print("Mean Squared Error", mean_squared_error(y_valid, y_pred))
print("Mean Absolute Error", mean_absolute_error(y_valid, y_pred))
print("R2 Score", r2_score(y_valid, y_pred))
# param_grid = {'learning_rate':[0.1],"n_estimators":[150],'min_samples_leaf':[5],'min_samples_split':[20],"max_depth":[10],'loss': ['lad'],"max_features":["auto"]}
# gbccv = GridSearchCV(GradientBoostingRegressor(),param_grid)
# gbccv.fit(X_train,y_train)
# y_pred = gbccv.predict(X_valid)
# print("Mean Squared Error",mean_squared_error(y_valid, y_pred))
# print("Mean Absolute Error",mean_absolute_error(y_valid, y_pred))
# print("R2 Score",r2_score(y_valid, y_pred))
# param_grid = {'learning_rate':[0.1],"n_estimators":[100],"min_split_gain":[0.1],"min_child_samples":[15]}
# lgbm = GridSearchCV(LGBMRegressor(),param_grid)
# lgbm.fit(X_train,y_train)
# y_pred = lgbm.predict(X_valid)
# print(lgbm.best_params_)
# print(lgbm.best_score_)
# print("Mean Squared Error",mean_squared_error(y_valid, y_pred))
# print("Mean Absolute Error",mean_absolute_error(y_valid, y_pred))
# print("R2 Score",r2_score(y_valid, y_pred))
# reg1 = GradientBoostingRegressor(n_estimators=150,min_samples_leaf=5,min_samples_split=20,max_depth=10,loss='lad')
# reg2 = LGBMRegressor(min_split_gain=0.3,min_child_weight=0.0001,n_estimators=150)
# reg3 = RidgeCV(cv=20)
# reg4 = CatBoostRegressor()
# voting_regressor = VotingRegressor(estimators=[('gbr', reg1), ('lgbm', reg2),('ridge',reg3),('catboost',reg4)])
# voting_regressor.fit(X_train,y_train)
# y_pred = voting_regressor.predict(X_valid)
# print("Mean Squared Error",mean_squared_error(y_valid, y_pred))
# print("Mean Absolute Error",mean_absolute_error(y_valid, y_pred))
# print("R2 Score",r2_score(y_valid, y_pred))
# las_reg = RidgeCV(cv=10).fit(X_train,y_train)
# y_pred = las_reg.predict(X_valid)
# print("Mean Squared Error",mean_squared_error(y_valid, y_pred))
# print("Mean Absolute Error",mean_absolute_error(y_valid, y_pred))
# print("R2 Score",r2_score(y_valid, y_pred))
# testing_encoding_categorical_pca = pca.transform(testing_encoding_categorical)
Id = sample_submission["Id"]
predicted_test = []
# X_test = random_projection.johnson_lindenstrauss_min_dim(testing_encoding_categorical,eps=0.3)
for x in cbr.predict(testing_encoding_categorical):
predicted_test.append(x)
predicted_test_value = pd.DataFrame({"Id": Id, "SalePrice": predicted_test})
predicted_test_value.to_csv("PredictedTestScore.csv", index=False)
|
# this cell will output many lines, which is fine
# the Kaggle environment has Python 3.7, which conflicts with some of our dependencies,
# so we work around that here
# ideally we would need Python 3.8+ here
from PIL import Image
# https://www.sothebys.com/en/buy/new-arrivals/_bayreuth-opera-1477
Image.open("/kaggle/input/art-price-dataset/artDataset/image_6.png").resize((100, 100))
import pandas as pd
df = pd.read_csv("/kaggle/input/art-price-dataset/artDataset.csv")
df["image"] = [f"image_{i}.png" for i in range(1, len(df) + 1)]
# df["price"].map(lambda x: int(x.strip(" USD").replace(".", "")))
df.head()
import os
images = list(os.walk("/kaggle/input/art-price-dataset/artDataset"))[0][2]
prices = (
df.set_index("image")
.loc[images]
.price.map(lambda x: int(x.strip(" USD").replace(".", "")))
.values
)
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array, load_img
images_arrays = []
for i in images:
img = load_img(
f"/kaggle/input/art-price-dataset/artDataset/{i}", target_size=(224, 224, 3)
)
images_arrays.append(img_to_array(img))
images_arrays = np.concatenate([i.reshape(1, 224, 224, 3) for i in images_arrays])
images_arrays.shape
# ## prices
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.applications import MobileNetV3Small
from tensorflow.keras.applications.mobilenet_v3 import preprocess_input
# let's make a very strong assumption that flipping and rotation
# don't influence the price of high art
data_augmentations = keras.Sequential(
[
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
]
)
# Load the MobileNetV3Small model as the backbone
base_model = MobileNetV3Small(include_top=False, input_shape=(224, 224, 3))
# Create a new model on top
inputs = keras.Input(shape=(224, 224, 3))
x = preprocess_input(inputs)
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
# x = keras.layers.Dropout(0.2)(x) # add dropout to prevent overfitting
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
# Freeze the base_model
base_model.trainable = False
# Compile the model
optimizer = keras.optimizers.Adam(learning_rate=0.1)
model.compile(
optimizer=optimizer, loss="mean_squared_error", metrics=["mean_absolute_error"]
)
# the validation loss doesn't go below 6700,
# but the constant-prediction baseline loss is 12800:
# np.mean((prices - np.mean(prices)) ** 2) ** 0.5
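# For reference, printing that constant-prediction baseline (the RMSE of always predicting
# the mean price) makes the comparison explicit:
baseline_rmse = np.mean((prices - np.mean(prices)) ** 2) ** 0.5
print("constant-prediction baseline RMSE:", baseline_rmse)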
# Train the model on your regression dataset
model.fit(images_arrays, prices, epochs=1, validation_split=0.1)
img = img_to_array(Image.open("/kaggle/input/art-price-dataset/artDataset/image_1.png"))
model.predict(np.expand_dims(img[:224, :224, :3], 0))[0][0]
def pred(data):
    # Sketch of a combined endpoint (not used below): it assumes two separately trained
    # models, `model1` for the price and `model2` for the artwork name, neither of which
    # is defined in this notebook.
    p = model1.predict(data)
    name = model2.predict(data)
    return {"price": p, "name": name}
from mlem.api import save
save(
model,
"/kaggle/working/models/price",
preprocess=lambda x: np.expand_dims(x[:224, :224, :3], 0),
postprocess=lambda x: {"price": np.array(x)[0][0]},
sample_data=img,
)
from mlem.api import load_meta
loaded = load_meta("/kaggle/working/models/price", load_value=True)
loaded(img)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# # import Dataset
marriage_divorce = pd.read_csv(
"/kaggle/input/marriage-and-divorce-in-iran/Marriage and divorce in Iran.csv"
)
marriage_divorce.head()
marriage_divorce["marriage(city)"].value_counts(dropna=False)
# **Some missing values are recorded as '…', so read the dataset again and convert every '…' entry to np.nan**
# # Import dataset again
marriage_divorce = pd.read_csv(
"/kaggle/input/marriage-and-divorce-in-iran/Marriage and divorce in Iran.csv",
na_values="…",
)
marriage_divorce.head()
marriage_divorce.describe()
marriage_divorce.info()
# # Rename Columns
marriage_divorce.columns = (
marriage_divorce.columns.str.replace("(", "_").str.replace(")", "").str.lower()
)
marriage_divorce.columns
# # Manage null values
marriage_divorce.isnull().sum()
marriage_divorce[marriage_divorce.isnull().any(axis=True)]
# **Fill these rows with data from other sources**
marriage_divorce.set_index("year", inplace=True)
marriage_divorce.loc[
"1386", ["marriage_city", "divorce_city", "marriage_village", "divorce_village"]
] = (602309, 84120, 238798, 15732)
marriage_divorce.loc[
"1387", ["marriage_city", "divorce_city", "marriage_village", "divorce_village"]
] = (633179, 93496, 248413, 17014)
marriage_divorce.loc[
"1388", ["marriage_city", "divorce_city", "marriage_village", "divorce_village"]
] = (629893, 106548, 260315, 19199)
marriage_divorce.loc[
"1389", ["marriage_city", "divorce_city", "marriage_village", "divorce_village"]
] = (622398, 116643, 269230, 20557)
marriage_divorce.loc[
"1390", ["marriage_city", "divorce_city", "marriage_village", "divorce_village"]
] = (631457, 121091, 243335, 21750)
marriage_divorce.reset_index(inplace=True)
# # Verify data
marriage_divorce["year"].unique()
marriage_divorce.tail(10)
# **Two items in the 'year' column do not have the correct values.**
marriage_divorce.iloc[38, 0] = "1396"
marriage_divorce.iloc[39, 0] = "1397"
marriage_divorce["year"].unique()
marriage_divorce["year"] = marriage_divorce["year"].astype(int)
marriage_divorce["marriage_city"] = marriage_divorce["marriage_city"].astype(int)
marriage_divorce["divorce_city"] = marriage_divorce["divorce_city"].astype(int)
marriage_divorce["marriage_village"] = marriage_divorce["marriage_village"].astype(int)
marriage_divorce["divorce_village"] = marriage_divorce["divorce_village"].astype(int)
# 'marriage_country' and 'divorce_country' are already int
marriage_divorce.info()
# # Set index
marriage_divorce.set_index("year", drop=False, inplace=True)
# marriage_divorce.columns
# # Analysis and visualization
# marriage max/min
print(
"Max number of marriage in country is: "
+ marriage_divorce["marriage_country"].max().astype(str)
+ " in "
+ marriage_divorce["marriage_country"].idxmax().astype(str)
)
print(
"Min number of marriage in country is: "
+ marriage_divorce["marriage_country"].min().astype(str)
+ " in "
+ marriage_divorce["marriage_country"].idxmin().astype(str)
)
print(
"\nMax number of marriage in city is: "
+ marriage_divorce["marriage_city"].max().astype(str)
+ " in "
+ marriage_divorce["marriage_city"].idxmax().astype(str)
)
print(
"Min number of marriage in city is: "
+ marriage_divorce["marriage_city"].min().astype(str)
+ " in "
+ marriage_divorce["marriage_city"].idxmin().astype(str)
)
print(
"\nMax number of marriage in village is: "
+ marriage_divorce["marriage_village"].max().astype(str)
+ " in "
+ marriage_divorce["marriage_village"].idxmax().astype(str)
)
print(
"Min number of marriage in village is: "
+ marriage_divorce["marriage_village"].min().astype(str)
+ " in "
+ marriage_divorce["marriage_village"].idxmin().astype(str)
)
print("\n-------------------------------------------------------")
# divorce max/min
print(
"\nMax number of divorce in country is: "
+ marriage_divorce["divorce_country"].max().astype(str)
+ " in "
+ marriage_divorce["divorce_country"].idxmax().astype(str)
)
print(
"Min number of divorce in country is: "
+ marriage_divorce["divorce_country"].min().astype(str)
+ " in "
+ marriage_divorce["divorce_country"].idxmin().astype(str)
)
print(
"\nMax number of divorce in city is: "
+ marriage_divorce["divorce_city"].max().astype(str)
+ " in "
+ marriage_divorce["divorce_city"].idxmax().astype(str)
)
print(
"Min number of divorce in city is: "
+ marriage_divorce["divorce_city"].min().astype(str)
+ " in "
+ marriage_divorce["divorce_city"].idxmin().astype(str)
)
print(
"\nMax number of divorce in village is: "
+ marriage_divorce["divorce_village"].max().astype(str)
+ " in "
+ marriage_divorce["divorce_village"].idxmax().astype(str)
)
print(
"Min number of divorce in village is: "
+ marriage_divorce["divorce_village"].min().astype(str)
+ " in "
+ marriage_divorce["divorce_village"].idxmin().astype(str)
)
print("\n-------------------------------------------------------")
# marriage mean
print(
"\nAverage number of marriage in country is: "
+ round(marriage_divorce["marriage_country"].mean(), 2).astype(str)
)
print(
"\nAverage number of marriage in city is: "
+ round(marriage_divorce["marriage_city"].mean(), 2).astype(str)
)
print(
"\nAverage number of marriage in village is: "
+ round(marriage_divorce["marriage_village"].mean(), 2).astype(str)
)
print("\n-------------------------------------------------------")
# divorce mean
print(
"\nAverage number of divorce in country is: "
+ round(marriage_divorce["divorce_country"].mean(), 2).astype(str)
)
print(
"\nAverage number of divorce in city is: "
+ round(marriage_divorce["divorce_city"].mean(), 2).astype(str)
)
print(
"\nAverage number of divorce in village is: "
+ round(marriage_divorce["divorce_village"].mean(), 2).astype(str)
)
# in country
marriage_divorce["marriage_country"].plot(label="Marriage counts in Country")
marriage_divorce["divorce_country"].plot(label="Divorce counts in Country")
plt.legend()
# plt.axvspan(marriage_divorce.nlargest(10,'marriage_country')['year'].iat[9],marriage_divorce.nlargest(10,'marriage_country')['year'].iat[0],alpha=0.4,color='grey')
plt.show()
# #in city
marriage_divorce["marriage_city"].plot(label="Marriage counts in City")
marriage_divorce["divorce_city"].plot(label="Divorce counts in City")
plt.legend()
# plt.axvspan(marriage_divorce.nlargest(10,'marriage_country')['year'].iat[9],marriage_divorce.nlargest(10,'marriage_country')['year'].iat[0],alpha=0.4,color='grey')
plt.show()
# in village
marriage_divorce["marriage_village"].plot(label="Marriage counts in Village")
marriage_divorce["divorce_village"].plot(label="Divorce counts in Village")
plt.legend()
# plt.axvspan(marriage_divorce.nlargest(10,'marriage_country')['year'].iat[9],marriage_divorce.nlargest(10,'marriage_country')['year'].iat[0],alpha=0.4,color='grey')
plt.show()
# ### Conclusion
# The above charts show that although the number of marriages in Iran has decreased (both in cities and villages, and naturally in the country as a whole), the number of divorces has had an increasing trend. The plots also show the years in which the marriage counts peak for the country, the cities, and the villages.
# There may be underlying social or cultural factors contributing to this trend. Further research and analysis would be needed to fully understand the reasons behind these changes.
# Also:
# The charts show that between 1385 and 1391 the number of marriages increased more than in other years.
# ### Compute the marriage-to-divorce ratio
marriage_divorce["country_ratio"] = (
marriage_divorce["marriage_country"] / marriage_divorce["divorce_country"]
)
marriage_divorce["city_ratio"] = (
marriage_divorce["marriage_city"] / marriage_divorce["divorce_city"]
)
marriage_divorce["village_ratio"] = (
marriage_divorce["marriage_village"] / marriage_divorce["divorce_village"]
)
marriage_divorce["country_ratio"].plot(label="Marriage to divorce ratio in country")
marriage_divorce["city_ratio"].plot(label="Marriage to divorce ratio in city")
marriage_divorce["village_ratio"].plot(label="Marriage to divorce ratio in village")
plt.legend()
# plt.axvspan(marriage_divorce.nlargest(10,'marriage_country')['year'].iat[9],marriage_divorce.nlargest(10,'marriage_country')['year'].iat[0],alpha=0.4,color='grey')
plt.show()
# ### Conclusion
# The marriage-to-divorce ratio in villages is higher than in cities and in the country as a whole; this means that, in a given year, villages record more marriages for each divorce than cities or the country overall.
# marriages per divorce (how many marriages occur for each divorce)
marriage_divorce["country_marriages_for_each_divorce"] = round(
marriage_divorce["marriage_country"] / marriage_divorce["divorce_country"]
)
marriage_divorce["city_marriages_for_each_divorce"] = round(
marriage_divorce["marriage_city"] / marriage_divorce["divorce_city"]
)
marriage_divorce["village_marriages_for_each_divorce"] = round(
marriage_divorce["marriage_village"] / marriage_divorce["divorce_village"]
)
plt.figure(figsize=(13, 5))
plt.bar(
marriage_divorce["village_marriages_for_each_divorce"].index,
marriage_divorce["village_marriages_for_each_divorce"],
    label="Marriages for each divorce in village",
)
plt.plot(
marriage_divorce["country_marriages_for_each_divorce"].index,
marriage_divorce["country_marriages_for_each_divorce"],
    label="Marriages for each divorce in country",
color="red",
)
plt.bar(
marriage_divorce["city_marriages_for_each_divorce"].index,
marriage_divorce["city_marriages_for_each_divorce"],
    label="Marriages for each divorce in city",
)
# plt.xticks(ticks=marriage_divorce.index,rotation=90)
# plt.xlim(1390,1398)
plt.legend()
plt.show()
marriage_divorce[
[
"country_marriages_for_each_divorce",
"city_marriages_for_each_divorce",
"village_marriages_for_each_divorce",
]
]
# ### Conclusion
# The above chart shows that over the years the number of marriages per divorce has decreased, such that by 1399 there was roughly one divorce for every three marriages in both cities and villages.
# divorce percentage of marriage
marriage_divorce["country_divorce_percentage_marriage"] = round(
marriage_divorce["divorce_country"] / marriage_divorce["marriage_country"] * 100, 2
)
marriage_divorce["city_divorce_percentage_marriage"] = round(
marriage_divorce["divorce_city"] / marriage_divorce["marriage_city"] * 100, 2
)
marriage_divorce["village_divorce_percentage_marriage"] = round(
marriage_divorce["divorce_village"] / marriage_divorce["marriage_village"] * 100, 2
)
plt.figure(figsize=(13, 5))
plt.bar(
marriage_divorce["city_divorce_percentage_marriage"].index,
marriage_divorce["city_divorce_percentage_marriage"],
    label="Divorce percentage of marriage in city",
)
plt.plot(
marriage_divorce["country_divorce_percentage_marriage"].index,
marriage_divorce["country_divorce_percentage_marriage"],
    label="Divorce percentage of marriage in country",
color="red",
)
plt.bar(
marriage_divorce["village_divorce_percentage_marriage"].index,
marriage_divorce["village_divorce_percentage_marriage"],
    label="Divorce percentage of marriage in village",
)
# plt.xticks(ticks=marriage_divorce.index,rotation=90)
# plt.xlim(1390,1398)
plt.legend()
plt.show()
marriage_divorce[
[
"country_divorce_percentage_marriage",
"city_divorce_percentage_marriage",
"village_divorce_percentage_marriage",
]
]
|
# ## Theoretical Part
# a) According to the paper, the process of generating the Gaussian pyramid is equivalent to convolving the original image with a set of equivalent weighting functions h. This function h resembles the Gaussian distribution more and more closely as a becomes smaller than one, but as the parameter a approaches one, the shape of the weighting function h takes on a more triangular form. In addition, the parameter a determines by how much the variance and the entropy of the histograms of the images at each level of the pyramid are reduced.
# b) Entropy is the minimum number of bits per pixel needed to encode an image. Since we use 8 bits to represent each pixel of a grayscale image, there are 2^8 = 256 possible values, so the maximum entropy is (a short numerical check of this bound is sketched after item d below):
# $ -\sum \limits _{n=0}^{255} P(n)\log_2(P(n)) = -\sum \limits _{n=0}^{255} 2^{-8}\log_2(2^{-8}) = -\log_2(2^{-8}) = 8 $
# c)
# d)
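# A short numerical check of the bound in (b) above (illustration only, not part of the
# original exercise): the entropy of a uniform 256-bin histogram is exactly 8 bits per pixel.
import numpy as np
_p = np.full(256, 1 / 256)  # uniform histogram over the 256 gray levels
print(-np.sum(_p * np.log2(_p)))  # 8.0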
# ## Laboratory Part
# ### A. Algorithm Implementation
import numpy as np
from skimage import io
from skimage.transform import resize
from skimage import color
import matplotlib.pyplot as plt
img = io.imread("/kaggle/input/lenapng/lena.png")
gray_img = color.rgb2gray(img)
io.imshow(img)
plt.imshow(gray_img, cmap="gray")
def GKernel(a=0.0):
w_n = np.array(
[(0.25 - a / 2), 0.25, a, 0.25, (0.25 - a / 2)]
) # initializing row vector w(n) with given constraints
w_m = w_n.reshape((5, 1)) # initializing column vector w(m)
w = np.outer(w_m, w_n) # getting the 5x5 kernel
return w
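# Quick sanity check (illustration only): the 1-D weights w(n) sum to 1 for any a, so the
# separable 5x5 kernel also sums to 1 and the filter preserves the mean intensity.
print(GKernel(0.4).sum())  # ~1.0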
# A helper function to normalize the intensity of an image
def Normalize(img):
lmin = float(img.min())
lmax = float(img.max())
return np.floor((img - lmin) / (lmax - lmin) * 255.0)
def GReduce(I, h):
if I.ndim < 3: # grayscale image
window = 5
offset = window // 2
row, col = I.shape
        if col % 2 == 0:
            height = col - offset  # j (the column index) runs up to height
        else:
            height = col - offset - 1
        if row % 2 == 0:
            width = row - offset  # i (the row index) runs up to width
        else:
            width = row - offset - 1
nextLevel = np.zeros((width // 2 - 1, height // 2 - 1))
for i in range(2, width):
for j in range(2, height):
if j % 2 == 0 and i % 2 == 0:
patch = I[i - offset : i + offset + 1, j - offset : j + offset + 1]
                    psum = (patch * h).sum()  # elementwise weighting by the 5x5 kernel
nextLevel[(i // 2) - 1, (j // 2) - 1] = psum
return Normalize(nextLevel) / 255
else: # coloured image
window = 5
offset = window // 2
row, col, ch = I.shape
        # splitting the rgb channels to process them separately
red = I[:, :, 0]
green = I[:, :, 1]
blue = I[:, :, 2]
        if col % 2 == 0:
            height = col - offset  # j (the column index) runs up to height
        else:
            height = col - offset - 1
        if row % 2 == 0:
            width = row - offset  # i (the row index) runs up to width
        else:
            width = row - offset - 1
nextRedLevel = np.zeros((width // 2 - 1, height // 2 - 1))
nextGreenLevel = np.zeros((width // 2 - 1, height // 2 - 1))
nextBlueLevel = np.zeros((width // 2 - 1, height // 2 - 1))
# applying filter to each channel
for i in range(2, width):
for j in range(2, height):
if j % 2 == 0 and i % 2 == 0:
patch = red[
i - offset : i + offset + 1, j - offset : j + offset + 1
]
                    psum = (patch * h).sum()  # elementwise weighting by the 5x5 kernel
nextRedLevel[(i // 2) - 1, (j // 2) - 1] = psum
patch = green[
i - offset : i + offset + 1, j - offset : j + offset + 1
]
                    psum = (patch * h).sum()
nextGreenLevel[(i // 2) - 1, (j // 2) - 1] = psum
patch = blue[
i - offset : i + offset + 1, j - offset : j + offset + 1
]
                    psum = (patch * h).sum()
nextBlueLevel[(i // 2) - 1, (j // 2) - 1] = psum
# combining back to a single 3d array
nextLevel = np.zeros((width // 2 - 1, height // 2 - 1, 3), dtype=float)
row, col, ch = nextLevel.shape
for i in range(0, row):
for j in range(0, col):
nextLevel[i, j, 0] = nextRedLevel[i, j]
nextLevel[i, j, 1] = nextGreenLevel[i, j]
nextLevel[i, j, 2] = nextBlueLevel[i, j]
return Normalize(nextLevel) / 255
def GPyramid(I, a, depth):
gpyramid = list()
I_out = GReduce(I, GKernel(a))
gpyramid.append(I_out)
for i in range(0, depth - 1):
I_out = GReduce(I_out, GKernel(a))
gpyramid.append(I_out)
return gpyramid
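# A small usage sketch (not part of the original assignment code): build a 3-level pyramid
# for the grayscale Lena image with a = 0.4 and show the levels side by side.
pyramid = GPyramid(gray_img, a=0.4, depth=3)
fig, axes = plt.subplots(1, len(pyramid), figsize=(10, 4))
for ax, level in zip(axes, pyramid):
    ax.imshow(level, cmap="gray")
    ax.axis("off")
plt.show()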
|
# Any results you write to the current directory are saved as output.
from fastai.vision import *
import os
os.makedirs("/root/.cache/torch/checkpoints")
model_path = "models"
plot_path = "plots"
if not os.path.exists(model_path):
os.makedirs(model_path)
os.makedirs(os.path.join(model_path, plot_path))
"""
Severity Levels
0 - 'No_DR',
1 - 'Mild',
2 - 'Moderate',
3 - 'Severe',
4 - 'Proliferate_DR'
"""
classes = ["No_DR", "Mild", "Moderate", "Severe", "Proliferate_DR"]
path = Path(
"../input/diabetic-retinopathy-224x224-grayscale-images/grayscale_images/grayscale_images/"
)
path.ls()
import pandas as pd
train = pd.read_csv("../input/diabetic-retinopathy-224x224-grayscale-images/train.csv")
np.random.seed(42)
data = ImageDataBunch.from_folder(
path,
train=".",
valid_pct=0.2,
ds_tfms=get_transforms(),
size=224,
num_workers=4,
bs=32,
).normalize(imagenet_stats)
data.classes
data.show_batch(rows=3, figsize=(10, 7))
data.classes, data.c, len(data.train_ds), len(data.valid_ds)
learn = cnn_learner(
data, models.resnet50, metrics=error_rate, model_dir="/kaggle/working/models"
)
learn.fit_one_cycle(20)
learn.recorder.plot_losses()
print(os.listdir("../../"))
learn.save("grayscale_stage1")
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(3, max_lr=slice(1e-6, 1e-5))
learn.save("grayscale_stage2")
learn.load("grayscale_stage2")
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
learn.export("/kaggle/working/models/grayscale_export.pkl")
|
# # Preprocessing
import pandas as pd
import numpy as np
import os
print(os.listdir("../input/prostate-cancer"))
df = pd.read_csv("../input/prostate-cancer/Prostate_Cancer.csv")
df.head(4)
df.shape
df.info()
df = df.replace("M", 1)
df = df.replace("B", 0)
df.head(4)
df["diagnosis_result"].hist()
# ### Splitting data
x = df.drop("diagnosis_result", axis=1)
y = df["diagnosis_result"]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.33, random_state=42, stratify=y
)
y_train.hist()
y_test.hist()
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_train = pd.DataFrame(
data=x_train[:, 1:], index=range(0, 67), columns=x.columns[1:] # values
)
x_train.head(4)
x_train.shape
# # Feature selection
# ### K best
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_regression
import matplotlib.pyplot as plt
selector = SelectKBest(mutual_info_regression, k=4)
selector.fit(x_train, y_train)
selector.scores_
scores = selector.scores_
plt.rcParams["figure.figsize"] = [12, 5]
plt.plot(scores)
plt.xticks(np.arange(7), list(x_train.columns))
# - Perimeter
# - Area
# - Compactness
# - fractal_dimension
# ### Ridge CV
from sklearn.linear_model import RidgeCV
crv = RidgeCV(store_cv_values=True)
fit = crv.fit(x_train, y_train)
fit.alpha_
var = np.floor(np.log10(np.abs(fit.coef_)))  # order of magnitude of each Ridge coefficient
plt.rcParams["figure.figsize"] = [12, 5]
plt.plot(var)
plt.xticks(np.arange(7), list(x_train.columns))
# - Perimeter
# - area
# - compactness
# - fractal_dimension
# ### Recursive Feature Elimination (RFE)
# RFE is a backward selection method: models are fitted repeatedly and the features that contribute the least information are removed at each step.
import warnings
warnings.filterwarnings("ignore")
pd.options.display.max_columns = None
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import (
StratifiedKFold,
)  # keeps the train and validation folds stratified
from sklearn.feature_selection import RFECV
rfc = RandomForestClassifier(random_state=101)
rfecv = RFECV(estimator=rfc, step=1, cv=StratifiedKFold(10), scoring="accuracy")
rfecv.fit(x_train, y_train)
print("Optimal number of features: {}".format(rfecv.n_features_))
plt.figure(figsize=(12, 5))
plt.title(
"Recursive Feature Elimination with Cross-Validation",
fontsize=18,
fontweight="bold",
pad=20,
)
plt.xlabel("Number of features selected", fontsize=14, labelpad=20)
plt.ylabel("% Correct Classification", fontsize=14, labelpad=20)
plt.plot(
range(1, len(rfecv.grid_scores_) + 1),
rfecv.grid_scores_,
color="#303F9F",
linewidth=3,
)
plt.show()
# - The performance of choosing 3 or 5 variables is almost the same, so we choose 3 to reduce the complexity
dset = pd.DataFrame()
dset["attr"] = x_train.columns
dset["importance"] = rfecv.estimator_.feature_importances_
dset = dset.sort_values(by="importance", ascending=False)
plt.figure(figsize=(12, 5))
plt.barh(y=dset["attr"], width=dset["importance"], color="#1976D2")
plt.title("RFECV - Feature Importances", fontsize=20, fontweight="bold", pad=20)
plt.xlabel("Importance", fontsize=14, labelpad=20)
plt.show()
# - compactness
# - area
# - perimeter
# - symmetry
# #### Chosen variables are: area, perimeter, compactness and fractal_dimension
x_test, x_train = (
x_test[["area", "perimeter", "compactness", "fractal_dimension"]],
x_train[["area", "perimeter", "compactness", "fractal_dimension"]],
)
x_train.corr()
# - There is multicollinearity between area and perimeter, so we delete one of them; in this case we delete perimeter
x_test, x_train = x_test.drop("perimeter", axis=1), x_train.drop("perimeter", axis=1)
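# Optional sanity check (a sketch, assuming statsmodels is available): variance inflation
# factors quantify how much multicollinearity remains among the kept features.
from statsmodels.stats.outliers_influence import variance_inflation_factor

vif = pd.Series(
    [variance_inflation_factor(x_train.values, i) for i in range(x_train.shape[1])],
    index=x_train.columns,
)
print(vif)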
# # Modeling
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate
# - Logistic regression
results_1 = cross_validate(
LogisticRegression(), x_train, y_train, return_train_score=True, cv=5
)
test_scores = results_1["test_score"]
train_scores = results_1["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
# - SVM linear
from sklearn.svm import LinearSVC
results_2 = cross_validate(LinearSVC(), x_train, y_train, return_train_score=True, cv=5)
test_scores = results_2["test_score"]
train_scores = results_2["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
# - SVM poly degree=2
from sklearn.svm import SVC
results_3 = cross_validate(
SVC(kernel="poly", degree=2, coef0=1, C=5),
x_train,
y_train,
return_train_score=True,
cv=5,
)
test_scores = results_3["test_score"]
train_scores = results_3["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
# - DecisionTree
from sklearn.tree import DecisionTreeClassifier
results_4 = cross_validate(
DecisionTreeClassifier(max_depth=3), x_train, y_train, return_train_score=True, cv=5
)
test_scores = results_4["test_score"]
train_scores = results_4["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
# - RandomForest
from sklearn.ensemble import RandomForestClassifier
results_5 = cross_validate(
RandomForestClassifier(max_leaf_nodes=10),
x_train,
y_train,
return_train_score=True,
cv=5,
)
test_scores = results_5["test_score"]
train_scores = results_5["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
# - AdaBoost
from sklearn.ensemble import AdaBoostClassifier
results_6 = cross_validate(
AdaBoostClassifier(
DecisionTreeClassifier(max_leaf_nodes=10),
n_estimators=500,
algorithm="SAMME.R",
learning_rate=0.1,
),
x_train,
y_train,
return_train_score=True,
cv=5,
)
test_scores = results_6["test_score"]
train_scores = results_6["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
# - GradientBoosting
from sklearn.ensemble import GradientBoostingClassifier
results_7 = cross_validate(
GradientBoostingClassifier(max_depth=3, n_estimators=40, learning_rate=0.01),
x_train,
y_train,
return_train_score=True,
cv=5,
)
test_scores = results_7["test_score"]
train_scores = results_7["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
# ## Hyperparameter's optimization
# ### RandomForestClassifier
params = {
"n_estimators": range(20, 400, 50),
"max_features": ["auto", "sqrt", "log2"],
"max_depth": range(2, 10, 1),
}
from sklearn.model_selection import GridSearchCV
estimator = RandomForestClassifier()
ggrid = GridSearchCV(estimator, param_grid=params, scoring="accuracy", cv=5)
final_model = ggrid.fit(x_train, y_train)
ggrid.cv_results_, ggrid.best_params_, ggrid.best_score_
final_results = cross_validate(
ggrid.best_estimator_, x_train, y_train, return_train_score=True, cv=5
)
test_scores = final_results["test_score"]
train_scores = final_results["train_score"]
print(np.mean(train_scores))
print(np.mean(test_scores))
final_estimator = RandomForestClassifier(
max_depth=3, max_features="auto", n_estimators=20
)
final_estimator.fit(x_train, y_train)
final_estimator.score(x_test, y_test)
|
import os
from glob import glob
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import pandas as pd
from matplotlib import animation, cm, colors, rc
DATA_PATH = "../input/brats20-dataset-training-validation"
TRAIN_PATH = f"{DATA_PATH}/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData"
TEST_PATH = f"{DATA_PATH}/BraTS2020_ValidationData/MICCAI_BraTS2020_ValidationData"
DATA_TYPES = ["flair", "t1", "t1ce", "t2", "seg"]
LABELS = ["Non-Enhancing tumor core", "Peritumoral Edema", "GD-enhancing tumor"]
# ## Numbers
train_data_paths = {
data_type: sorted(glob(f"{TRAIN_PATH}/**/*_{data_type}.nii"))
for data_type in DATA_TYPES
}
for k, v in train_data_paths.items():
print(f"[TRAIN] Number of {k} images: {len(v)}")
print()
test_data_paths = {
data_type: sorted(glob(f"{TEST_PATH}/**/*_{data_type}.nii"))
for data_type in DATA_TYPES
}
for k, v in test_data_paths.items():
print(f"[TEST] Number of {k} images: {len(v)}")
for path in sorted(glob(f"{TRAIN_PATH}/*")):
if os.path.isdir(path):
if not any(f.endswith("seg.nii") for f in os.listdir(path)):
print(f'Missing segmentation mask for volume: {path.split("/")[-1]}')
# ## Shapes
train_shapes = [
np.asanyarray(nib.load(f).dataobj).shape
for data_type in DATA_TYPES
for f in train_data_paths[data_type]
]
test_shapes = [
np.asanyarray(nib.load(f).dataobj).shape
for data_type in DATA_TYPES
for f in test_data_paths[data_type]
]
assert len(set(train_shapes)) == 1
assert len(set(test_shapes)) == 1
assert train_shapes[0] == test_shapes[0]
print(f"Volume dimensions: {train_shapes[0]}")
# # Visualizations
def cmap_discretize(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
"""
if type(cmap) == str:
cmap = plt.get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1.0, N), (0.0, 0.0, 0.0, 0.0)))
colors_rgba = cmap(colors_i)
indices = np.linspace(0, 1.0, N + 1)
cdict = {}
for ki, key in enumerate(("red", "green", "blue")):
cdict[key] = [
(indices[i], colors_rgba[i - 1, ki], colors_rgba[i, ki])
for i in range(N + 1)
]
# Return colormap object.
return colors.LinearSegmentedColormap(cmap.name + "_%d" % N, cdict, 1024)
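# Example usage (illustrative): discretize the continuous 'jet' colormap into 3 colors,
# one per mask label.
discrete_jet = cmap_discretize(cm.jet, 3)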
def create_parallel_animation(volumes, case, show_mask=False, alpha=0.6):
"""Create animation of two volumes"""
# transpose volume from (x, y, z) to (z, x, y)
volumes = [np.transpose(volume, (2, 0, 1)) for volume in volumes]
fig = plt.figure(figsize=(12, 13))
fig.tight_layout()
plt.axis("off")
plt.suptitle(f"Patient ID: {case}", fontsize=16, fontweight="bold")
if show_mask:
custom_cmap = cmap_discretize(cm.jet, int(np.unique(volumes[-1])[-1]))
axes = []
for idx, data_type in enumerate(DATA_TYPES[:-1]):
ax = fig.add_subplot(2, len(DATA_TYPES[:-1]) // 2, idx + 1)
ax.set_title(data_type.upper(), weight="bold")
axes.append(ax)
images = []
for i, slices in enumerate(zip(*volumes[:-1])):
aux_imgs = []
for idx, s in enumerate(slices):
im = axes[idx].imshow(s, animated=True, cmap="bone")
aux_imgs.append(im)
if show_mask:
im2 = axes[idx].imshow(
np.ma.masked_where(volumes[-1][i] == 0, volumes[-1][i]),
animated=True,
cmap=custom_cmap,
alpha=alpha,
extent=im.get_extent(),
)
aux_imgs.append(im2)
images.append(aux_imgs)
if show_mask:
patches = [
mpatches.Patch(
color=custom_cmap(col_val / np.max(volumes[-1])),
label=f"{LABELS[l_idx]}",
)
for l_idx, col_val in enumerate(np.unique(volumes[-1])[1:])
]
plt.legend(
handles=patches,
loc="upper left",
bbox_to_anchor=(0.4, -0.1),
borderaxespad=0.4,
title="Mask Labels",
title_fontsize=18,
edgecolor="black",
facecolor="#c5c6c7",
)
ani = animation.ArtistAnimation(
fig, images, interval=5000 // len(volumes[0]), blit=False, repeat_delay=1000
)
plt.close()
return ani
volume_paths = [train_data_paths[data_type][0] for data_type in DATA_TYPES]
volumes = [nib.load(volume_path).get_fdata() for volume_path in volume_paths]
create_parallel_animation(volumes, case="1", show_mask=True)
|
# # CASE STUDY: BREAST CANCER CLASSIFICATION
# # STEP #1: PROBLEM STATEMENT
# - Predicting if the cancer diagnosis is benign or malignant based on several observations/features
# - 30 features are used, examples:
# - radius (mean of distances from center to points on the perimeter)
# - texture (standard deviation of gray-scale values)
# - perimeter
# - area
# - smoothness (local variation in radius lengths)
# - compactness (perimeter^2 / area - 1.0)
# - concavity (severity of concave portions of the contour)
# - concave points (number of concave portions of the contour)
# - symmetry
# - fractal dimension ("coastline approximation" - 1)
# - Datasets are linearly separable using all 30 input features
# - Number of Instances: 569
# - Class Distribution: 212 Malignant, 357 Benign
# - Target class:
# - Malignant
# - Benign
# # STEP #2: IMPORTING DATA
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
col_names = list(cancer.feature_names)
col_names.append("target")
df = pd.DataFrame(np.c_[cancer.data, cancer.target], columns=col_names)
df.head()
print(cancer.target_names)
df.shape
df.info()
# # STEP #3: VISUALIZING THE DATA
df.columns
sns.pairplot(
df,
hue="target",
vars=[
"mean radius",
"mean texture",
"mean perimeter",
"mean area",
"mean smoothness",
"mean compactness",
"mean concavity",
"mean concave points",
"mean symmetry",
"mean fractal dimension",
],
)
sns.countplot(df["target"], label="Count")
sns.scatterplot(x="mean area", y="mean smoothness", hue="target", data=df)
# Let's check the correlation between the variables
# Strong correlation between the mean radius and mean perimeter, mean area and mean perimeter
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True)
# # STEP #4: MODEL TRAINING (FINDING A PROBLEM SOLUTION)
X = df.drop("target", axis=1)
y = df.target
print(X.shape)
print(y.shape)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5)
model = LogisticRegression(solver="liblinear")
# scores = cross_val_score(model, X, y, cv=10, scoring='accuracy')
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print(f"Train set:\n{classification_report(y_train, y_train_pred)}")
print("==============================================")
print(f"Test set:\n{classification_report(y_test, y_test_pred)}")
from sklearn.svm import SVC
model = SVC(C=0.1, gamma="auto", kernel="poly")
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
# # STEP #5: EVALUATING THE MODEL
print(f"Train set:\n{classification_report(y_train, y_train_pred)}")
print("==============================================")
print(f"Test set:\n{classification_report(y_test, y_test_pred)}")
confusion_matrix(y_test, y_test_pred)
# # STEP #6: IMPROVING THE MODEL
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
X_std = sc.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(
X_std, y, test_size=0.2, random_state=5
)
model = LogisticRegression(solver="liblinear")
# scores = cross_val_score(model, X, y, cv=10, scoring='accuracy')
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print(f"Train set:\n{classification_report(y_train, y_train_pred)}")
print("==============================================")
print(f"Test set:\n{classification_report(y_test, y_test_pred)}")
from sklearn.svm import SVC
model = SVC(gamma="auto")
model.fit(X_train, y_train)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print(f"Train set:\n{accuracy_score(y_train, y_train_pred)}")
print("==============================================")
print(f"Test set:\n{accuracy_score(y_test, y_test_pred)}")
print("==============================================")
print(f"Confusion Matrix:\n{confusion_matrix(y_test, y_test_pred)}")
# # IMPROVING THE MODEL - PART 2
from sklearn.model_selection import GridSearchCV
param_grid = {
"C": [0.01, 0.1, 1, 10, 100],
"gamma": [1, 0.1, 0.01, 0.001],
"kernel": ["rbf"],
}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=4, cv=5)
grid.fit(X_train, y_train)
grid.best_params_
grid.best_estimator_
y_pred = grid.predict(X_test)
print(f"Test set:\n{accuracy_score(y_test, y_pred)}")
print("==============================================")
print(f"Confusion Matrix:\n{confusion_matrix(y_test, y_pred)}")
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5, p=2, metric="minkowski")
knn.fit(X_train, y_train)
from sklearn.model_selection import cross_val_predict, cross_val_score
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
print("Train Result:\n")
print(
"accuracy score: {0:.4f}\n".format(
accuracy_score(y_train, clf.predict(X_train))
)
)
print(
"Classification Report: \n {}\n".format(
classification_report(y_train, clf.predict(X_train))
)
)
print(
"Confusion Matrix: \n {}\n".format(
confusion_matrix(y_train, clf.predict(X_train))
)
)
res = cross_val_score(clf, X_train, y_train, cv=10, scoring="accuracy")
print("Average Accuracy: \t {0:.4f}".format(np.mean(res)))
print("Accuracy SD: \t\t {0:.4f}".format(np.std(res)))
elif train == False:
print("Test Result:\n")
print(
"accuracy score: {0:.4f}\n".format(
accuracy_score(y_test, clf.predict(X_test))
)
)
print(
"Classification Report: \n {}\n".format(
classification_report(y_test, clf.predict(X_test))
)
)
print(
"Confusion Matrix: \n {}\n".format(
confusion_matrix(y_test, clf.predict(X_test))
)
)
print_score(knn, X_train, y_train, X_test, y_test, train=True)
print_score(knn, X_train, y_train, X_test, y_test, train=False)
from sklearn.model_selection import GridSearchCV
params = {"n_neighbors": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
grid_search_cv = GridSearchCV(
KNeighborsClassifier(), params, n_jobs=-1, verbose=1, cv=5
)
grid_search_cv.fit(X_train, y_train)
grid_search_cv.best_estimator_
print_score(grid_search_cv, X_train, y_train, X_test, y_test, train=True)
print_score(grid_search_cv, X_train, y_train, X_test, y_test, train=False)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=42)
clf.fit(X_train, y_train)
print_score(clf, X_train, y_train, X_test, y_test, train=True)
print_score(clf, X_train, y_train, X_test, y_test, train=False)
import xgboost as xgb
clf = xgb.XGBClassifier()
clf.fit(X_train, y_train)
print_score(clf, X_train, y_train, X_test, y_test, train=True)
print_score(clf, X_train, y_train, X_test, y_test, train=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # The overall idea
# This is my first attempt at building a prediction model. I am not sure of the best way to do it, so I am giving it a try with one of the simplest clustering techniques, K-Means. The first step is to read the train.csv file into a dataframe. Since the keyword and location attributes contain missing values for some instances, I am ignoring these attributes for the time being. At the simplest level, I will consider the text (tweets) only.
# Now that we have decided to use the "text" attribute for building our model, the next step is to transform "text" into a suitable representation which can be fed to the prediction model. For this purpose, we can use TF-IDF.
# The simplest way is to pass the whole "text" column of our dataframe into the tf-idf vectorizer without any text preprocessing (a rough sketch of this baseline appears below, right after the data is loaded). This method resulted in an accuracy of 42.965979 % on the training dataset. In order to improve it a bit, we can do the following preprocessing steps.
# 1. Tokenize sentences
# 2. Convert into lowercase
# 3. Remove urls/hyperlinks
# 4. Remove digits
# 5. Remove starting and ending spaces
# 6. Tokenize into words
# After applying the above steps, we store the processed text for each instance in a separate column; let's call it "tokenized_sents"
# Then we convert this column into a list of sentences (containing all tweets with preprocessing applied). The next step is to perform stemming and lemmatization and remove stop words, after which we get a list of words. We convert this list to a set to get the unique words, and then convert that set back to a list for further processing. This list of unique words is then transformed using the tf-idf vectorizer and used to train the K-Means model.
# # Step by step working
# Reading the training dataset into a dataframe
df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
print(df.head())
print(df["text"])
# Save tokenized sentences of "text" in another column
from nltk import sent_tokenize
df["tokenized_sents"] = df.apply(lambda column: sent_tokenize(column["text"]), axis=1)
print(df.head())
# Applying preprocessing on "tokenized_sents"
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
stop = stopwords.words("english")
df["tokenized_sents"] = df["text"].str.lower()
df["tokenized_sents"] = df["tokenized_sents"].apply(
lambda x: re.sub(
r"""(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))""",
"",
str(x),
flags=re.MULTILINE,
)
)
df["tokenized_sents"] = df["tokenized_sents"].apply(
lambda x: re.sub("[^a-zA-Z]", " ", str(x))
)
df["tokenized_sents"] = df["tokenized_sents"].str.strip()
df["tokenized_sents"] = df.apply(
lambda column: word_tokenize(column["tokenized_sents"]), axis=1
)
print(df["tokenized_sents"])
# The next step is to remove stopwords and perform stemming and lemmatization. After these steps, we find the unique words and vectorize them using tf-idf. These vectorized words are then used for training our model.
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
import numpy as np
# stopword removal, stemming and lemmatization and then finding unique words
allwords = []
all_sentences = df["tokenized_sents"].tolist()
for slist in all_sentences:
for s in slist:
if s not in stop:
allwords.append(wordnet_lemmatizer.lemmatize(porter_stemmer.stem(s)))
set_allwords = set(allwords)
allwords_unique = list(set_allwords)
print("length of unique words ", len(allwords_unique))
# finding tfidf for unique words
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
vectorizer = TfidfVectorizer(stop_words=stop)
X = vectorizer.fit_transform(allwords_unique)
# Training K-means for k=2 (real or not )
true_k = 2
model = KMeans(n_clusters=true_k, init="k-means++", max_iter=100, n_init=1)
model.fit(X)
labels = set(model.labels_)
labels = list(labels)
count = 0
# testing the trained model on test dataset
# read test.csv
import pandas as pd
df_test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
# dataframe for storing test results based on sample submission format
df_test_results = pd.DataFrame(columns=["id", "target"])
# for each tweet text in the test dataset, preprocess "text", then transform it and use the transformed representation for prediction
for x in df_test["text"]:
text = re.sub(
r"""(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))""",
"",
x.lower(),
flags=re.MULTILINE,
)
text = re.sub("[^a-zA-Z]", " ", text)
text = text.strip()
words = word_tokenize(text)
    # stem and lemmatize each token before vectorizing
    words = [wordnet_lemmatizer.lemmatize(porter_stemmer.stem(w)) for w in words]
X = vectorizer.transform(list(set(words)))
predicted = model.predict(X)
    # model.predict returns a list of integers containing the predicted cluster label for each word. The idea is to assign the most frequently occurring label to the overall tweet.
if np.count_nonzero(predicted == labels[0]) > np.count_nonzero(
predicted == labels[1]
):
predicted_label = labels[0]
# print(predicted_label)
df_test_results.at[count, "target"] = predicted_label
else:
predicted_label = labels[1]
# print(predicted_label)
df_test_results.at[count, "target"] = predicted_label
    count = count + 1
df_test_results["id"] = df_test["id"]
# print(df_test_results)
export_csv = df_test_results.to_csv(
r"ArjumandFatima_NLP_DisasterTweet_Submission1.csv", index=None, header=True
) # Don't forget to add '.csv' at the end of the path
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Importing Libraries
import seaborn as sns
import matplotlib.pyplot as plt
# ## Data preprocessing
df = pd.read_csv(
"/kaggle/input/gender-classification-dataset/gender_classification_v7.csv"
)
df.head()
df.describe()
df.info()
df.isnull().sum()
# Since no null values are present, we do not need to perform any data cleaning steps.
# ## EDA
sns.barplot(df, x="gender", y="long_hair")
plt.show()
correlation = df.corr()
plt.figure(figsize=(15, 8))
sns.heatmap(correlation, annot=True)
plt.show()
sns.catplot(df, x="long_hair", hue="nose_wide", kind="count")
plt.show()
sns.catplot(data=df, x="nose_long", kind="count", hue="gender", aspect=1.5)
plt.show()
# ## Label encoding
from sklearn import preprocessing
label_encoder = preprocessing.LabelEncoder()
df["gender"] = label_encoder.fit_transform(df["gender"])
df["gender"].unique()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.metrics import accuracy_score
import warnings
warnings.filterwarnings("ignore")
# ## Train test split
train, test = train_test_split(df, random_state=42, test_size=0.2)
# ## Splitting training and testing data between dependent and independent variables.
y_train = train.gender
x_train = train.drop(["gender"], axis=1)
y_test = test.gender
x_test = test.drop(["gender"], axis=1)
# ## Linear Regression
linearreg = LinearRegression()
linearreg.fit(x_train, y_train)
lr_score = linearreg.score(x_test, y_test)
lr_score
# ## Logistics Regression
logisticreg = LogisticRegression()
logisticreg.fit(x_train, y_train)
logistic_score = logisticreg.score(x_test, y_test)
logistic_score
# ## Decision Tree
from sklearn import tree
dt = tree.DecisionTreeClassifier(criterion="gini")
dt.fit(x_train, y_train)
dt_score = dt.score(x_test, y_test)
dt_score
# ## Random forest classifier
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(x_train, y_train)
rf_score = rf.score(x_test, y_test)
rf_score
# # Applying Hyperparameter tuning using GridSearchCV
from sklearn.model_selection import GridSearchCV
def tune_model(model, params):
model = GridSearchCV(
estimator=model, param_grid=params, scoring="accuracy", cv=5, verbose=1
)
model.fit(x_train, y_train)
print("Best parameters :{}".format(model.best_params_))
rf_parameters = [
{
"n_estimators": list(range(50, 150)),
"max_depth": list(range(10, 15)),
"criterion": ["gini", "entropy"],
}
]
tune_model(rf, rf_parameters)
rf_finetuned = RandomForestClassifier(
criterion="entropy", max_depth=10, n_estimators=109
)
rf_finetuned.fit(x_train, y_train)
rf_fine = rf_finetuned.score(x_test, y_test)
rf_fine
# # Comparison of accuracies of different models.
names = [
"Linear Regression",
"Logistic Regression",
"Decision Tree CLassifier",
"Random Forest",
"Random Forest (fine tuned)",
]
accuracy = [lr_score, logistic_score, dt_score, rf_score, rf_fine]
plt.figure(figsize=(6, 4))
graph = plt.bar(names, accuracy)
plt.xlabel("Models")
plt.ylabel("Accuracy")
plt.xticks(rotation=45)
plt.show()
|
#
#
# Dataviz - Data Science Specialization Program - FACENS
# # Exercise 2
# * **Due date:** xx/01/2020
# * **Professor:** Matheus Mota
# * **Student:** Pedro Henrique Corrêa Kim
# * **RA:** 191226
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import plotly.plotly as py
import plotly.graph_objs as go
from collections import Counter
from math import log
def df_group(df, column):
name_list = []
count_list = []
ele_col_list = pd.unique(df[column])
ele_col_list.sort()
for i in ele_col_list:
        discriminated = df.loc[df[column] == i]
        count = len(discriminated)
name_list.append(i)
count_list.append(count)
return name_list, count_list
def set_axis_style(ax, labels, name):
ax.get_xaxis().set_tick_params(direction="out")
ax.xaxis.set_ticks_position("bottom")
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.set_xlabel(name)
data = pd.read_csv(
"/kaggle/input/dataviz-facens-20182-ex3/BlackFriday.csv", delimiter=","
)
# ## Question 1
# Build one or more violin plots that allow comparing the amount spent with the buyers' age.
labels, counts = df_group(data, "Gender")
print(labels)
fig, axes = plt.subplots()
name = "Valor X Genero"
# data.loc[data['Gender'] == 'M', 'Purchase']
# axes.violinplot([data.loc[data['Gender'] == 'M', 'Purchase'].values, [data.loc[data['Gender'] == 'F', 'Purchase'].values]], labels=labels, showmeans=True, showextrema=True, showmedians=True)
axes.violinplot(
[
data.loc[data["Gender"] == "F", "Purchase"].values,
data.loc[data["Gender"] == "M", "Purchase"].values,
],
showmeans=True,
showextrema=True,
)
set_axis_style(axes, labels, name)
# ## Question 2
# Graphically represent the N most purchased products, where N > 8.
counts = data["Product_ID"].value_counts()
x = counts.to_frame(name="count")
x.head(n=8)
fig, axs = plt.subplots()
fig.set_size_inches(10, 10, forward=True)
# x.head(n=8).values.transpose().tolist()[0]
# x.head(n=8).index.tolist()
axs.bar(x=x.head(n=8).index.tolist(), height=x.head(n=8).values.transpose().tolist()[0])
|
# **Importing Libraries**
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import time
import re
import warnings
warnings.filterwarnings("ignore")
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()
# **Dataset Import**
df = pd.read_csv(
"../input/30k-tweets-with-russiaukrainewar-hashtag/30K Tweets with russiaukrainewar hashtag.csv"
)
df.info()
# **Cleaning the Tweets**
def remove_pattern(input_txt, pattern):
r = re.findall(pattern, input_txt)
for i in r:
input_txt = re.sub(i, "", input_txt)
return input_txt
def clean_tweets(tweets):
# remove twitter Return handles (RT @xxx:)
tweets = np.vectorize(remove_pattern)(tweets, "RT @[\w]*:")
# remove twitter handles (@xxx)
tweets = np.vectorize(remove_pattern)(tweets, "@[\w]*")
# remove URL links (httpxxx)
tweets = np.vectorize(remove_pattern)(tweets, "https?://[A-Za-z0-9./]*")
    # remove special characters, numbers, punctuations (except for #)
    tweets = np.vectorize(lambda t: re.sub(r"[^a-zA-Z#]", " ", str(t)))(tweets)
return tweets
df["Tweet"] = clean_tweets(df["Tweet"])
df["Tweet"]
# **Sentiment Score for All Tweets**
scores = []
# Declare variables for scores
compound_list = []
positive_list = []
negative_list = []
neutral_list = []
for i in range(df["Tweet"].shape[0]):
# print(analyser.polarity_scores(sentiments_pd['text'][i]))
compound = analyzer.polarity_scores(df["Tweet"][i])["compound"]
pos = analyzer.polarity_scores(df["Tweet"][i])["pos"]
neu = analyzer.polarity_scores(df["Tweet"][i])["neu"]
neg = analyzer.polarity_scores(df["Tweet"][i])["neg"]
scores.append(
{"Compound": compound, "Positive": pos, "Negative": neg, "Neutral": neu}
)
sentiments_score = pd.DataFrame.from_dict(scores)
df = df.join(sentiments_score)
df.head()
# **Classifying Our Tweets Into Positive, Negative & Neutral Category**
conditions = [
(df["Compound"] <= -0.5),
(df["Compound"] > -0.5) & (df["Compound"] < 0.5),
(df["Compound"] > 0.5),
]
# create a list of the values we want to assign for each condition
values = ["Negative", "Neutral", "Positive"]
# create a new column and use np.select to assign values to it using our lists as arguments
df["Category"] = np.select(conditions, values)
df.head()
pd.DataFrame(df.groupby(["Category"])["Category"].count()).rename(
columns={"Category": "Counts"}
).assign(Percentage=lambda x: (x.Counts / x.Counts.sum()) * 100)
# **Visualization of Tweets Category**
positive = 1758
neutral = 22799
negative = 5443
# Creating PieChart
labels = [
"Positive [" + str(positive) + "]",
"Neutral [" + str(neutral) + "]",
"Negative [" + str(negative) + "]",
]
sizes = [positive, neutral, negative]
colors = ["#81F495", "#A9E4EF", "#FF3C38"]
patches, texts = plt.pie(sizes, colors=colors, startangle=90)
plt.style.use("default")
plt.legend(labels)
plt.title("#Number of Tweets ( Positive, Negative, Neutral)")
plt.axis("equal")
plt.show()
# **Visualization of the Sentiment Scores of Positive, Neutral & Negative Tweets**
sns.distplot(
df["Positive"],
hist=False,
kde=True,
bins=int(180 / 5),
color="green",
hist_kws={"edgecolor": "black"},
kde_kws={"shade": True, "linewidth": 2},
)
sns.distplot(
df["Negative"],
hist=False,
kde=True,
bins=int(180 / 5),
color="red",
hist_kws={"edgecolor": "black"},
kde_kws={"shade": True, "linewidth": 2},
)
sns.distplot(
df["Neutral"],
hist=False,
kde=True,
bins=int(180 / 5),
color="y",
hist_kws={"edgecolor": "black"},
kde_kws={"shade": True, "linewidth": 2},
)
# **Visualization of the Sentiment Scores**
sns.distplot(
df["Compound"],
hist=False,
kde=True,
bins=int(180 / 5),
color="green",
hist_kws={"edgecolor": "black"},
kde_kws={"shade": True, "linewidth": 2},
)
# **Wordcloud for Negative Sentiments**
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from nltk import *
def plot_cloud(wordcloud):
plt.figure(figsize=(40, 30))
plt.imshow(wordcloud)
plt.axis("off")
text = " ".join(review for review in df[df["Category"] == "Negative"].Tweet)
wordcloud = WordCloud(
width=3000,
height=2000,
stopwords=STOPWORDS,
background_color="Black",
colormap="Set2",
collocations=False,
).generate(text)
plot_cloud(wordcloud)
# **Wordcloud for Positive Sentiments**
text = " ".join(review for review in df[df["Category"] == "Positive"].Tweet)
wordcloud = WordCloud(
width=3000,
height=2000,
stopwords=STOPWORDS,
background_color="Black",
colormap="Set2",
collocations=False,
).generate(text)
plot_cloud(wordcloud)
# **Wordcloud for Neutral Sentiments**
text = " ".join(review for review in df[df["Category"] == "Neutral"].Tweet)
wordcloud = WordCloud(
width=3000,
height=2000,
stopwords=STOPWORDS,
background_color="Black",
colormap="Set2",
collocations=False,
).generate(text)
plot_cloud(wordcloud)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
iris = pd.read_csv("C:/Users/Dnyaneshwar/Downloads/iris.csv")
iris.head()
df = pd.DataFrame(iris)
print(df)
iris.info()
print(iris.columns)
sns.pairplot(iris)
|
# # Pandas-Profiling
# To generate profile reports from a pandas DataFrame we use **pandas-profiling**. The pandas df.describe() function is great but a little basic for serious exploratory data analysis. **pandas_profiling** extends the pandas DataFrame with df.profile_report() for quick data analysis.
# - To install pandas-profiling use the following command:
# **pip install pandas-profiling**
# 
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
from pandas_profiling import ProfileReport
df = pd.read_csv(
"/kaggle/input/aircrash-data/Airplane_Crashes_and_Fatalities_Since_1908.csv"
)
df.head(3)
df.describe(include="all")
# ## To generate the profile report
profile = ProfileReport(df, title="Flight Detail", html={"style": {"full_width": True}})
profile
# *The HTML report can be included in a Jupyter notebook*
profile.to_notebook_iframe()
# ## Saving the report
# If you want to generate a HTML report file, save the ProfileReport to an object and use the to_file() function:
profile.to_file(output_file="Flight_report.html")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
median_house_hold_in_come = pd.read_csv(
"/kaggle/input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv",
encoding="windows-1252",
)
percentage_people_below_poverty_level = pd.read_csv(
"/kaggle/input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv",
encoding="windows-1252",
)
percent_over_25_completed_highSchool = pd.read_csv(
"/kaggle/input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv",
encoding="windows-1252",
)
share_race_city = pd.read_csv(
"/kaggle/input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv",
encoding="windows-1252",
)
kill = pd.read_csv(
"/kaggle/input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv",
encoding="windows-1252",
)
percentage_people_below_poverty_level.head()
percentage_people_below_poverty_level.info()
percentage_people_below_poverty_level.poverty_rate.value_counts()
percentage_people_below_poverty_level.poverty_rate.replace(["-"], 0.0, inplace=True)
percentage_people_below_poverty_level.poverty_rate.value_counts()
percentage_people_below_poverty_level.poverty_rate = (
percentage_people_below_poverty_level.poverty_rate.astype(float)
)
percentage_people_below_poverty_level.info()
percentage_people_below_poverty_level["Geographic Area"].unique()
sum(
percentage_people_below_poverty_level[
percentage_people_below_poverty_level["Geographic Area"] == "AL"
].poverty_rate
) / len(
percentage_people_below_poverty_level[
percentage_people_below_poverty_level["Geographic Area"] == "AL"
]
)
area_list = percentage_people_below_poverty_level["Geographic Area"].unique()
area_poverty_ratio = []
for i in area_list:
x = percentage_people_below_poverty_level[
percentage_people_below_poverty_level["Geographic Area"] == i
]
area_poverty_rate = sum(x.poverty_rate) / len(x)
area_poverty_ratio.append(area_poverty_rate)
data = pd.DataFrame({"area_list": area_list, "area_poverty_ratio": area_poverty_ratio})
new_index = (data["area_poverty_ratio"].sort_values(ascending=False)).index.values
sorted_data = data.reindex(new_index)
(data["area_poverty_ratio"].sort_values(ascending=False)).index.values
data["area_poverty_ratio"].sort_values(ascending=False).index.values
plt.figure(figsize=(15, 10))
sns.barplot(x=sorted_data["area_list"], y=sorted_data["area_poverty_ratio"])
plt.xticks(rotation=45)
plt.xlabel("States")
plt.ylabel("Poverty Rate")
plt.title("Poverty Rate Given States")
# # Most common 15 Name or Surname of killed people
kill.head()
# Let's find the fake names in the list.
kill.name.value_counts()
# "TK TK" is a placeholder (fake) name, so we filter it out of the list and then split the names and surnames
separate = kill.name[kill.name != "TK TK"].str.split()
separate
# Then we unzip names and surnames into 2 different lists, a and b.
a, b = zip(*separate)
name_list = a + b
name_count = Counter(name_list)
name_count
most_common_names = name_count.most_common(15)
most_common_names
x, y = zip(*most_common_names)
x = list(x)
y = list(y)
plt.figure(figsize=(15, 10))
sns.barplot(x=x, y=y)
plt.xlabel("Name or Surname of killed people")
plt.ylabel("Frequency")
plt.title("Most common 15 Name or Surname of killed people")
# # High school graduation rate of the population that is older than 25 in States
percent_over_25_completed_highSchool.info()
percent_over_25_completed_highSchool.head()
# The command below shows the value counts; we can see there is a value '-' which is not numeric.
percent_over_25_completed_highSchool.percent_completed_hs.value_counts()
# Replace - with 0.0.
percent_over_25_completed_highSchool.percent_completed_hs.replace(
"-", 0.0, inplace=True
)
# Check the value counts again to confirm '-' has been replaced.
percent_over_25_completed_highSchool.percent_completed_hs.value_counts()
# The percent_completed_hs feature is not numeric. Let's cast it to float so we can work with it.
percent_over_25_completed_highSchool.percent_completed_hs = (
percent_over_25_completed_highSchool.percent_completed_hs.astype(float)
)
percent_over_25_completed_highSchool.info()
area_list = list(percent_over_25_completed_highSchool["Geographic Area"].unique())
area_highschool = []
for i in area_list:
x = percent_over_25_completed_highSchool[
percent_over_25_completed_highSchool["Geographic Area"] == i
]
area_highschool_rate = sum(x.percent_completed_hs) / len(x)
area_highschool.append(area_highschool_rate)
data = pd.DataFrame({"area_list": area_list, "area_highschool_ratio": area_highschool})
new_index = (data["area_highschool_ratio"].sort_values(ascending=True)).index.values
sorted_data2 = data.reindex(new_index)
plt.figure(figsize=(15, 10))
sns.barplot(x=sorted_data2["area_list"], y=sorted_data2["area_highschool_ratio"])
plt.xlabel("States")
plt.ylabel("Highschool Ratio")
plt.show()
# # Horizontal Bar Plot
share_race_city.info()
share_race_city.tail()
# share_race_city['share_white'].value_counts()
# share_race_city['share_black'].value_counts()
# share_race_city['share_native_american'].value_counts()
# share_race_city['share_asian'].value_counts()
# share_race_city['share_hispanic'].value_counts()
share_race_city.replace("(X)", 0.0, inplace=True)
share_race_city.share_white = share_race_city.share_white.astype(float)
share_race_city.share_black = share_race_city.share_black.astype(float)
share_race_city.share_native_american = share_race_city.share_native_american.astype(
float
)
share_race_city.share_asian = share_race_city.share_asian.astype(float)
share_race_city.share_hispanic = share_race_city.share_hispanic.astype(float)
share_race_city.info()
share_race_city.loc[1]
# area list will be one of our arguments for our dictionary. We will use this list as y axis in our graph.
area_list = list(share_race_city["Geographic area"].unique())
len(area_list)
len(share_race_city)
share_race_city.info()
# For each state, the race shares are averaged over the number of cities in that state.
# The alpha argument is important: it keeps the overlapping bar plots visible.
white = []
black = []
native = []
asian = []
hispanic = []
for i in area_list:
    # average share of each race over the cities of state i
    state = share_race_city[share_race_city["Geographic area"] == i]
    n_cities = len(state)
    white.append(sum(state.share_white) / n_cities)
    black.append(sum(state.share_black) / n_cities)
    native.append(sum(state.share_native_american) / n_cities)
    asian.append(sum(state.share_asian) / n_cities)
    hispanic.append(sum(state.share_hispanic) / n_cities)
f, ax = plt.subplots(figsize=(9, 15))
# alpha controls transparency
sns.barplot(x=white, y=area_list, label="white", alpha=0.5, color="cyan")
sns.barplot(x=black, y=area_list, label="black", alpha=0.5, color="green")
sns.barplot(x=native, y=area_list, label="native", alpha=0.5, color="yellow")
sns.barplot(x=asian, y=area_list, label="asian", alpha=0.5, color="red")
sns.barplot(x=hispanic, y=area_list, label="hispanic", alpha=0.5, color="orange")
ax.legend(loc="lower right", frameon=True)
ax.set(
xlabel="Percentage of the races",
ylabel="Area List",
title="Ratio of the races according to areas",
)
# ## Normalization
# We can normalize data so that different graphics share the same scale. We use normalization when we care about the direction of trends rather than the absolute values. This is done by dividing the series by its maximum value.
# 0 < ([1,2,3,4,5])/5 < 1
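# A tiny worked example of this max-normalization (illustrative):
example = np.array([1, 2, 3, 4, 5])
print(example / example.max())  # [0.2 0.4 0.6 0.8 1. ]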
# ## Visualization of High School Graduation vs Poverty of States with Point Plot
# we normalize the sorted_data set as below.
sorted_data["area_poverty_ratio"] = sorted_data["area_poverty_ratio"] / max(
sorted_data["area_poverty_ratio"]
)
# if we check the data as below, we can see that it is rescaled based on the max value.
sorted_data.head()
# normalizing sorted_data2 so that we can compare the two.
sorted_data2["area_highschool_ratio"] = sorted_data2["area_highschool_ratio"] / max(
sorted_data2["area_highschool_ratio"]
)
sorted_data2.head()
# #### With the concat function we can add the last column of sorted_data2 into the sorted_data set.
# #### We created a new data set named data.
data = pd.concat([sorted_data, sorted_data2["area_highschool_ratio"]], axis=1)
data.sort_values("area_poverty_ratio", inplace=True)
data.head()
f, ax1 = plt.subplots(figsize=(20, 10))
sns.pointplot(x="area_list", y="area_poverty_ratio", data=data, color="blue", alpha=0.8)
sns.pointplot(
x="area_list", y="area_highschool_ratio", data=data, color="orange", alpha=0.8
)
plt.text(40, 0.6, "High School Graduation", color="orange", fontsize=17, style="italic")
plt.text(40, 0.55, "Poverty", color="blue", fontsize=17, style="italic")
ax1.set(xlabel="States", ylabel="Normalized Values")
plt.title("Graduation vs Poverty")
plt.grid()
# ## Visualization of High School Graduation vs Poverty of States with Joint Plot
# We will draw the same graph with a Joint Plot.
data.head()
g = sns.jointplot(
data.area_poverty_ratio, data.area_highschool_ratio, kind="kde", size=7
)
plt.savefig("figure.png")
plt.show()
sns.jointplot("area_poverty_ratio", "area_highschool_ratio", data=data, size=5, ratio=3)
|
# ## World Values Survey
# ##### In this database we use World Values Survey data available at http://www.worldvaluessurvey.org/wvs.jsp (The data is free to be downloaded from the webpage). It is a survey, conducted every few years in a number of countries. Here we use wave 6 data, mostly from 2013-2014. Note that not all countries are participating in each wave.
# ##### The questions revolve around different opinion topics, including trust, work, religion, family, gender equality, and nationalism. The details of the questions and code used in the data is available in the attached files of "Official Questionnaire" and "Codebook". In this exercise we focus on what the respondents think about abortion: "Please tell if abortion can always be justified, never be justified, or something in between". The responses range between 1 - never justifiable (conservative attitude), and 10 - always justifiable (liberal attitude). Besides of the numeric range 1..10, a number of cases have negative codes (this applies to many variables). These are various types of missing information (-5: missing, -4: not asked, -3: not applicable, -2: no answer, -1: don't know). We treat all these as just missing below.
# ### Loading and preparing the data
# Loading necessary libraries
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import normalize
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestRegressor
import time
import os
# Loading the data
os.chdir("/kaggle/input/world-values-survey-wave-6")
wvs = pd.read_csv("wvs.csv.bz2", sep="\t")
wvs
# Each column here is a survey question and its values (observations) are individual's responses. Column V2 is the country code, the details of what country code corresponds to which country is present in the Official Questionnaire. The dataset has 328 variables and 90350 observations.
# The column we are most interested in here is the Abortion column, variable V204. This column has values from 1-10 indicating respondents' answers to the question "whether you think it can always be justified, never be justified, or something in between?", where 1 is Never Justifiable and 10 is Always Justifiable. Some responses carry negative codes. They mean the following:
# -5-.- DE,SE:Inapplicable; HT: Missing-Dropped out survey; RU:Inappropriate response; Missing
# -4-.- Not asked in survey
# -3-.- Not applicable
# -2-.- No answer
# -1-.- Don´t know
# For this analysis, we will only consider the responses that answer this question. Hence, we will not be considering the responses that have negative values.
# Looking at the summary of the survey question on Abortion..
abortion = wvs[(wvs.V204 > 0)]
abortion.V204.describe()
# After removing the negative responses, which indicate that no response was recorded for an individual, we see that the values fall in the range 1-10, as expected. There are 85742 non-missing (positive) values for V204 (abortion) in this dataset. The average response of the global pool of respondents suggests abortion is seen as not justifiable (mean of 3.22). The 75th percentile of the responses is a neutral response (5).
# Since country (V2) seems to be an important column in this dataset, we will drop the observations that do not have a valid country code (negative values in the V2 column).
# As part of cleaning the data, let's also drop the observations that have missing values (NA) for any columns. We will keep the negative values for the rest of the variables, otherwise we will lose a lot of data.
# Removing negative values from V2 as well from abortion, and drop missing values (NA) from entire dataset
wvs_ab = abortion[(abortion.V2 > 0)].dropna()
wvs_ab.shape
# After dropping NA values from the rest of the data and negative values from V2 and V204, we are left with 79267 observations. To simplify the analysis, let's create a new binary variable abortion, with abortion = 1 if V204 > 3 and 0 otherwise.
wvs_ab["abortion"] = [1 if x > 3 else 0 for x in wvs_ab["V204"]]
wvs_ab.abortion.describe()
# The modified column has 0/1 responses, and the mean of 0.36 shows the responses lean towards the conservative attitude
# To investigate which variables/opinions are most related to Abortion, let's look at the Pearson correlation values of abortion with all the other variables.
wvs_ab_corr_all = wvs_ab.corr(method="pearson")[["abortion"]].sort_values(
by="abortion", ascending=False
)
wvs_ab_corr_all
# A strong correlation coefficient is generally described as a value from 0.7 to 1.0 or -0.7 to -1.0. Looking at the coefficients listed above, the only variables satisfying this criterion are the abortion column itself and V204, the original column from which the abortion column was generated. Hence we will consider values greater than 0.4 or less than -0.4 as strong correlation coefficients.
wvs_ab_corr = wvs_ab_corr_all[
(wvs_ab_corr_all.abortion > 0.4) | (wvs_ab_corr_all.abortion < -0.4)
]
# Skipping the first two rows (abortion itself and V204)
wvs_ab_corr[2::]
# Out of the values seen above, the following responses seem to be correlated with abortion:
# V205: Divorce
# V203: Homosexuality
# V206: Sex before marriage
# V207: Suicide
# For further analysis, we will create dummies of the country column (which is categorical) for regression.
wvs_ab = wvs_ab.rename(columns={"V2": "country"})
wvs_ab_d = pd.get_dummies(
wvs_ab, columns=["country"]
) # This step removes the country variable
wvs_ab_d.shape
# After converting the country column into dummies, we now have a total of 386 variables. Let's check if the number of dummy columns created equals the number of unique country codes we had.
# Number of columns with names starting with country - dummies we created == Unique countries in original dataset
wvs_ab_d[wvs_ab_d.columns[pd.Series(wvs_ab_d.columns).str.startswith("country")]].shape[
1
] == wvs_ab.country.unique().size
# There are a total of 58 country dummy columns, which is the same as the total number of countries in the country column. To avoid perfect multicollinearity we will delete one column; hence let us delete the last country code column - country_887
wvs_dummy = wvs_ab_d.drop(["country_887", "V204"], axis=1)
# Also dropping the V204 column - responses for abortion - from the dataframe
wvs_dummy.shape
# Thus after cleaning and preparing the data, we finally have come down to a total of 384 variables and 79267 observations.
# ## Cross Validation Implementation
# Instead of using an existing implementation, we will create our own implementation of k-fold Cross Validation.
def kcv(k, unfit_m, X, y):
indices = X.index.values
i_shuffle = shuffle(indices)
f1 = []
accuracy = []
rmse = []
for i in np.arange(k):
v = i_shuffle[i::k]
X_valid = X.loc[v, :]
X_train = X[~X.index.isin(X_valid.index)]
y_valid = y.loc[v]
y_train = y[~y.index.isin(y_valid.index)]
m = unfit_m.fit(X_train, y_train)
y_predict = m.predict(X_valid)
f1.append(f1_score(y_valid, y_predict, average="weighted"))
accuracy.append(accuracy_score(y_valid, y_predict))
rmse.append(
np.sqrt(np.mean([np.square(m - n) for m, n in zip(y_valid, y_predict)]))
)
return (np.mean(f1), np.mean(accuracy), np.mean(rmse))
# ## Find the best model for this data
# ### k-Nearest Neighbours
# Now before starting with the model, let's extract a random set of data
# Picking a sample of 10,000 observations to keep the runtime manageable
wvs_sample = wvs_dummy.sample(n=10000, random_state=1)
X_sample = wvs_sample.loc[:, wvs_sample.columns != "abortion"]
y_sample = wvs_sample["abortion"]
# X and y for the entire dataset
X = wvs_dummy.loc[:, wvs_dummy.columns != "abortion"]
y = wvs_dummy["abortion"]
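# Optional sanity check of the hand-rolled kcv above (a sketch, not part of the original
# analysis): its mean accuracy should be broadly in line with sklearn's cross_val_score.
from sklearn.model_selection import cross_val_score
check_model = LogisticRegression(max_iter=1000)
print("kcv accuracy        :", kcv(5, check_model, X_sample, y_sample)[1])
print("cross_val_score mean:", cross_val_score(check_model, X_sample, y_sample, cv=5, scoring="accuracy").mean())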
# To keep a track of the performance metrics of all the observed models, we will be creating a dataframe
# Create a structure to store accuracy and F-scores
mycolumns = ["model", "accuracy", "f-score", "RMSE", "runtime"]
models = pd.DataFrame(columns=mycolumns)
models.set_index("model")
# Trying kNN model on the selected sample of data for different values of k
k = 5
start_time = time.perf_counter()
knn_5 = KNeighborsClassifier(n_neighbors=k)
# 5 fold cross validation for sample of original data
f1_knn_5, accuracy_knn_5, rmse_knn_5 = kcv(5, knn_5, X_sample, y_sample)
print("F1-score :", f1_knn_5)
print("Accuracy :", accuracy_knn_5)
models.loc[len(models)] = [
"knn, k=5",
accuracy_knn_5,
f1_knn_5,
rmse_knn_5,
    time.perf_counter() - start_time,
]
k = 3
start_time = time.perf_counter()
knn = KNeighborsClassifier(n_neighbors=k)
# 5 fold cross validation for original data
f1_knn_3, accuracy_knn_3, rmse_knn_3 = kcv(5, knn, X_sample, y_sample)
print("F1-score :", f1_knn_3)
print("Accuracy :", accuracy_knn_3)
models.loc[len(models)] = [
"knn, k=3",
accuracy_knn_3,
f1_knn_3,
rmse_knn_3,
    time.perf_counter() - start_time,
]
k = 7
start_time = time.perf_counter()
knn = KNeighborsClassifier(n_neighbors=k)
# 5 fold cross validation for original data
f1_knn_7, accuracy_knn_7, rmse_knn_7 = kcv(5, knn, X_sample, y_sample)
print("F1-score :", f1_knn_7)
print("Accuracy :", accuracy_knn_7)
models.loc[len(models)] = [
"knn, k=7",
accuracy_knn_7,
f1_knn_7,
rmse_knn_7,
    time.perf_counter() - start_time,
]
k = 9
start_time = time.perf_counter()
knn = KNeighborsClassifier(n_neighbors=k)
# 5 fold cross validation for original data
f1_knn_9, accuracy_knn_9, rmse_knn_9 = kcv(5, knn, X_sample, y_sample)
print("F1-score :", f1_knn_9)
print("Accuracy :", accuracy_knn_9)
models.loc[len(models)] = [
"knn, k=9",
accuracy_knn_9,
f1_knn_9,
rmse_knn_9,
    time.perf_counter() - start_time,
]
k = 13
start_time = time.perf_counter()
knn = KNeighborsClassifier(n_neighbors=k)
# 5 fold cross validation for original data
f1_knn_13, accuracy_knn_13, rmse_knn_13 = kcv(5, knn, X_sample, y_sample)
print("F1-score :", f1_knn_13)
print("Accuracy :", accuracy_knn_13)
models.loc[len(models)] = [
"knn, k=13",
accuracy_knn_13,
f1_knn_13,
rmse_knn_13,
    time.perf_counter() - start_time,
]
# Looking at the accuracy for different values of k in kNN to see which model fits best...
models.sort_values(by=["accuracy", "f-score"], ascending=False)
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
churn_data = pd.read_csv("churn_data.csv")
churn_data
customer_data = pd.read_csv("customer_data.csv")
customer_data
internet_data = pd.read_csv("internet_data.csv")
internet_data
# Merging the data on customer_id
df_1 = pd.merge(churn_data, customer_data, how="inner", on="customerID")
# combining with the last one
telecom = pd.merge(df_1, internet_data, how="inner", on="customerID")
telecom
telecom.shape
telecom.describe()
telecom.info()
# # Now let's start preparing the data
varlist = ["PhoneService", "PaperlessBilling", "Churn", "Partner", "Dependents"]
def Binary_map(x):
return x.map({"Yes": 1, "No": 0})
telecom[varlist] = telecom[varlist].apply(Binary_map)
telecom[varlist]
# ### For categorical variables, let's create dummy variables
dummy1 = pd.get_dummies(
telecom[["Contract", "PaymentMethod", "gender", "InternetService"]], drop_first=True
)
telecom = pd.concat([telecom, dummy1], axis=1)
telecom
# dummies for the remaining categorical variables
ml = pd.get_dummies(telecom["MultipleLines"], prefix="MultipleLines")
ml1 = ml.drop(["MultipleLines_No phone service"], axis=1)
telecom = pd.concat([telecom, ml1], axis=1)
os = pd.get_dummies(telecom["OnlineSecurity"], prefix="OnlineSecurity")
os1 = os.drop(["OnlineSecurity_No internet service"], axis=1)
telecom = pd.concat([telecom, os1], axis=1)
ob = pd.get_dummies(telecom["OnlineBackup"], prefix="OnlineBackup")
ob1 = ob.drop(["OnlineBackup_No internet service"], axis=1)
telecom = pd.concat([telecom, ob1], axis=1)
dp = pd.get_dummies(telecom["DeviceProtection"], prefix="DeviceProtection")
dp1 = dp.drop(["DeviceProtection_No internet service"], axis=1)
telecom = pd.concat([telecom, dp1], axis=1)
ts = pd.get_dummies(telecom["TechSupport"], prefix="TechSupport")
ts1 = ts.drop(["TechSupport_No internet service"], axis=1)
telecom = pd.concat([telecom, ts1], axis=1)
# note: `sm` below shadows the statsmodels alias imported earlier; statsmodels is re-imported before modelling
sm = pd.get_dummies(telecom["StreamingMovies"], prefix="StreamingMovies")
sm1 = sm.drop(["StreamingMovies_No internet service"], axis=1)
telecom = pd.concat([telecom, sm1], axis=1)
st = pd.get_dummies(telecom["StreamingTV"], prefix="StreamingTV")
st1 = st.drop(["StreamingTV_No internet service"], axis=1)
telecom = pd.concat([telecom, st1], axis=1)
telecom
# converting the data type of TotalCharges from object to float
telecom["TotalCharges"] = pd.to_numeric(telecom["TotalCharges"], errors="coerce")
telecom.dtypes
# dropping the variables that are no longer required
telecom = telecom.drop(
    [
        "Contract",
        "PaymentMethod",
        "gender",
        "MultipleLines",
        "InternetService",
        "TechSupport",
        "StreamingTV",
        "StreamingMovies",
    ],
    axis=1,
)
telecom.info()
# Let's check for outliers in the continuous variables
n_telecom = telecom[["tenure", "MonthlyCharges", "SeniorCitizen", "TotalCharges"]]
n_telecom.describe(percentiles=[0.25, 0.5, 0.75, 0.90, 0.95, 0.99])
# From the above it can be observed that there are no major outliers here that need to be removed
# checking for null values and then handling them
telecom.isnull().sum()
telecom
# 11/7043 ≈ 0.16% of rows, which is not significant, so let's remove the rows with missing TotalCharges
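# A quick check of the figure quoted above (share of rows with TotalCharges missing):
print(
    "Missing TotalCharges:",
    telecom["TotalCharges"].isnull().sum(),
    "of",
    len(telecom),
    "rows ({:.2%})".format(telecom["TotalCharges"].isnull().mean()),
)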
telecom = telecom[~np.isnan(telecom["TotalCharges"])]
telecom.isnull().sum()
# Let's split the data into train and test sets
x = telecom.drop(["Churn", "customerID"], axis=1)
x
y = telecom["Churn"]
y
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=0.7, test_size=0.3, random_state=100
)
from sklearn.preprocessing import StandardScaler
x_train[["tenure", "MonthlyCharges", "TotalCharges"]] = StandardScaler().fit_transform(
x_train[["tenure", "MonthlyCharges", "TotalCharges"]]
)
# Lets have a look on co-relations using heatmap
plt.figure(figsize=(20, 10))
sns.heatmap(telecom.corr(), annot=True)
plt.show()
# Here we can see a lot of correlations, so it may be smart to drop some dummies which are highly correlated
x_test = x_test.drop(
    [
        "MultipleLines_No",
        "OnlineSecurity_No",
        "OnlineBackup_No",
        "DeviceProtection_No",
        "TechSupport_No",
        "StreamingMovies_No",
        "StreamingTV_No",
    ],
    axis=1,
)
x_train = x_train.drop(
    [
        "MultipleLines_No",
        "OnlineSecurity_No",
        "OnlineBackup_No",
        "DeviceProtection_No",
        "TechSupport_No",
        "StreamingMovies_No",
        "StreamingTV_No",
    ],
    axis=1,
)
plt.figure(figsize=(20, 10))
sns.heatmap(telecom.corr(), annot=True)
plt.show()
# Let's start training our model
import statsmodels.api as sm
x_train = pd.get_dummies(x_train, drop_first=True)
logm1 = sm.GLM(y_train, (sm.add_constant(x_train)), family=sm.families.Binomial())
logm1.fit().summary()
np.asarray(x_train)
# ### FEATURE SELECTION USING RFE
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
from sklearn.feature_selection import RFE
rfe = RFE(logreg, n_features_to_select=15)  # select the top 15 features
rfe = rfe.fit(x_train, y_train)
rfe.support_
list(zip(x_train.columns, rfe.support_, rfe.ranking_))
col = x_train.columns[rfe.support_]
x_train.columns[~rfe.support_]
# ### Assessing the model with StatsModels
x_train_sm = sm.add_constant(x_train[col])
logm2 = sm.GLM(y_train, x_train_sm, family=sm.families.Binomial())
res = logm2.fit()
res.summary()
# Getting the predicted values on the train set
y_train_pred = res.predict(x_train_sm)
y_train_pred[:10]
y_train_pred = y_train_pred.values.reshape(-1)
y_train_pred[:10]
# Creating a dataframe with the actual churn flag and the predicted probabilities
y_train_pred_final = pd.DataFrame({"Churn": y_train.values, "Churn_Prob": y_train_pred})
y_train_pred_final["CustID"] = y_train.index
y_train_pred_final
y_train_pred_final["predicted"] = y_train_pred_final.Churn_Prob.map(
lambda x: 1 if x > 0.5 else 0
)
# Let's see the head
y_train_pred_final.head()
from sklearn import metrics
y_train_pred_final["predicted"] = y_train_pred_final.Churn_Prob.map(
lambda x: 1 if x > 0.5 else 0
)
# Let's see the head
y_train_pred_final.head()
# Confusion matrix on the training set (rows = actual, columns = predicted):
#              predicted not_churn    predicted churn
# not_churn         3281                   354
# churn              606                   681
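# A minimal sketch reproducing the counts commented above
# (assumes the y_train_pred_final dataframe built in the previous cells):
print(metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted))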
# Let's check the overall accuracy of the model
print(
metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted) * 100
)
# # Checking VIFs
# a high VIF value indicates problematic multicollinearity; a low value is fine
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif["Features"] = x_train[col].columns
vif["VIF"] = [
variance_inflation_factor(x_train[col].values, i)
for i in range(x_train[col].shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
# dropping the features flagged above (high VIF / insignificant) from the selected column set
col = col.drop("InternetService_No")
col = col.drop("OnlineSecurity_No internet service")
col = col.drop("OnlineBackup_No internet service")
col = col.drop("DeviceProtection_No internet service")
col = col.drop("MonthlyCharges")
# now let's re-run the model
x_train_sm = sm.add_constant(x_train[col])
logm3 = sm.GLM(y_train, x_train_sm, family=sm.families.Binomial())
res = logm3.fit()
res.summary()
# Getting the predicted values on the train set
y_train_pred = res.predict(x_train_sm).values.reshape(-1)
y_train_pred[:10]
y_train_pred_final["Churn_Prob"] = y_train_pred
y_train_pred_final["predicted"] = y_train_pred_final.Churn_Prob.map(
lambda x: 1 if x > 0.5 else 0
)
y_train_pred_final.head()
print(
metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted) * 100
)
# the accuracy has not dropped much
# let's check the VIFs again
vif = pd.DataFrame()
vif["Features"] = x_train[col].columns
vif["VIF"] = [
variance_inflation_factor(x_train[col].values, i)
for i in range(x_train[col].shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
col = col.drop("tenure")
# now let's re-run the model
x_train_sm = sm.add_constant(x_train[col])
logm4 = sm.GLM(y_train, x_train_sm, family=sm.families.Binomial())
res = logm4.fit()
res.summary()
y_train_pred = res.predict(x_train_sm).values.reshape(-1)
y_train_pred[:10]
y_train_pred_final["Churn_Prob"] = y_train_pred
y_train_pred_final["predicted"] = y_train_pred_final.Churn_Prob.map(
lambda x: 1 if x > 0.5 else 0
)
y_train_pred_final.head()
print(
metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted) * 100
)
# Accuracy is basically the same
vif = pd.DataFrame()
vif["Features"] = x_train[col].columns
vif["VIF"] = [
variance_inflation_factor(x_train[col].values, i)
for i in range(x_train[col].shape[1])
]
vif["VIF"] = round(vif["VIF"], 2)
vif = vif.sort_values(by="VIF", ascending=False)
vif
(1000 + 1200) / (1000 + 1200 + 300)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras.datasets import mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images.shape
# train_images[0]
train_images[0].ndim
train_images[0].shape
train_labels[0]
train_labels[2]
from tensorflow.keras import models
from tensorflow.keras import layers
network = models.Sequential()
network.add(layers.Dense(512, activation="relu", input_shape=(28 * 28,)))
network.add(layers.Dense(10, activation="softmax"))
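# The script stops after defining the network; a minimal sketch of the usual next steps.
# The optimizer, loss, epochs, batch size and 0-255 -> 0-1 scaling below are standard
# assumptions, not taken from the original.
network.compile(
    optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
x_train_mnist = train_images.reshape((len(train_images), 28 * 28)).astype("float32") / 255
x_test_mnist = test_images.reshape((len(test_images), 28 * 28)).astype("float32") / 255
network.fit(x_train_mnist, train_labels, epochs=5, batch_size=128)
network.evaluate(x_test_mnist, test_labels)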
|
#
#
# ## [mlcourse.ai](https://mlcourse.ai) – Open Machine Learning Course
# Author: [Yury Kashnitskiy](https://yorko.github.io) (@yorko). This material is subject to the terms and conditions of the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. Free use is permitted for any non-commercial purpose.
# ## Assignment 4. Sarcasm detection with logistic regression
#
# We'll be using the dataset from the [paper](https://arxiv.org/abs/1704.05579) "A Large Self-Annotated Corpus for Sarcasm" with >1mln comments from Reddit, labeled as either sarcastic or not. A processed version can be found on Kaggle in a form of a [Kaggle Dataset](https://www.kaggle.com/danofer/sarcasm).
# Sarcasm detection is easy.
#
# some necessary imports
import os
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
import seaborn as sns
from matplotlib import pyplot as plt
train_df = pd.read_csv("../input/sarcasm/train-balanced-sarcasm.csv")
train_df.head()
train_df.info()
# Some comments are missing, so we drop the corresponding rows.
train_df.dropna(subset=["comment"], inplace=True)
# We notice that the dataset is indeed balanced
train_df["label"].value_counts()
train_df["label"].hist()
# We split data into training and validation parts.
train_texts, valid_texts, y_train, y_valid = train_test_split(
train_df["comment"], train_df["label"], random_state=17
)
# ## Tasks:
# 1. Analyze the dataset, make some plots. This [Kernel](https://www.kaggle.com/sudalairajkumar/simple-exploration-notebook-qiqc) might serve as an example
# 2. Build a Tf-Idf + logistic regression pipeline to predict sarcasm (`label`) based on the text of a comment on Reddit (`comment`).
# 3. Plot the words/bigrams which are most predictive of sarcasm (you can use [eli5](https://github.com/TeamHG-Memex/eli5) for that)
# 4. (optionally) add subreddits as new features to improve model performance. Apply here the Bag of Words approach, i.e. treat each subreddit as a new feature.
# ## Links:
# - Machine learning library [Scikit-learn](https://scikit-learn.org/stable/index.html) (a.k.a. sklearn)
# - Kernels on [logistic regression](https://www.kaggle.com/kashnitsky/topic-4-linear-models-part-2-classification) and its applications to [text classification](https://www.kaggle.com/kashnitsky/topic-4-linear-models-part-4-more-of-logit), also a [Kernel](https://www.kaggle.com/kashnitsky/topic-6-feature-engineering-and-feature-selection) on feature engineering and feature selection
# - [Kaggle Kernel](https://www.kaggle.com/abhishek/approaching-almost-any-nlp-problem-on-kaggle) "Approaching (Almost) Any NLP Problem on Kaggle"
# - [ELI5](https://github.com/TeamHG-Memex/eli5) to explain model predictions
# Let's look at our data
train_df.head()
# Let's find the most common words for the two types of comments
sarcasm_texts = train_df[train_df["label"] == 1]
non_sarcasm_texts = train_df[train_df["label"] == 0]
from sklearn.feature_extraction.text import CountVectorizer
# Helper to find the top n words in a CountVectorizer vocabulary
def get_top_n_words(corpus, n=None):
vec = CountVectorizer().fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
# For sarcasm comments
most_sarcasm_words = get_top_n_words(train_df[train_df["label"] == 1]["comment"], 30)
# For non sarcasm comments
most_non_sarcasm_words = get_top_n_words(
train_df[train_df["label"] == 0]["comment"], 30
)
# Let's plot words in sarcasm comments
data = pd.DataFrame(most_sarcasm_words, columns=["word", "frequency"])
fig_dims = (18, 4)
fig, ax = plt.subplots(figsize=fig_dims)
sns.barplot(x="word", y="frequency", data=data, ax=ax)
# And in non sarcasm comments
data = pd.DataFrame(most_non_sarcasm_words, columns=["word", "frequency"])
fig_dims = (18, 4)
fig, ax = plt.subplots(figsize=fig_dims)
sns.barplot(x="word", y="frequency", data=data, ax=ax)
# We see that the most frequent words are almost the same for both classes, so we can expect them to get small weights in our model
# Use CountVectorizer to process all comments
cv = CountVectorizer()
cv.fit(train_texts)
# Length of our vocabulary of all used in comments words
len(cv.vocabulary_)
# Transform all comments in sparse matrix
X_train = cv.transform(train_texts)
print(cv.get_feature_names()[10000])
X_train[10000].nonzero()[1]
X_test = cv.transform(valid_texts)
# Let's fit our model LogisticRegression
logit = LogisticRegression(solver="lbfgs", n_jobs=-1, random_state=7)
logit.fit(X_train, y_train)
# And check result on the test sample
logit.score(X_test, y_valid)
# Let's make a pipeline for our model
from sklearn.pipeline import make_pipeline
text_pipe_logit = make_pipeline(
CountVectorizer(), LogisticRegression(solver="lbfgs", n_jobs=1, random_state=7)
)
text_pipe_logit.fit(train_texts, y_train)
text_pipe_logit.score(valid_texts, y_valid)
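# Task 2 above asks for a Tf-Idf + logistic regression pipeline, while the model so far
# uses CountVectorizer. A minimal Tf-Idf variant on the same split (the ngram_range and
# max_features values are assumptions, not tuned):
tfidf_pipe_logit = make_pipeline(
    TfidfVectorizer(ngram_range=(1, 2), max_features=50000),
    LogisticRegression(solver="lbfgs", n_jobs=1, random_state=7),
)
tfidf_pipe_logit.fit(train_texts, y_train)
tfidf_pipe_logit.score(valid_texts, y_valid)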
# Let's find optimal regularization parameter
from sklearn.model_selection import GridSearchCV
param_grid_logit = {"logisticregression__C": np.logspace(-3, 3, 20)}
grid_logit = GridSearchCV(
text_pipe_logit, param_grid_logit, return_train_score=True, cv=3, n_jobs=-1
)
grid_logit.fit(train_texts, y_train)
print(grid_logit.best_params_, grid_logit.best_score_, sep="\n")
# Check the final score
grid_logit.score(valid_texts, y_valid)
plt.plot(
grid_logit.param_grid["logisticregression__C"],
grid_logit.cv_results_["mean_test_score"],
color="red",
label="test",
)
# In our case, almost nothing depends on the regularization parameter
# Most important words
import eli5
eli5.show_weights(text_pipe_logit, top=20)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import sys
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
print(os.listdir("../input"))
import glob
from os.path import join as path_join
from scipy.stats import spearmanr, rankdata
os.system("pip install ../input/sacremoses/sacremoses-master/ > /dev/null")
os.system("pip install ../input/transformers/transformers-master/ > /dev/null")
package_path = "../input/radam-pytorch/RAdam"
sys.path.append(package_path)
# # quest-003-55-bert-07-3-valid
folder = "../input/google-quest-challenge/"
pretrained_bert = (
"../input/pretrainedbertpytorch/pretrained-bert-pytorch/bert-base-uncased/"
)
model_weight_path1 = "../input/quest-003-55-bert-07-3-training-1/"
model_weight_path2 = "../input/quest-003-55-bert-07-3-training-2/"
model_weight_path3 = "../input/quest-003-55-bert-07-3-training-3/"
import os
import re
import gc
import pickle
import numpy as np
import pandas as pd
import random
import copy
import string
import time
import nltk
from nltk.tag import pos_tag
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from urllib.parse import urlparse
import math
from tqdm import tqdm
from spacy.lang.en import English
from scipy.stats import spearmanr
import warnings
warnings.simplefilter("ignore")
import torch
from torch.utils.data import TensorDataset, DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data.sampler import SubsetRandomSampler
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from torch.nn.utils.weight_norm import weight_norm
import transformers
print("transformers:", transformers.__version__)
SEED = 12345
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(SEED)
import pickle
def read_pickle_from_file(pickle_file):
with open(pickle_file, "rb") as f:
x = pickle.load(f)
return x
def write_pickle_to_file(pickle_file, x):
with open(pickle_file, "wb") as f:
pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)
# ## Load data
train_df = pd.read_csv(f"{folder}train.csv")
train_df.shape
test_df = pd.read_csv(f"{folder}test.csv")
test_df.shape
# ## Extract target variables
target_table = [
["question_asker_intent_understanding", "question"],
["question_body_critical", "question"],
["question_conversational", "question"],
["question_expect_short_answer", "question"],
["question_fact_seeking", "question"],
["question_has_commonly_accepted_answer", "question"],
["question_interestingness_others", "question"],
["question_interestingness_self", "question"],
["question_multi_intent", "question"],
["question_not_really_a_question", "question"],
["question_opinion_seeking", "question"],
["question_type_choice", "question"],
["question_type_compare", "question"],
["question_type_consequence", "question"],
["question_type_definition", "question"],
["question_type_entity", "question"],
["question_type_instructions", "question"],
["question_type_procedure", "question"],
["question_type_reason_explanation", "question"],
["question_type_spelling", "question"],
["question_well_written", "question"],
["answer_helpful", "answer"],
["answer_level_of_information", "answer"],
["answer_plausible", "answer"],
["answer_relevance", "answer"],
["answer_satisfaction", "answer"],
["answer_type_instructions", "answer"],
["answer_type_procedure", "answer"],
["answer_type_reason_explanation", "answer"],
["answer_well_written", "answer"],
]
input_columns = ["question_title", "question_body", "answer"]
target_question_columns = []
target_answer_columns = []
for table in target_table:
if table[1] == "question":
target_question_columns.append(table[0])
elif table[1] == "answer":
target_answer_columns.append(table[0])
target_columns = target_question_columns + target_answer_columns
print("question:", len(target_question_columns))
print("answer:", len(target_answer_columns))
print("total:", len(target_columns))
for df in [train_df, test_df]:
## domain components
df["domcom"] = df["url"].apply(lambda s: s.split("://")[1].split("/")[0].split("."))
# count components
df["dom_cnt"] = df["domcom"].apply(lambda s: len(s))
# extend length
df["domcom"] = df["domcom"].apply(lambda s: s + ["none", "none"])
# components
for ii in range(0, 4):
df["url_" + str(ii)] = df["domcom"].apply(lambda s: s[ii])
# clean up
df.drop("domcom", axis=1, inplace=True)
# ### Features
train_feature = pd.DataFrame()
test_feature = pd.DataFrame()
# #### Text based features
import nltk
from nltk.corpus import stopwords
eng_stopwords = set(stopwords.words("english"))
for df, feature in [[train_df, train_feature], [test_df, test_feature]]:
for column in input_columns:
feature[column + "_total_length"] = df[column].apply(len)
feature[column + "_capitals"] = df[column].apply(
lambda comment: sum(1 for c in comment if c.isupper())
)
feature[column + "_caps_vs_length"] = feature.apply(
lambda row: float(row[column + "_capitals"])
/ float(row[column + "_total_length"]),
axis=1,
)
feature[column + "_num_exclamation_marks"] = df[column].apply(
lambda comment: comment.count("!")
)
feature[column + "_num_question_marks"] = df[column].apply(
lambda comment: comment.count("?")
)
feature[column + "_num_punctuation"] = df[column].apply(
lambda comment: sum(comment.count(w) for w in ".,;:")
)
feature[column + "_num_symbols"] = df[column].apply(
lambda comment: sum(comment.count(w) for w in "*&$%")
)
feature[column + "_num_chars"] = df[column].apply(lambda x: len(str(x)))
feature[column + "_num_words"] = df[column].apply(
lambda comment: len(comment.split())
)
feature[column + "_num_unique_words"] = df[column].apply(
lambda comment: len(set(w for w in comment.split()))
)
feature[column + "_words_vs_unique"] = (
feature[column + "_num_unique_words"] / feature[column + "_num_words"]
)
feature[column + "_num_smilies"] = df[column].apply(
lambda comment: sum(comment.count(w) for w in (":-)", ":)", ";-)", ";)"))
)
## Number of stopwords in the text ##
feature[column + "_num_stopwords"] = df[column].apply(
lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords])
)
## Number of punctuations in the text ##
feature[column + "_num_punctuations"] = df[column].apply(
lambda x: len([c for c in str(x) if c in string.punctuation])
)
## Number of title case words in the text ##
feature[column + "_num_words_upper"] = df[column].apply(
lambda x: len([w for w in str(x).split() if w.isupper()])
)
nlp = English()
sentencizer = nlp.create_pipe("sentencizer")
nlp.add_pipe(sentencizer)
ans_user_and_category = train_df[
train_df[["answer_user_name", "category"]].duplicated()
][["answer_user_name", "category"]].values
ans_user_and_category.shape
def add_question_metadata_features(text):
doc = nlp(text)
indirect = 0
choice_words = 0
reason_explanation_words = 0
question_count = 0
for sent in doc.sents:
if "?" in sent.text and "?" == sent.text[-1]:
question_count += 1
for token in sent:
if token.text.lower() == "why":
reason_explanation_words += 1
elif token.text.lower() == "or":
choice_words += 1
if question_count == 0:
indirect += 1
return np.array([indirect, question_count, reason_explanation_words, choice_words])
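# Illustrative only: the handmade question-metadata features for a toy question body,
# returned in the order [indirect, question_count, reason_explanation_words, choice_words]
print(add_question_metadata_features("Why is the sky blue? Or is it violet?"))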
def question_answer_author_same(df):
q_username = df["question_user_name"]
a_username = df["answer_user_name"]
author_same = []
for i in range(len(df)):
if q_username[i] == a_username[i]:
author_same.append(int(1))
else:
author_same.append(int(0))
return author_same
def add_external_features(df, feature):
feature["question_vs_answer_length"] = (
feature["question_body_num_words"] / feature["answer_num_words"]
)
feature["q_a_author_same"] = question_answer_author_same(df)
answer_user_cat = []
for i in df[["answer_user_name", "category"]].values:
if i in ans_user_and_category:
answer_user_cat.append(int(1))
else:
answer_user_cat.append(int(0))
feature["answer_user_cat"] = answer_user_cat
handmade_features = []
for text in df["question_body"].values:
handmade_features.append(add_question_metadata_features(text))
feature = pd.concat(
[
feature,
pd.DataFrame(
handmade_features,
columns=[
"indirect",
"question_count",
"reason_explanation_words",
"choice_words",
],
),
],
axis=1,
)
return feature
train_feature = add_external_features(train_df, train_feature)
test_feature = add_external_features(test_df, test_feature)
for column in input_columns:
print(
"{} | Min: {}, Max: {}".format(
column,
train_feature[column + "_total_length"].min(),
train_feature[column + "_total_length"].max(),
)
)
print("=====")
for column in input_columns:
print(
"{} | Min: {}, Max: {}".format(
column,
test_feature[column + "_total_length"].min(),
test_feature[column + "_total_length"].max(),
)
)
stop_words = nltk.corpus.stopwords.words("english")
symbol = [
"'",
'"',
":",
";",
".",
",",
"-",
"!",
"?",
"'s",
")",
"(",
"...",
"``",
"''",
"/",
"$",
"%",
"*",
"&",
"{",
"}",
"[",
"]",
]
def get_prevalent(texts, top_count=15):
tokenized_sents = [nltk.word_tokenize(i) for i in texts]
tokenized_sents = [flatten for inner in tokenized_sents for flatten in inner]
# fdist = nltk.FreqDist(w for w in tokenized_sents if w not in stop_words + symbol)
fdist = nltk.FreqDist(
w.lower() for w in tokenized_sents if w.lower() not in stop_words + symbol
)
comments = fdist.most_common(top_count)
return [word[0] for word in comments]
for column in input_columns:
words = get_prevalent(train_df[column])
print(column, words)
for word in words:
for df, feature in [[train_df, train_feature], [test_df, test_feature]]:
feature[column + "_num" + word] = df[column].apply(
lambda comment: comment.count(word)
)
# #### count
find = re.compile(r"^[^.]*")
train_df["netloc"] = train_df["url"].apply(
lambda x: re.findall(find, urlparse(x).netloc)[0]
)
test_df["netloc"] = test_df["url"].apply(
lambda x: re.findall(find, urlparse(x).netloc)[0]
)
count_columns = [
"question_title",
"question_user_name",
"answer_user_name",
"category",
"netloc",
]
for col in count_columns:
value = train_df[col].value_counts()
train_feature[col + "_count"] = train_df[col].map(value)
test_feature[col + "_count"] = test_df[col].map(value).fillna(1)
for col in train_feature.columns:
train_mean = np.nanmean(train_feature[col].values)
train_feature[col].fillna(train_mean, inplace=True)
test_feature[col].fillna(train_mean, inplace=True)
print("train: nan=", np.sum(np.sum(pd.isnull(train_feature))))
print("test : nan=", np.sum(np.sum(pd.isnull(test_feature))))
scaler = MinMaxScaler()
scaler.fit(train_feature)
train_feature = pd.DataFrame(
scaler.transform(train_feature), columns=train_feature.columns
)
test_feature = pd.DataFrame(
scaler.transform(test_feature), columns=test_feature.columns
)
del scaler
gc.collect()
# #### Label Preprocessing
# - https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/discussion/100961
def convert_label(df, result_df, column, count=10):
labels = [
        (df[column].values >= (rate / count)).astype(int)
for rate in range(1, count + 1)
]
columns = ["{}_{}".format(column, i) for i in range(count)]
labels = np.array(labels).T
label_df = pd.DataFrame(labels, columns=columns)
result_df = pd.concat((result_df, label_df), axis=1)
return result_df
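# Illustrative only: convert_label turns a [0, 1] column into `count` binary indicator
# columns, one per threshold rate/count (hypothetical toy data below).
_demo_df = pd.DataFrame({"score": [0.05, 0.5, 0.95]})
print(convert_label(_demo_df, pd.DataFrame(), "score", count=4))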
label_convert_count = 12
train_feature2 = pd.DataFrame()
test_feature2 = pd.DataFrame()
for column in train_feature.columns:
train_feature2 = convert_label(
train_feature, train_feature2, column, label_convert_count
)
test_feature2 = convert_label(
test_feature, test_feature2, column, label_convert_count
)
train_feature = train_feature2.copy()
test_feature = test_feature2.copy()
del train_feature2, test_feature2
# #### Encode
features = ["question_title", "question_user_name", "answer_user_name"]
limits = [6, 8, 8]
for col, limit in zip(features, limits):
value = train_df[col].value_counts()
train_df["item_count"] = train_df[col].map(value)
train_df["item_value"] = train_df[col].copy()
train_df.loc[train_df.item_count < limit, "item_value"] = "___###___"
test_df["item_count"] = test_df[col].map(value).fillna(1)
test_df["item_value"] = test_df[col].copy()
test_df.loc[test_df.item_count < limit, "item_value"] = "___###___"
lb = LabelBinarizer()
lb.fit(train_df["item_value"])
encode_train = lb.transform(train_df["item_value"])
encode_test = lb.transform(test_df["item_value"])
columns = ["LabelBinarizer_{}".format(i) for i in range(encode_train.shape[1])]
print("{}: {}".format(col, len(train_df["item_value"].value_counts())))
encode_train = pd.DataFrame(encode_train, columns=columns)
train_feature = pd.concat((train_feature, encode_train), axis=1)
encode_test = pd.DataFrame(encode_test, columns=columns)
test_feature = pd.concat((test_feature, encode_test), axis=1)
del lb
train_df.drop(["item_count", "item_value"], axis=1, inplace=True)
test_df.drop(["item_count", "item_value"], axis=1, inplace=True)
features = ["url_0", "category"]
enc = OneHotEncoder(handle_unknown="ignore")
enc.fit(train_df[features])
encode_train = enc.transform(train_df[features]).toarray()
encode_test = enc.transform(test_df[features]).toarray()
columns = ["encode_{}".format(i) for i in range(encode_train.shape[1])]
encode_train = pd.DataFrame(encode_train, columns=columns)
train_feature = pd.concat((train_feature, encode_train), axis=1)
encode_test = pd.DataFrame(encode_test, columns=columns)
test_feature = pd.concat((test_feature, encode_test), axis=1)
del encode_train, encode_test, enc
# ### remove nan
for col in train_feature.columns:
train_mean = np.nanmean(train_feature[col].values)
train_feature[col].fillna(train_mean, inplace=True)
test_feature[col].fillna(train_mean, inplace=True)
print("train: nan=", np.sum(np.sum(pd.isnull(train_feature))))
print("test : nan=", np.sum(np.sum(pd.isnull(test_feature))))
# #### Text cleaning
# https://www.kaggle.com/chenshengabc/from-quest-encoding-ensemble-a-little-bit-differen
puncts = [
",",
".",
'"',
":",
")",
"(",
"-",
"!",
"?",
"|",
";",
"'",
"$",
"&",
"/",
"[",
"]",
">",
"%",
"=",
"#",
"*",
"+",
"\\",
"•",
"~",
"@",
"£",
"·",
"_",
"{",
"}",
"©",
"^",
"®",
"`",
"<",
"→",
"°",
"€",
"™",
"›",
"♥",
"←",
"×",
"§",
"″",
"′",
"Â",
"█",
"½",
"à",
"…",
"\xa0",
"\t",
"“",
"★",
"”",
"–",
"●",
"â",
"►",
"−",
"¢",
"²",
"¬",
"░",
"¶",
"↑",
"±",
"¿",
"▾",
"═",
"¦",
"║",
"―",
"¥",
"▓",
"—",
"‹",
"─",
"\u3000",
"\u202f",
"▒",
":",
"¼",
"⊕",
"▼",
"▪",
"†",
"■",
"’",
"▀",
"¨",
"▄",
"♫",
"☆",
"é",
"¯",
"♦",
"¤",
"▲",
"è",
"¸",
"¾",
"Ã",
"⋅",
"‘",
"∞",
"«",
"∙",
")",
"↓",
"、",
"│",
"(",
"»",
",",
"♪",
"╩",
"╚",
"³",
"・",
"╦",
"╣",
"╔",
"╗",
"▬",
"❤",
"ï",
"Ø",
"¹",
"≤",
"‡",
"√",
]
mispell_dict = {
"aren't": "are not",
"can't": "cannot",
"couldn't": "could not",
"couldnt": "could not",
"didn't": "did not",
"doesn't": "does not",
"doesnt": "does not",
"don't": "do not",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"havent": "have not",
"he'd": "he would",
"he'll": "he will",
"he's": "he is",
"i'd": "I would",
"i'd": "I had",
"i'll": "I will",
"i'm": "I am",
"isn't": "is not",
"it's": "it is",
"it'll": "it will",
"i've": "I have",
"let's": "let us",
"mightn't": "might not",
"mustn't": "must not",
"shan't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"shouldn't": "should not",
"shouldnt": "should not",
"that's": "that is",
"thats": "that is",
"there's": "there is",
"theres": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"theyre": "they are",
"they've": "they have",
"we'd": "we would",
"we're": "we are",
"weren't": "were not",
"we've": "we have",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"where's": "where is",
"who'd": "who would",
"who'll": "who will",
"who're": "who are",
"who's": "who is",
"who've": "who have",
"won't": "will not",
"wouldn't": "would not",
"you'd": "you would",
"you'll": "you will",
"you're": "you are",
"you've": "you have",
"'re": " are",
"wasn't": "was not",
"we'll": " will",
"didn't": "did not",
"tryin'": "trying",
}
def clean_text(x):
x = str(x).replace("\n", "")
for punct in puncts:
x = x.replace(punct, f" {punct} ")
return x
def clean_numbers(x):
x = re.sub("[0-9]{5,}", "#####", x)
x = re.sub("[0-9]{4}", "####", x)
x = re.sub("[0-9]{3}", "###", x)
x = re.sub("[0-9]{2}", "##", x)
return x
from nltk.tokenize.treebank import TreebankWordTokenizer
tokenizer = TreebankWordTokenizer()
def handle_contractions(x):
x = tokenizer.tokenize(x)
return x
def fix_quote(x):
x = [x_[1:] if x_.startswith("'") else x_ for x_ in x]
x = " ".join(x)
return x
def _get_mispell(mispell_dict):
mispell_re = re.compile("(%s)" % "|".join(mispell_dict.keys()))
return mispell_dict, mispell_re
def replace_typical_misspell(text):
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)
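# Illustrative only: how the contraction/misspelling map above rewrites a string
print(replace_typical_misspell("i'm sure it's fine"))  # -> "I am sure it is fine"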
def clean_data(df, columns: list):
for col in columns:
# df[col] = df[col].apply(lambda x: clean_numbers(x))
df[col] = df[col].apply(lambda x: clean_text(x.lower()))
df[col] = df[col].apply(lambda x: replace_typical_misspell(x))
df[col] = df[col].apply(lambda x: handle_contractions(x))
df[col] = df[col].apply(lambda x: fix_quote(x))
return df
train_df = clean_data(train_df, input_columns)
test_df = clean_data(test_df, input_columns)
del tokenizer
# #### Bert
class SpatialDropout(nn.Dropout2d):
def forward(self, x):
x = x.unsqueeze(2) # (N, T, 1, K)
x = x.permute(0, 3, 2, 1) # (N, K, 1, T)
x = super(SpatialDropout, self).forward(
x
) # (N, K, 1, T), some features are masked
x = x.permute(0, 3, 2, 1) # (N, T, 1, K)
x = x.squeeze(2) # (N, T, K)
return x
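# Illustrative only: SpatialDropout drops whole embedding channels per example,
# leaving the (batch, seq_len, embed_dim) shape unchanged.
_sd_demo = SpatialDropout(0.2)
print(_sd_demo(torch.randn(2, 5, 8)).shape)  # -> torch.Size([2, 5, 8])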
class NeuralNet(nn.Module):
def __init__(self, num_features, num_labels, pretrained_bert):
super(NeuralNet, self).__init__()
self.bert = transformers.BertModel.from_pretrained(pretrained_bert)
self.num_features = num_features
self.num_labels = num_labels
self.encoded_dropout = SpatialDropout(0.2)
self.pooled_dropout = nn.Dropout(0.2)
self.feature_linear = nn.Sequential(
nn.Linear(self.num_features, self.num_features),
nn.ReLU(inplace=True),
nn.Dropout(0.2),
)
dense_hidden_units = self.bert.config.hidden_size * 3 + self.num_features
self.linear1 = nn.Linear(dense_hidden_units, dense_hidden_units)
self.linear2 = nn.Linear(dense_hidden_units, dense_hidden_units)
self.classifier = nn.Sequential(
nn.Linear(dense_hidden_units, num_labels),
)
def forward(self, ids, masks, segments, feature):
feature_output = self.feature_linear(feature)
sequence_output, pooled_output = self.bert(
input_ids=ids, attention_mask=masks, token_type_ids=segments
)
sequence_output = self.encoded_dropout(sequence_output)
pooled_output = self.pooled_dropout(pooled_output)
avg_pool = torch.mean(sequence_output, 1)
max_pool, _ = torch.max(sequence_output, 1)
h_conc = torch.cat((avg_pool, max_pool, pooled_output, feature_output), 1)
h_conc_linear = F.relu(self.linear1(h_conc))
hidden = h_conc + h_conc_linear
h_conc_linear = F.relu(self.linear2(hidden))
hidden = hidden + h_conc_linear
return self.classifier(hidden)
def compute_input_title_question(df, tokenizer, max_sequence_length=512):
input_ids, input_masks, input_segments = [], [], []
for _, instance in df.iterrows():
title = tokenizer.tokenize(instance.question_title)
question = tokenizer.tokenize(instance.question_body)
if (len(title) + len(question) + 3) > max_sequence_length:
if len(title) > 30:
title = title[:30]
question_len = max_sequence_length - len(title) - 3
question = question[:question_len]
# token = ["[CLS]"] + title + ["[SEP]"] + question + ["[SEP]"]
# token_ids = tokenizer.convert_tokens_to_ids(token)
title_ids = tokenizer.convert_tokens_to_ids(title)
question_ids = tokenizer.convert_tokens_to_ids(question)
cls_ids = tokenizer.convert_tokens_to_ids(["[CLS]"])
sep_ids = tokenizer.convert_tokens_to_ids(["[SEP]"])
token_ids = cls_ids + title_ids + sep_ids + question_ids + sep_ids
padding = [0] * (max_sequence_length - len(token_ids))
ids = token_ids + padding
masks = [1] * len(token_ids) + padding
segments = [0] * (len(title_ids) + 2) + [1] * (len(question_ids) + 1) + padding
input_ids.append(ids)
input_masks.append(masks)
input_segments.append(segments)
return [
torch.from_numpy(np.asarray(input_ids, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_masks, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_segments, dtype=np.int32)).long(),
]
def compute_input_arrays(df, tokenizer, max_sequence_length=512):
input_ids, input_masks, input_segments = [], [], []
t_max_len = 35
for _, instance in df.iterrows():
title = tokenizer.tokenize(instance.question_title)
question = tokenizer.tokenize(instance.question_body)
answer = tokenizer.tokenize(instance.answer)
if (len(title) + len(question) + len(answer) + 4) > max_sequence_length:
if len(title) > t_max_len:
title = title[:t_max_len]
question_len = (max_sequence_length - len(title) - 4) // 2
question = question[:question_len]
answer_len = max_sequence_length - len(title) - len(question) - 4
answer = answer[:answer_len]
# token = ["[CLS]"] + title + ["[SEP]"] + question + ["[SEP]"] + answer + ["[SEP]"]
# token_ids = tokenizer.convert_tokens_to_ids(token)
title_ids = tokenizer.convert_tokens_to_ids(title)
question_ids = tokenizer.convert_tokens_to_ids(question)
answer_ids = tokenizer.convert_tokens_to_ids(answer)
cls_ids = tokenizer.convert_tokens_to_ids(["[CLS]"])
sep_ids = tokenizer.convert_tokens_to_ids(["[SEP]"])
token_ids = (
cls_ids
+ title_ids
+ sep_ids
+ question_ids
+ sep_ids
+ answer_ids
+ sep_ids
)
padding = [0] * (max_sequence_length - len(token_ids))
ids = token_ids + padding
masks = [1] * len(token_ids) + padding
segments = (
[0] * (len(title_ids) + len(question_ids) + 3)
+ [1] * (len(answer_ids) + 1)
+ padding
)
input_ids.append(ids)
input_masks.append(masks)
input_segments.append(segments)
return [
torch.from_numpy(np.asarray(input_ids, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_masks, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_segments, dtype=np.int32)).long(),
]
n_splits = 6
"""
x_train = train_feature.values
y_train = train_df[target_columns].values
cv = KFold( n_splits=n_splits, random_state=SEED )
kfold_split = list( cv.split( x_train, y_train ) )
write_pickle_to_file( 'kfold_split_index.pkl', kfold_split )
"""
kfold_split = read_pickle_from_file(model_weight_path1 + "kfold_split_index.pkl")
test_pred_datas = {}
test_pred_weights = {}
for column in target_columns:
test_pred_datas[column] = np.zeros(len(test_df))
test_pred_weights[column] = 0.0
def add_test_pred_data(prediction, columns, weight):
for column_idx, column in enumerate(columns):
test_pred_datas[column] += weight * prediction[:, column_idx]
test_pred_weights[column] += weight
validation_datas = {}
validation_counts = {}
for column in target_columns:
validation_datas[column] = np.zeros(len(train_df))
validation_counts[column] = np.zeros(len(train_df))
def add_validation_data(prediction, columns, idx):
for column_idx, column in enumerate(columns):
validation_datas[column][idx] += prediction[:, column_idx]
validation_counts[column][idx] += 1.0
def mean_spearmanr_correlation_score(y_true, y_pred):
num_labels = y_pred.shape[1]
score = np.nanmean(
[
spearmanr(y_pred[:, idx], y_true[:, idx]).correlation
for idx in range(num_labels)
]
)
return score
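# Illustrative only: identical prediction and target columns give a mean Spearman of 1.0
_y_demo = np.array([[0.1, 0.2], [0.5, 0.4], [0.9, 0.8]])
print(mean_spearmanr_correlation_score(_y_demo, _y_demo))  # -> 1.0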
class QuestDataset(torch.utils.data.Dataset):
def __init__(self, inputs, features, labels=None):
self.inputs = inputs
self.features = features
if labels is not None:
self.labels = labels
else:
self.labels = None
def __getitem__(self, idx):
input_ids = self.inputs[0][idx]
input_masks = self.inputs[1][idx]
input_segments = self.inputs[2][idx]
input_features = self.features[idx]
if self.labels is not None: # targets
input_labels = self.labels[idx]
return input_ids, input_masks, input_segments, input_features, input_labels
return input_ids, input_masks, input_segments, input_features
def __len__(self):
return len(self.inputs[0])
def model_test_validation(
label_columns, train_inputs, test_inputs, x_train, x_test, weight_files
):
if len(kfold_split) != len(weight_files):
return
batch_size = 6
num_features = x_test.shape[1]
num_labels = len(label_columns)
test_dataset = QuestDataset(test_inputs, x_test, None)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size, shuffle=False
)
for k, (train_idx, valid_idx) in enumerate(kfold_split):
fname = weight_files[k]
print(k + 1, fname)
x_train_valid = x_train[valid_idx]
train_inputs_valid = [x[valid_idx] for x in train_inputs]
valid_dataset = QuestDataset(train_inputs_valid, x_train_valid, None)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False
)
model = NeuralNet(num_features, num_labels, pretrained_bert)
model.to("cuda:0")
model.load_state_dict(torch.load(fname))
model.eval()
# ====================
# validation
valid_preds_fold = []
with torch.no_grad():
for ids, masks, segments, features in valid_loader:
ids = ids.cuda()
masks = masks.cuda()
segments = segments.cuda()
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(ids, masks, segments, features)
valid_preds_fold.extend(
torch.sigmoid(y_pred).cpu().data.numpy().tolist()
)
valid_preds_fold = np.array(valid_preds_fold)
add_validation_data(valid_preds_fold, label_columns, valid_idx)
# ====================
# test
test_preds_fold = []
with torch.no_grad():
for ids, masks, segments, features in test_loader:
ids = ids.cuda()
masks = masks.cuda()
segments = segments.cuda()
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(ids, masks, segments, features)
test_preds_fold.extend(
torch.sigmoid(y_pred).cpu().data.numpy().tolist()
)
test_preds_fold = np.array(test_preds_fold)
add_test_pred_data(test_preds_fold, label_columns, 1.0)
del model, valid_dataset, valid_loader
torch.cuda.empty_cache()
gc.collect()
del test_dataset, test_loader
tokenizer = transformers.BertTokenizer.from_pretrained(pretrained_bert)
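# A quick shape sanity check (toy row, illustrative only) that the encoder above returns
# ids/masks/segments all padded to the requested max_sequence_length.
_toy = pd.DataFrame(
    [
        {
            "question_title": "What is a segment id?",
            "question_body": "How are title, question and answer separated for BERT?",
            "answer": "Segment 0 covers the title and question, segment 1 the answer.",
        }
    ]
)
_ids, _masks, _segs = compute_input_arrays(_toy, tokenizer, max_sequence_length=64)
print(_ids.shape, _masks.shape, _segs.shape)  # each torch.Size([1, 64])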
# ### question
train_inputs = compute_input_title_question(
train_df, tokenizer, max_sequence_length=512
)
test_inputs = compute_input_title_question(test_df, tokenizer, max_sequence_length=512)
x_train = train_feature.values
x_test = test_feature.values
print(x_train.shape)
print(x_test.shape)
print(len(target_question_columns))
weight_files = [
model_weight_path1 + "best_weights_003_55_7_3_question_1.pth",
model_weight_path1 + "best_weights_003_55_7_3_question_2.pth",
model_weight_path2 + "best_weights_003_55_7_3_question_3.pth",
model_weight_path2 + "best_weights_003_55_7_3_question_4.pth",
model_weight_path3 + "best_weights_003_55_7_3_question_5.pth",
model_weight_path3 + "best_weights_003_55_7_3_question_6.pth",
]
model_test_validation(
target_question_columns, train_inputs, test_inputs, x_train, x_test, weight_files
)
del x_train, x_test
del train_inputs, test_inputs
# ### answer
train_inputs = compute_input_arrays(train_df, tokenizer, max_sequence_length=512)
test_inputs = compute_input_arrays(test_df, tokenizer, max_sequence_length=512)
x_train = train_feature.values
x_test = test_feature.values
print(x_train.shape)
print(x_test.shape)
print(len(target_answer_columns))
weight_files = [
model_weight_path1 + "best_weights_003_55_7_3_answer_1.pth",
model_weight_path1 + "best_weights_003_55_7_3_answer_2.pth",
model_weight_path2 + "best_weights_003_55_7_3_answer_3.pth",
model_weight_path2 + "best_weights_003_55_7_3_answer_4.pth",
model_weight_path3 + "best_weights_003_55_7_3_answer_5.pth",
model_weight_path3 + "best_weights_003_55_7_3_answer_6.pth",
]
model_test_validation(
target_answer_columns, train_inputs, test_inputs, x_train, x_test, weight_files
)
del x_train, x_test
del train_inputs, test_inputs
del train_feature, test_feature
del target_question_columns, target_answer_columns
# ### validation
validationT = pd.DataFrame()
for column in target_columns:
preds = validation_datas[column]
count = validation_counts[column]
count = np.where(count < 0.5, 1.0, count)
validationT[column] = preds / count
mean_spearmanr_correlation_score(validationT.values, train_df[target_columns].values)
validationT.head()
# ### test
test_pred_weights
test_predsT = pd.read_csv(f"{folder}sample_submission.csv")
for column in target_columns:
preds = test_pred_datas[column]
weight = test_pred_weights[column]
test_predsT[column] = preds / weight
test_predsT.head()
del target_columns, input_columns
del test_pred_datas, test_pred_weights
del validation_datas, validation_counts
torch.cuda.empty_cache()
gc.collect()
# # 006-55-gpt2-01-1
folder = "../input/google-quest-challenge/"
pretrained_bert = "../input/pretrainedbertpytorch/pretrained-bert-pytorch/gpt2/"
model_weight_path1 = "../input/photostage1/best_weights_006-55-gpt2-01_question_1.pth"
model_weight_path2 = "../input/photostage2/best_weights_006-55-gpt2-01_question_2.pth"
model_weight_path3 = "../input/photostage3/best_weights_006-55-gpt2-01_question_3.pth"
model_weight_path4 = "../input/photostage4/best_weights_006-55-gpt2-01_question_4.pth"
model_weight_path5 = "../input/photostage5/best_weights_006-55-gpt2-01_question_5.pth"
model_weight_path6 = "../input/photostage6/best_weights_006-55-gpt2-01_question_6.pth"
model_weight_path7 = "../input/photostagea1/best_weights_006-55-gpt2-01_answer_1.pth"
model_weight_path8 = "../input/photostagea2/best_weights_006-55-gpt2-01_answer_2.pth"
model_weight_path9 = "../input/photostagea3/best_weights_006-55-gpt2-01_answer_3.pth"
model_weight_path10 = "../input/photostagea4/best_weights_006-55-gpt2-01_answer_4.pth"
model_weight_path11 = "../input/photostagea5/best_weights_006-55-gpt2-01_answer_5.pth"
model_weight_path12 = "../input/photostagea6/best_weights_006-55-gpt2-01_answer_6.pth"
import torch
from torch.utils.data import TensorDataset, DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data.sampler import SubsetRandomSampler
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from torch.nn.utils.weight_norm import weight_norm
from transformers import (
BertTokenizer,
BertModel,
BertForSequenceClassification,
BertConfig,
WEIGHTS_NAME,
CONFIG_NAME,
AdamW,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
GPT2Config,
GPT2Model,
GPT2LMHeadModel,
GPT2Tokenizer,
)
from transformers.modeling_gpt2 import GPT2PreTrainedModel
import transformers
print("transformers:", transformers.__version__)
SEED = 12345
start_time_all = time.time()
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(SEED)
import pickle
def read_pickle_from_file(pickle_file):
with open(pickle_file, "rb") as f:
x = pickle.load(f)
return x
def write_pickle_to_file(pickle_file, x):
with open(pickle_file, "wb") as f:
pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)
def time_to_str(t, mode="min"):
if mode == "min":
t = int(t) / 60
hr = t // 60
min = t % 60
return "%2d hr %02d min" % (hr, min)
elif mode == "sec":
t = int(t)
min = t // 60
sec = t % 60
return "%2d min %02d sec" % (min, sec)
else:
raise NotImplementedError
train_df = pd.read_csv(f"{folder}train.csv")
train_df.shape
test_df = pd.read_csv(f"{folder}test.csv")
test_df.shape
target_table = [
["question_asker_intent_understanding", "question"],
["question_body_critical", "question"],
["question_conversational", "question"],
["question_expect_short_answer", "question"],
["question_fact_seeking", "question"],
["question_has_commonly_accepted_answer", "question"],
["question_interestingness_others", "question"],
["question_interestingness_self", "question"],
["question_multi_intent", "question"],
["question_not_really_a_question", "question"],
["question_opinion_seeking", "question"],
["question_type_choice", "question"],
["question_type_compare", "question"],
["question_type_consequence", "question"],
["question_type_definition", "question"],
["question_type_entity", "question"],
["question_type_instructions", "question"],
["question_type_procedure", "question"],
["question_type_reason_explanation", "question"],
["question_type_spelling", "question"],
["question_well_written", "question"],
["answer_helpful", "answer"],
["answer_level_of_information", "answer"],
["answer_plausible", "answer"],
["answer_relevance", "answer"],
["answer_satisfaction", "answer"],
["answer_type_instructions", "answer"],
["answer_type_procedure", "answer"],
["answer_type_reason_explanation", "answer"],
["answer_well_written", "answer"],
]
input_columns = ["question_title", "question_body", "answer"]
target_question_columns = []
target_answer_columns = []
for table in target_table:
if table[1] == "question":
target_question_columns.append(table[0])
elif table[1] == "answer":
target_answer_columns.append(table[0])
target_columns = target_question_columns + target_answer_columns
print("question:", len(target_question_columns))
print("answer:", len(target_answer_columns))
print("total:", len(target_columns))
import html
for df in [train_df, test_df]:
## domain components
df["domcom"] = df["url"].apply(lambda s: s.split("://")[1].split("/")[0].split("."))
# count components
df["dom_cnt"] = df["domcom"].apply(lambda s: len(s))
# extend length
df["domcom"] = df["domcom"].apply(lambda s: s + ["none", "none"])
# components
for ii in range(0, 4):
df["url_" + str(ii)] = df["domcom"].apply(lambda s: s[ii])
# clean up
df.drop("domcom", axis=1, inplace=True)
df.question_body = df.question_body.apply(html.unescape)
df.answer = df.answer.apply(html.unescape)
df["question_body"] = df["question_body"].apply(lambda s: s.replace("\>", ">"))
df["question_body"] = df["question_body"].apply(lambda s: s.replace("\<", "<"))
df["question_body"] = df["question_body"].apply(lambda s: s.replace("\&", "&"))
df["question_body"] = df["question_body"].apply(lambda s: s.replace("\"", '"'))
train_feature = pd.DataFrame()
test_feature = pd.DataFrame()
import nltk
from nltk.corpus import stopwords
eng_stopwords = set(stopwords.words("english"))
for df, feature in [[train_df, train_feature], [test_df, test_feature]]:
for column in input_columns:
feature[column + "_total_length"] = df[column].apply(len)
feature[column + "_capitals"] = df[column].apply(
lambda comment: sum(1 for c in comment if c.isupper())
)
feature[column + "_caps_vs_length"] = feature.apply(
lambda row: float(row[column + "_capitals"])
/ float(row[column + "_total_length"]),
axis=1,
)
feature[column + "_num_exclamation_marks"] = df[column].apply(
lambda comment: comment.count("!")
)
feature[column + "_num_question_marks"] = df[column].apply(
lambda comment: comment.count("?")
)
feature[column + "_num_punctuation"] = df[column].apply(
lambda comment: sum(comment.count(w) for w in ".,;:")
)
feature[column + "_num_symbols"] = df[column].apply(
lambda comment: sum(comment.count(w) for w in "*&$%")
)
feature[column + "_num_chars"] = df[column].apply(lambda x: len(str(x)))
feature[column + "_num_words"] = df[column].apply(
lambda comment: len(comment.split())
)
feature[column + "_num_unique_words"] = df[column].apply(
lambda comment: len(set(w for w in comment.split()))
)
feature[column + "_words_vs_unique"] = (
feature[column + "_num_unique_words"] / feature[column + "_num_words"]
)
feature[column + "_num_smilies"] = df[column].apply(
lambda comment: sum(comment.count(w) for w in (":-)", ":)", ";-)", ";)"))
)
feature[column + "_num_doubt"] = df[column].apply(
lambda comment: comment.count("not sure")
)
feature[column + "_num_think"] = df[column].apply(
lambda comment: sum(
comment.count(w) for w in ["thinking", "think", "thought"]
)
)
## Number of stopwords in the text ##
feature[column + "_num_stopwords"] = df[column].apply(
lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords])
)
## Number of punctuations in the text ##
feature[column + "_num_punctuations"] = df[column].apply(
lambda x: len([c for c in str(x) if c in string.punctuation])
)
## Number of title case words in the text ##
feature[column + "_num_words_upper"] = df[column].apply(
lambda x: len([w for w in str(x).split() if w.isupper()])
)
nlp = English()
sentencizer = nlp.create_pipe("sentencizer")
nlp.add_pipe(sentencizer)
ans_user_and_category = train_df[
train_df[["answer_user_name", "category"]].duplicated()
][["answer_user_name", "category"]].values
ans_user_and_category.shape
def add_question_metadata_features(text):
doc = nlp(text)
indirect = 0
choice_words = 0
reason_explanation_words = 0
question_count = 0
for sent in doc.sents:
if "?" in sent.text and "?" == sent.text[-1]:
question_count += 1
for token in sent:
if token.text.lower() == "why":
reason_explanation_words += 1
elif token.text.lower() == "or":
choice_words += 1
if question_count == 0:
indirect += 1
return np.array([indirect, question_count, reason_explanation_words, choice_words])
def question_answer_author_same(df):
q_username = df["question_user_name"]
a_username = df["answer_user_name"]
author_same = []
for i in range(len(df)):
if q_username[i] == a_username[i]:
author_same.append(int(1))
else:
author_same.append(int(0))
return author_same
def add_external_features(df, feature):
feature["question_vs_answer_length"] = (
feature["question_body_num_words"] / feature["answer_num_words"]
)
feature["q_a_author_same"] = question_answer_author_same(df)
answer_user_cat = []
for i in df[["answer_user_name", "category"]].values:
if i in ans_user_and_category:
answer_user_cat.append(int(1))
else:
answer_user_cat.append(int(0))
feature["answer_user_cat"] = answer_user_cat
handmade_features = []
for text in df["question_body"].values:
handmade_features.append(add_question_metadata_features(text))
feature = pd.concat(
[
feature,
pd.DataFrame(
handmade_features,
columns=[
"indirect",
"question_count",
"reason_explanation_words",
"choice_words",
],
),
],
axis=1,
)
return feature
train_feature = add_external_features(train_df, train_feature)
test_feature = add_external_features(test_df, test_feature)
for column in input_columns:
print(
"{} | Min: {}, Max: {}".format(
column,
train_feature[column + "_total_length"].min(),
train_feature[column + "_total_length"].max(),
)
)
print("=====")
for column in input_columns:
print(
"{} | Min: {}, Max: {}".format(
column,
test_feature[column + "_total_length"].min(),
test_feature[column + "_total_length"].max(),
)
)
stop_words = nltk.corpus.stopwords.words("english")
symbol = [
"'",
'"',
":",
";",
".",
",",
"-",
"!",
"?",
"'s",
")",
"(",
"...",
"``",
"''",
"/",
"$",
"%",
"*",
"&",
"{",
"}",
"[",
"]",
]
def get_prevalent(texts, top_count=15):
tokenized_sents = [nltk.word_tokenize(i) for i in texts]
tokenized_sents = [flatten for inner in tokenized_sents for flatten in inner]
# fdist = nltk.FreqDist(w for w in tokenized_sents if w not in stop_words + symbol)
fdist = nltk.FreqDist(
w.lower() for w in tokenized_sents if w.lower() not in stop_words + symbol
)
comments = fdist.most_common(top_count)
return [word[0] for word in comments]
for column in input_columns:
words = get_prevalent(train_df[column])
print(column, words)
for word in words:
for df, feature in [[train_df, train_feature], [test_df, test_feature]]:
feature[column + "_num" + word] = df[column].apply(
lambda comment: comment.count(word)
)
find = re.compile(r"^[^.]*")
train_df["netloc"] = train_df["url"].apply(
lambda x: re.findall(find, urlparse(x).netloc)[0]
)
test_df["netloc"] = test_df["url"].apply(
lambda x: re.findall(find, urlparse(x).netloc)[0]
)
count_columns = [
"question_title",
"question_user_name",
"answer_user_name",
"category",
"netloc",
]
for col in count_columns:
value = train_df[col].value_counts()
train_feature[col + "_count"] = train_df[col].map(value)
test_feature[col + "_count"] = test_df[col].map(value).fillna(1)
for col in train_feature.columns:
train_mean = np.nanmean(train_feature[col].values)
train_feature[col].fillna(train_mean, inplace=True)
test_feature[col].fillna(train_mean, inplace=True)
print("train: nan=", np.sum(np.sum(pd.isnull(train_feature))))
print("test : nan=", np.sum(np.sum(pd.isnull(test_feature))))
scaler = MinMaxScaler()
scaler.fit(train_feature)
train_feature = pd.DataFrame(
scaler.transform(train_feature), columns=train_feature.columns
)
test_feature = pd.DataFrame(
scaler.transform(test_feature), columns=test_feature.columns
)
del scaler
gc.collect()
print(train_feature.shape)
print(test_feature.shape)
print("time: {}".format(time_to_str((time.time() - start_time_all), "min")))
def convert_label(df, result_df, column, count=10):
labels = [
        (df[column].values >= (rate / count)).astype(int)
for rate in range(1, count + 1)
]
columns = ["{}_{}".format(column, i) for i in range(count)]
labels = np.array(labels).T
label_df = pd.DataFrame(labels, columns=columns)
result_df = pd.concat((result_df, label_df), axis=1)
return result_df
def convert_label_origin(df, result_df, column, count=10):
columns = ["{}_{}".format(column, i) for i in range(count)]
labels = df[columns].values
values = []
for i in range(len(labels)):
value = 0.0
for j in range(count):
if labels[i][j] > 0.5:
value = (j + 1) / count
values.append(value)
label_df = pd.DataFrame(values, columns=[column])
result_df = pd.concat((result_df, label_df), axis=1)
return result_df
label_convert_count = 12
train_feature2 = pd.DataFrame()
test_feature2 = pd.DataFrame()
for column in train_feature.columns:
train_feature2 = convert_label(
train_feature, train_feature2, column, label_convert_count
)
test_feature2 = convert_label(
test_feature, test_feature2, column, label_convert_count
)
train_feature = train_feature2.copy()
test_feature = test_feature2.copy()
del train_feature2, test_feature2
print(train_feature.shape)
print(test_feature.shape)
features = ["question_title", "question_user_name", "answer_user_name"]
limits = [6, 8, 8]
# features = ['question_title', 'question_user_name', 'answer_user_name', 'category', 'url_0', 'url_1']
# limits = [6, 8, 8, 1, 60, 1]
for col, limit in zip(features, limits):
value = train_df[col].value_counts()
train_df["item_count"] = train_df[col].map(value)
train_df["item_value"] = train_df[col].copy()
train_df.loc[train_df.item_count < limit, "item_value"] = "limit_abcdefg123456789"
test_df["item_count"] = test_df[col].map(value).fillna(1)
test_df["item_value"] = test_df[col].copy()
test_df.loc[test_df.item_count < limit, "item_value"] = "limit_abcdefg123456789"
lb = LabelBinarizer()
lb.fit(train_df["item_value"])
encode_train = lb.transform(train_df["item_value"])
encode_test = lb.transform(test_df["item_value"])
columns = ["LabelBinarizer_{}".format(i) for i in range(encode_train.shape[1])]
print("{}: {}".format(col, len(train_df["item_value"].value_counts())))
encode_train = pd.DataFrame(encode_train, columns=columns)
train_feature = pd.concat((train_feature, encode_train), axis=1)
encode_test = pd.DataFrame(encode_test, columns=columns)
test_feature = pd.concat((test_feature, encode_test), axis=1)
del lb
train_df.drop(["item_count", "item_count"], axis=1, inplace=True)
test_df.drop(["item_count", "item_count"], axis=1, inplace=True)
features = ["url_0", "category"]
enc = OneHotEncoder(handle_unknown="ignore")
enc.fit(train_df[features])
encode_train = enc.transform(train_df[features]).toarray()
encode_test = enc.transform(test_df[features]).toarray()
columns = ["encode_{}".format(i) for i in range(encode_train.shape[1])]
encode_train = pd.DataFrame(encode_train, columns=columns)
train_feature = pd.concat((train_feature, encode_train), axis=1)
encode_test = pd.DataFrame(encode_test, columns=columns)
test_feature = pd.concat((test_feature, encode_test), axis=1)
del encode_train, encode_test, enc
print(train_feature.shape)
print(test_feature.shape)
print("time: {}".format(time_to_str((time.time() - start_time_all), "min")))
for col in train_feature.columns:
train_mean = np.nanmean(train_feature[col].values)
train_feature[col].fillna(train_mean, inplace=True)
test_feature[col].fillna(train_mean, inplace=True)
print("train: nan=", np.sum(np.sum(pd.isnull(train_feature))))
print("test : nan=", np.sum(np.sum(pd.isnull(test_feature))))
# https://www.kaggle.com/chenshengabc/from-quest-encoding-ensemble-a-little-bit-differen
puncts = [
",",
".",
'"',
":",
")",
"(",
"-",
"!",
"?",
"|",
";",
"'",
"$",
"&",
"/",
"[",
"]",
">",
"%",
"=",
"#",
"*",
"+",
"\\",
"•",
"~",
"@",
"£",
"·",
"_",
"{",
"}",
"©",
"^",
"®",
"`",
"<",
"→",
"°",
"€",
"™",
"›",
"♥",
"←",
"×",
"§",
"″",
"′",
"Â",
"█",
"½",
"à",
"…",
"\xa0",
"\t",
"“",
"★",
"”",
"–",
"●",
"â",
"►",
"−",
"¢",
"²",
"¬",
"░",
"¶",
"↑",
"±",
"¿",
"▾",
"═",
"¦",
"║",
"―",
"¥",
"▓",
"—",
"‹",
"─",
"\u3000",
"\u202f",
"▒",
":",
"¼",
"⊕",
"▼",
"▪",
"†",
"■",
"’",
"▀",
"¨",
"▄",
"♫",
"☆",
"é",
"¯",
"♦",
"¤",
"▲",
"è",
"¸",
"¾",
"Ã",
"⋅",
"‘",
"∞",
"«",
"∙",
")",
"↓",
"、",
"│",
"(",
"»",
",",
"♪",
"╩",
"╚",
"³",
"・",
"╦",
"╣",
"╔",
"╗",
"▬",
"❤",
"ï",
"Ø",
"¹",
"≤",
"‡",
"√",
]
mispell_dict = {
"aren't": "are not",
"can't": "cannot",
"couldn't": "could not",
"couldnt": "could not",
"didn't": "did not",
"doesn't": "does not",
"doesnt": "does not",
"don't": "do not",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"havent": "have not",
"he'd": "he would",
"he'll": "he will",
"he's": "he is",
"i'd": "I would",
"i'd": "I had",
"i'll": "I will",
"i'm": "I am",
"isn't": "is not",
"it's": "it is",
"it'll": "it will",
"i've": "I have",
"let's": "let us",
"mightn't": "might not",
"mustn't": "must not",
"shan't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"shouldn't": "should not",
"shouldnt": "should not",
"that's": "that is",
"thats": "that is",
"there's": "there is",
"theres": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"theyre": "they are",
"they've": "they have",
"we'd": "we would",
"we're": "we are",
"weren't": "were not",
"we've": "we have",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"where's": "where is",
"who'd": "who would",
"who'll": "who will",
"who're": "who are",
"who's": "who is",
"who've": "who have",
"won't": "will not",
"wouldn't": "would not",
"you'd": "you would",
"you'll": "you will",
"you're": "you are",
"you've": "you have",
"'re": " are",
"wasn't": "was not",
"we'll": " will",
"didn't": "did not",
"tryin'": "trying",
}
def clean_text(x):
x = str(x).replace("\n", "")
for punct in puncts:
x = x.replace(punct, f" {punct} ")
return x
def clean_numbers(x):
x = re.sub("[0-9]{5,}", "#####", x)
x = re.sub("[0-9]{4}", "####", x)
x = re.sub("[0-9]{3}", "###", x)
x = re.sub("[0-9]{2}", "##", x)
return x
from nltk.tokenize.treebank import TreebankWordTokenizer
tokenizer = TreebankWordTokenizer()
def handle_contractions(x):
x = tokenizer.tokenize(x)
return x
def fix_quote(x):
x = [x_[1:] if x_.startswith("'") else x_ for x_ in x]
x = " ".join(x)
return x
def _get_mispell(mispell_dict):
mispell_re = re.compile("(%s)" % "|".join(mispell_dict.keys()))
return mispell_dict, mispell_re
def replace_typical_misspell(text):
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)
def clean_data(df, columns: list):
for col in columns:
# df[col] = df[col].apply(lambda x: clean_numbers(x))
df[col] = df[col].apply(lambda x: clean_text(x.lower()))
df[col] = df[col].apply(lambda x: replace_typical_misspell(x))
df[col] = df[col].apply(lambda x: handle_contractions(x))
df[col] = df[col].apply(lambda x: fix_quote(x))
return df
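# Quick sanity check of the cleaning pipeline on a throwaway frame (illustrative only;
# the order inside clean_data is: lowercase + punctuation spacing, misspelling fixes,
# Treebank tokenization, then quote stripping/rejoining).
_demo_clean = pd.DataFrame({"question_title": ["Can't this be simpler?"]})
print(clean_data(_demo_clean, ["question_title"])["question_title"].iloc[0])
del _demo_clean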
train_df = clean_data(train_df, input_columns)
test_df = clean_data(test_df, input_columns)
del tokenizer
# 'Cyclical Learning Rates for Training Neural Networks'- Leslie N. Smith, arxiv 2017
# https://arxiv.org/abs/1506.01186
# https://github.com/bckenstler/CLR
class CyclicScheduler1:
def __init__(self, min_lr=0.001, max_lr=0.01, period=10):
super(CyclicScheduler1, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
def __call__(self, time):
# sawtooth
# r = (1-(time%self.period)/self.period)
# cosine
time = time % self.period
r = (np.cos(time / self.period * np.pi) + 1) / 2
lr = self.min_lr + r * (self.max_lr - self.min_lr)
return lr
def __str__(self):
string = "CyclicScheduler\n" + "min_lr=%0.3f, max_lr=%0.3f, period=%8.1f" % (
self.min_lr,
self.max_lr,
self.period,
)
return string
class CyclicScheduler2:
def __init__(
self, min_lr=0.001, max_lr=0.01, period=10, max_decay=1.0, warm_start=0
):
super(CyclicScheduler2, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time < self.warm_start:
return self.max_lr
# cosine
self.cycle = (time - self.warm_start) // self.period
time = (time - self.warm_start) % self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr * (self.max_decay**self.cycle)
r = (np.cos(time / period * np.pi) + 1) / 2
lr = min_lr + r * (max_lr - min_lr)
return lr
def __str__(self):
string = "CyclicScheduler\n" + "min_lr=%0.4f, max_lr=%0.4f, period=%8.1f" % (
self.min_lr,
self.max_lr,
self.period,
)
return string
# tanh curve
class CyclicScheduler3:
def __init__(
self, min_lr=0.001, max_lr=0.01, period=10, max_decay=1.0, warm_start=0
):
super(CyclicScheduler3, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time < self.warm_start:
return self.max_lr
# cosine
self.cycle = (time - self.warm_start) // self.period
time = (time - self.warm_start) % self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr * (self.max_decay**self.cycle)
r = (np.tanh(-time / period * 16 + 8) + 1) * 0.5
lr = min_lr + r * (max_lr - min_lr)
return lr
def __str__(self):
string = "CyclicScheduler\n" + "min_lr=%0.3f, max_lr=%0.3f, period=%8.1f" % (
self.min_lr,
self.max_lr,
self.period,
)
return string
class NullScheduler:
def __init__(self, lr=0.01):
super(NullScheduler, self).__init__()
self.lr = lr
self.cycle = 0
def __call__(self, time):
return self.lr
def __str__(self):
string = "NullScheduler\n" + "lr=%0.5f " % (self.lr)
return string
# net ------------------------------------
# https://github.com/pytorch/examples/blob/master/imagenet/main.py ###############
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def get_learning_rate(optimizer):
lr = []
for param_group in optimizer.param_groups:
lr += [param_group["lr"]]
assert len(lr) == 1 # we support only one param_group
lr = lr[0]
return lr
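# Minimal usage sketch (illustrative only; the linear model and epoch count below are
# placeholders): the scheduler is called once per epoch and its value is pushed into
# the optimizer via adjust_learning_rate.
_demo_model = nn.Linear(4, 1)
_demo_opt = torch.optim.SGD(_demo_model.parameters(), lr=1e-3)
_demo_sched = CyclicScheduler2(
    min_lr=2e-6, max_lr=2e-5, period=20, warm_start=0, max_decay=0.9
)
for _demo_epoch in range(3):
    adjust_learning_rate(_demo_opt, _demo_sched(_demo_epoch))
    # ... forward / backward / optimizer.step() would go here in a real loop ...
print(get_learning_rate(_demo_opt))
del _demo_model, _demo_opt, _demo_sched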
class SpatialDropout(nn.Dropout2d):
def forward(self, x):
x = x.unsqueeze(2) # (N, T, 1, K)
x = x.permute(0, 3, 2, 1) # (N, K, 1, T)
x = super(SpatialDropout, self).forward(
x
) # (N, K, 1, T), some features are masked
x = x.permute(0, 3, 2, 1) # (N, T, 1, K)
x = x.squeeze(2) # (N, T, K)
return x
class NeuralNet3(nn.Module):
def __init__(self, num_features, num_labels, pretrained_bert):
super(NeuralNet3, self).__init__()
self.config = transformers.GPT2Config.from_pretrained(
pretrained_bert, output_hidden_states=False
)
self.gptmodel = transformers.GPT2Model(self.config)
self.gpt = self.gptmodel.from_pretrained(pretrained_bert, config=self.config)
self.num_features = num_features
self.num_labels = num_labels
self.encoded_dropout = SpatialDropout(0.2)
self.pooled_dropout = nn.Dropout(0.2)
self.feature_linear = nn.Sequential(
nn.Linear(self.num_features, self.num_features),
nn.ReLU(inplace=True),
nn.Dropout(0.2),
)
"""
self.feature_linear = nn.Sequential(
nn.Linear( self.num_features, self.num_features ),
nn.BatchNorm1d( self.num_features ),
nn.ReLU( inplace=True ),
nn.Linear( self.num_features, self.num_features2 ),
)
"""
dense_hidden_units = self.gpt.config.hidden_size * 3 + self.num_features
self.linear1 = nn.Linear(dense_hidden_units, dense_hidden_units)
self.linear2 = nn.Linear(dense_hidden_units, dense_hidden_units)
self.classifier = nn.Sequential(
nn.Linear(dense_hidden_units, num_labels),
)
def forward(self, ids, masks, segments, feature):
feature_output = self.feature_linear(feature)
        last_hidden_state, present = self.gpt(
            input_ids=ids, attention_mask=masks, token_type_ids=segments
        )
# sequence_output = self.encoded_dropout(sequence_output)
# pooled_output = self.pooled_dropout(pooled_output)
# h12 = hidden_states[-1][:, 0].reshape((-1, 1, 768))
# h11 = hidden_states[-2][:, 0].reshape((-1, 1, 768))
# h10 = hidden_states[-3][:, 0].reshape((-1, 1, 768))
# h9 = hidden_states[-4][:, 0].reshape((-1, 1, 768))
# hidden_states = torch.cat([h9, h10, h11, h12], 1)
last_hidden_state = self.encoded_dropout(last_hidden_state)
avg_pool = torch.mean(last_hidden_state, 1)
max_pool, _ = torch.max(last_hidden_state, 1)
h_conc = torch.cat(
(avg_pool, max_pool, last_hidden_state[:, -1, :], feature_output), 1
)
h_conc_linear = F.relu(self.linear1(h_conc))
hidden = h_conc + h_conc_linear
h_conc_linear = F.relu(self.linear2(hidden))
hidden = hidden + h_conc_linear
return self.classifier(hidden)
def compute_input_title_question(df, tokenizer, max_sequence_length=512):
input_ids, input_masks, input_segments = [], [], []
for _, instance in df.iterrows():
title = tokenizer.tokenize(instance.question_title)
question = tokenizer.tokenize(instance.question_body)
if (len(title) + len(question) + 3) > max_sequence_length:
if len(title) > 30:
title = title[:30]
question_len = max_sequence_length - len(title) - 3
question = question[:question_len]
# token = ["[CLS]"] + title + ["[SEP]"] + question + ["[SEP]"]
# token_ids = tokenizer.convert_tokens_to_ids(token)
title_ids = tokenizer.convert_tokens_to_ids(title)
question_ids = tokenizer.convert_tokens_to_ids(question)
cls_ids = tokenizer.convert_tokens_to_ids(["[CLS]"])
sep_ids = tokenizer.convert_tokens_to_ids(["[SEP]"])
token_ids = title_ids + question_ids
padding = [0] * (max_sequence_length - len(token_ids))
ids = token_ids + padding
masks = [1] * len(token_ids) + padding
len_q = len(question_ids)
len_qf = int(len(question_ids) / 2)
len_ql = len_q - len_qf
segments = [0] * (len(title_ids)) + [1] * (len(question_ids)) + padding
# segments = [0]*(len(title_ids)+2) + [1]*(len_qf+1)+[2]*(len_ql) + padding
input_ids.append(ids)
input_masks.append(masks)
input_segments.append(segments)
return [
torch.from_numpy(np.asarray(input_ids, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_masks, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_segments, dtype=np.int32)).long(),
]
def compute_input_title_answer(df, tokenizer, max_sequence_length=512):
input_ids, input_masks, input_segments = [], [], []
for _, instance in df.iterrows():
title = tokenizer.tokenize(instance.question_title)
answer = tokenizer.tokenize(instance.answer)
if (len(title) + len(answer) + 3) > max_sequence_length:
if len(title) > 30:
title = title[:30]
answer_len = max_sequence_length - len(title) - 3
answer = answer[:answer_len]
# token = ["[CLS]"] + title + ["[SEP]"] + answer + ["[SEP]"]
# token_ids = tokenizer.convert_tokens_to_ids(token)
title_ids = tokenizer.convert_tokens_to_ids(title)
answer_ids = tokenizer.convert_tokens_to_ids(answer)
cls_ids = tokenizer.convert_tokens_to_ids(["[CLS]"])
sep_ids = tokenizer.convert_tokens_to_ids(["[SEP]"])
token_ids = title_ids + answer_ids
padding = [0] * (max_sequence_length - len(token_ids))
ids = token_ids + padding
masks = [1] * len(token_ids) + padding
segments = [0] * (len(title_ids)) + [1] * (len(answer_ids)) + padding
input_ids.append(ids)
input_masks.append(masks)
input_segments.append(segments)
return [
torch.from_numpy(np.asarray(input_ids, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_masks, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_segments, dtype=np.int32)).long(),
]
def compute_input_arrays(df, tokenizer, max_sequence_length=512):
input_ids, input_masks, input_segments = [], [], []
t_max_len = 35
for _, instance in df.iterrows():
title = tokenizer.tokenize(instance.question_title)
question = tokenizer.tokenize(instance.question_body)
answer = tokenizer.tokenize(instance.answer)
if (len(title) + len(question) + len(answer) + 4) > max_sequence_length:
if len(title) > t_max_len:
title = title[:t_max_len]
question_len = (max_sequence_length - len(title) - 4) // 2
question = question[:question_len]
answer_len = max_sequence_length - len(title) - len(question) - 4
answer = answer[:answer_len]
# token = ["[CLS]"] + title + ["[SEP]"] + question + ["[SEP]"] + answer + ["[SEP]"]
# token_ids = tokenizer.convert_tokens_to_ids(token)
title_ids = tokenizer.convert_tokens_to_ids(title)
question_ids = tokenizer.convert_tokens_to_ids(question)
answer_ids = tokenizer.convert_tokens_to_ids(answer)
cls_ids = tokenizer.convert_tokens_to_ids(["[CLS]"])
sep_ids = tokenizer.convert_tokens_to_ids(["[SEP]"])
token_ids = (
cls_ids
+ title_ids
+ sep_ids
+ question_ids
+ sep_ids
+ answer_ids
+ sep_ids
)
padding = [0] * (max_sequence_length - len(token_ids))
ids = token_ids + padding
masks = [1] * len(token_ids) + padding
segments = (
[0] * (len(title_ids) + len(question_ids) + 3)
+ [1] * (len(answer_ids) + 1)
+ padding
)
input_ids.append(ids)
input_masks.append(masks)
input_segments.append(segments)
return [
torch.from_numpy(np.asarray(input_ids, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_masks, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_segments, dtype=np.int32)).long(),
]
from sklearn.model_selection import GroupKFold
n_splits = 6
"""
x_train = train_feature.values
y_train = train_df[target_columns].values
cv = KFold( n_splits=n_splits, random_state=SEED )
kfold_split = list( cv.split( x_train, y_train ) )"""
cv = GroupKFold(n_splits=n_splits)
kfold_split = list(cv.split(X=train_df.question_body, groups=train_df.question_body))
"""write_pickle_to_file( 'kfold_split_index.pkl', kfold_split )
"""
# kfold_split = read_pickle_from_file( model_weight_path1 + 'kfold_split_index.pkl' )
test_pred_datas = {}
test_pred_weights = {}
for column in target_columns:
test_pred_datas[column] = np.zeros(len(test_df))
test_pred_weights[column] = 0.0
def add_test_pred_data(prediction, columns, weight):
for column_idx, column in enumerate(columns):
test_pred_datas[column] += weight * prediction[:, column_idx]
test_pred_weights[column] += weight
validation_datas = {}
validation_counts = {}
for column in target_columns:
validation_datas[column] = np.zeros(len(train_df))
validation_counts[column] = np.zeros(len(train_df))
def add_validation_data(prediction, columns, idx):
for column_idx, column in enumerate(columns):
validation_datas[column][idx] += prediction[:, column_idx]
validation_counts[column][idx] += 1.0
def mean_spearmanr_correlation_score(y_true, y_pred):
num_labels = y_pred.shape[1]
score = np.nanmean(
[
spearmanr(y_pred[:, idx], y_true[:, idx]).correlation
for idx in range(num_labels)
]
)
return score
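# Tiny sanity check with made-up values: both columns below share the same ordering in
# trues and preds, so the mean column-wise Spearman rho comes out as 1.0.
_demo_true = np.array([[0.1, 0.9], [0.5, 0.5], [0.9, 0.1]])
_demo_pred = np.array([[0.2, 0.8], [0.4, 0.6], [0.7, 0.3]])
print(mean_spearmanr_correlation_score(_demo_true, _demo_pred))
del _demo_true, _demo_pred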
class QuestDataset(torch.utils.data.Dataset):
def __init__(self, inputs, features, labels=None):
self.inputs = inputs
self.features = features
if labels is not None:
self.labels = labels
else:
self.labels = None
def __getitem__(self, idx):
input_ids = self.inputs[0][idx]
input_masks = self.inputs[1][idx]
input_segments = self.inputs[2][idx]
input_features = self.features[idx]
if self.labels is not None: # targets
input_labels = self.labels[idx]
return input_ids, input_masks, input_segments, input_features, input_labels
return input_ids, input_masks, input_segments, input_features
def __len__(self):
return len(self.inputs[0])
def model_test(test_inputs, x_test, label_columns, weight_files):
batch_size = 6
test_dataset = QuestDataset(test_inputs, x_test, None)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size, shuffle=False
)
num_features = x_test.shape[1]
num_labels = len(label_columns)
for fname in weight_files:
model = NeuralNet3(num_features, num_labels, pretrained_bert)
model.cuda()
model.load_state_dict(torch.load(fname))
model.eval()
test_preds_fold = []
with torch.no_grad():
for ids, masks, segments, features in test_loader:
ids = ids.cuda()
masks = masks.cuda()
segments = segments.cuda()
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(ids, masks, segments, features)
test_preds_fold.extend(
torch.sigmoid(y_pred).cpu().data.numpy().tolist()
)
test_preds_fold = np.array(test_preds_fold)
add_test_pred_data(test_preds_fold, label_columns, 1.0)
del model
torch.cuda.empty_cache()
gc.collect()
del test_dataset, test_loader
def model_validation(train_inputs, x_train, label_columns, weight_files):
if len(kfold_split) != len(weight_files):
return
batch_size = 6
    num_features = x_train.shape[1]
num_labels = len(label_columns)
for k, (train_idx, valid_idx) in enumerate(kfold_split):
x_train_valid = x_train[valid_idx]
train_inputs_valid = [x[valid_idx] for x in train_inputs]
valid_dataset = QuestDataset(train_inputs_valid, x_train_valid, None)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False
)
model = NeuralNet3(num_features, num_labels, pretrained_bert)
model.cuda()
fname = weight_files[k]
model.load_state_dict(torch.load(fname))
model.eval()
valid_preds_fold = []
with torch.no_grad():
for ids, masks, segments, features in valid_loader:
ids = ids.cuda()
masks = masks.cuda()
segments = segments.cuda()
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(ids, masks, segments, features)
valid_preds_fold.extend(
torch.sigmoid(y_pred).cpu().data.numpy().tolist()
)
valid_preds_fold = np.array(valid_preds_fold)
add_validation_data(valid_preds_fold, label_columns, valid_idx)
del model, valid_dataset, valid_loader
torch.cuda.empty_cache()
gc.collect()
def model_test_validation(
label_columns, train_inputs, test_inputs, x_train, x_test, weight_files
):
if len(kfold_split) != len(weight_files):
return
batch_size = 6
num_features = x_test.shape[1]
num_labels = len(label_columns)
test_dataset = QuestDataset(test_inputs, x_test, None)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size, shuffle=False
)
for k, (train_idx, valid_idx) in enumerate(kfold_split):
fname = weight_files[k]
print(k + 1, fname)
x_train_valid = x_train[valid_idx]
train_inputs_valid = [x[valid_idx] for x in train_inputs]
valid_dataset = QuestDataset(train_inputs_valid, x_train_valid, None)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False
)
model = NeuralNet3(num_features, num_labels, pretrained_bert)
model.cuda()
model.load_state_dict(torch.load(fname))
model.eval()
# ====================
# validation
valid_preds_fold = []
with torch.no_grad():
for ids, masks, segments, features in valid_loader:
ids = ids.cuda()
masks = masks.cuda()
segments = segments.cuda()
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(ids, masks, segments, features)
valid_preds_fold.extend(
torch.sigmoid(y_pred).cpu().data.numpy().tolist()
)
valid_preds_fold = np.array(valid_preds_fold)
add_validation_data(valid_preds_fold, label_columns, valid_idx)
# ====================
# test
test_preds_fold = []
with torch.no_grad():
for ids, masks, segments, features in test_loader:
ids = ids.cuda()
masks = masks.cuda()
segments = segments.cuda()
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(ids, masks, segments, features)
test_preds_fold.extend(
torch.sigmoid(y_pred).cpu().data.numpy().tolist()
)
test_preds_fold = np.array(test_preds_fold)
add_test_pred_data(test_preds_fold, label_columns, 1.0)
del model, valid_dataset, valid_loader
torch.cuda.empty_cache()
gc.collect()
del test_dataset, test_loader
tokenizer = transformers.GPT2Tokenizer.from_pretrained(pretrained_bert)
print("time: {}".format(time_to_str((time.time() - start_time_all), "min")))
train_inputs = compute_input_title_question(
train_df, tokenizer, max_sequence_length=512
)
test_inputs = compute_input_title_question(test_df, tokenizer, max_sequence_length=512)
x_train = train_feature.values
x_test = test_feature.values
print(x_train.shape)
print(x_test.shape)
print(len(target_question_columns))
weight_files = [
model_weight_path1,
model_weight_path2,
model_weight_path3,
model_weight_path4,
model_weight_path5,
model_weight_path6,
]
model_test_validation(
target_question_columns, train_inputs, test_inputs, x_train, x_test, weight_files
)
del x_train, x_test
del train_inputs, test_inputs
train_inputs = compute_input_arrays(train_df, tokenizer, max_sequence_length=512)
test_inputs = compute_input_arrays(test_df, tokenizer, max_sequence_length=512)
x_train = train_feature.values
x_test = test_feature.values
print(x_train.shape)
print(x_test.shape)
print(len(target_answer_columns))
weight_files = [
model_weight_path7,
model_weight_path8,
model_weight_path9,
model_weight_path10,
model_weight_path11,
model_weight_path12,
]
model_test_validation(
target_answer_columns, train_inputs, test_inputs, x_train, x_test, weight_files
)
del x_train, x_test
del train_inputs, test_inputs
del train_feature, test_feature
del target_question_columns, target_answer_columns
for column in target_columns:
print(
np.sum(validation_counts[column]),
np.min(validation_counts[column]),
np.max(validation_counts[column]),
column,
)
validationTG = pd.DataFrame()
for column in target_columns:
preds = validation_datas[column]
count = validation_counts[column]
count = np.where(count < 0.5, 1.0, count)
validationTG[column] = preds / count
mean_spearmanr_correlation_score(validationTG.values, train_df[target_columns].values)
validationTG.head()
test_predsTG = pd.read_csv(f"{folder}sample_submission.csv")
for column in target_columns:
preds = test_pred_datas[column]
weight = test_pred_weights[column]
test_predsTG[column] = preds / weight
# output = rankdata( output )
# max_val = np.max(output) + 1
# output = output / max_val + 1e-12
test_predsTG.head()
del target_columns, input_columns
del test_pred_datas, test_pred_weights
del validation_datas, validation_counts
torch.cuda.empty_cache()
gc.collect()
# # QUEST_005_04_1
# # QUEST_005_07_DISTILBERT+LSTM (DISTILBERT)
folder = "../input/google-quest-challenge/"
pretrained_distilbert_base_uncased = (
"../input/pretrainedbertpytorch/pretrained-bert-pytorch/distilbert-base-uncased/"
)
universal_sentence_encoder_path = "../input/universalsentenceencoderlarge4/"
import os
import re
import gc
import pickle
import numpy as np
import pandas as pd
import random
import copy
import string
import time
import nltk
from nltk.tag import pos_tag
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer
from sklearn.model_selection import KFold, GroupKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import math
from tqdm import tqdm
from spacy.lang.en import English
from urllib.parse import urlparse
import math
import warnings
warnings.simplefilter("ignore")
import torch
from torch.utils.data import TensorDataset, DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data.sampler import SubsetRandomSampler
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from torch.nn.utils.weight_norm import weight_norm
from scipy.stats import spearmanr
import tensorflow as tf
import tensorflow_hub as hub
import transformers
print("transformers:", transformers.__version__)
from radam import RAdam
SEED = 12345
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(SEED)
import pickle
def read_pickle_from_file(pickle_file):
with open(pickle_file, "rb") as f:
x = pickle.load(f)
return x
def write_pickle_to_file(pickle_file, x):
with open(pickle_file, "wb") as f:
pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)
# ## Load data
train_df = pd.read_csv(f"{folder}train.csv")
train_df.shape
test_df = pd.read_csv(f"{folder}test.csv")
test_df.shape
# train_df.head(3).T
# ## Extract target variables
target_columns = [
"question_asker_intent_understanding",
"question_body_critical",
"question_conversational",
"question_expect_short_answer",
"question_fact_seeking",
"question_has_commonly_accepted_answer",
"question_interestingness_others",
"question_interestingness_self",
"question_multi_intent",
"question_not_really_a_question",
"question_opinion_seeking",
"question_type_choice",
"question_type_compare",
"question_type_consequence",
"question_type_definition",
"question_type_entity",
"question_type_instructions",
"question_type_procedure",
"question_type_reason_explanation",
"question_type_spelling",
"question_well_written",
"answer_helpful",
"answer_level_of_information",
"answer_plausible",
"answer_relevance",
"answer_satisfaction",
"answer_type_instructions",
"answer_type_procedure",
"answer_type_reason_explanation",
"answer_well_written",
]
input_columns = ["question_title", "question_body", "answer"]
print("target_columns:", len(target_columns))
print("input_columns:", len(input_columns))
for df in [train_df, test_df]:
## domain components
df["domcom"] = df["url"].apply(lambda s: s.split("://")[1].split("/")[0].split("."))
# count components
df["dom_cnt"] = df["domcom"].apply(lambda s: len(s))
# extend length
df["domcom"] = df["domcom"].apply(lambda s: s + ["none", "none"])
# components
for ii in range(0, 4):
df["url_" + str(ii)] = df["domcom"].apply(lambda s: s[ii])
# clean up
df.drop("domcom", axis=1, inplace=True)
# ## Features
train_feature = pd.DataFrame()
test_feature = pd.DataFrame()
features = ["url_0", "category"]
enc = OneHotEncoder(handle_unknown="ignore")
enc.fit(train_df[features])
encode_train = enc.transform(train_df[features]).toarray()
encode_test = enc.transform(test_df[features]).toarray()
columns = ["encode_{}".format(i) for i in range(encode_train.shape[1])]
encode_train = pd.DataFrame(encode_train, columns=columns)
train_feature = pd.concat((train_feature, encode_train), axis=1)
encode_test = pd.DataFrame(encode_test, columns=columns)
test_feature = pd.concat((test_feature, encode_test), axis=1)
del encode_train, encode_test, enc
print(train_feature.shape)
print(test_feature.shape)
# ### DISTILBERT
class QuestBertDataset(torch.utils.data.Dataset):
def __init__(self, inputs, labels=None):
self.inputs = inputs
if labels is not None:
self.labels = labels
else:
self.labels = None
def __getitem__(self, idx):
input_ids = self.inputs[0][idx]
input_masks = self.inputs[1][idx]
if self.labels is not None: # targets
input_labels = self.labels[idx]
return input_ids, input_masks, input_labels
return input_ids, input_masks
def __len__(self):
return len(self.inputs[0])
def compute_input_text(texts, tokenizer, max_sequence_length=512):
input_ids = []
input_masks = []
for text in texts:
text = tokenizer.tokenize(text)
text_ids = tokenizer.convert_tokens_to_ids(text)
cls_ids = tokenizer.convert_tokens_to_ids(["[CLS]"])
sep_ids = tokenizer.convert_tokens_to_ids(["[SEP]"])
if (len(text_ids) + 2) > max_sequence_length:
text_ids = text_ids[: max_sequence_length - 2]
token_ids = cls_ids + text_ids + sep_ids
padding = [0] * (max_sequence_length - len(token_ids))
ids = token_ids + padding
masks = [1] * len(token_ids) + padding
input_ids.append(ids)
input_masks.append(masks)
return [
torch.from_numpy(np.asarray(input_ids, dtype=np.int32)).long(),
torch.from_numpy(np.asarray(input_masks, dtype=np.int32)).long(),
]
def get_bert_feature(tokenizer, model, df, column):
print(column)
inputs = compute_input_text(df[column].values, tokenizer)
dataset = QuestBertDataset(inputs, None)
loader = torch.utils.data.DataLoader(
dataset, batch_size=6, shuffle=False, drop_last=False
)
preds_fold = []
model.eval()
with torch.no_grad():
for ids, masks in tqdm(loader):
ids = ids.cuda()
masks = masks.cuda()
outputs = model(input_ids=ids, attention_mask=masks)
x = outputs[0][:, 0, :]
preds_fold.extend(x.cpu().data.numpy().tolist())
preds_fold = np.array(preds_fold)
return preds_fold
tokenizer = transformers.DistilBertTokenizer.from_pretrained(
pretrained_distilbert_base_uncased
)
model = transformers.DistilBertModel.from_pretrained(pretrained_distilbert_base_uncased)
model.cuda()
print("train:")
train_question_title_dense = get_bert_feature(
tokenizer, model, train_df, "question_title"
)
train_question_body_dense = get_bert_feature(
tokenizer, model, train_df, "question_body"
)
train_answer_dense = get_bert_feature(tokenizer, model, train_df, "answer")
print("test:")
test_question_title_dense = get_bert_feature(
tokenizer, model, test_df, "question_title"
)
test_question_body_dense = get_bert_feature(tokenizer, model, test_df, "question_body")
test_answer_dense = get_bert_feature(tokenizer, model, test_df, "answer")
del tokenizer, model
# ### Universal Sentence Encoder
embed = hub.load(universal_sentence_encoder_path)
embeddings_train = {}
embeddings_test = {}
for text in input_columns:
print(text)
    train_text = train_df[text].str.replace("?", ".", regex=False)
    train_text = train_text.str.replace("!", ".", regex=False).tolist()
    test_text = test_df[text].str.replace("?", ".", regex=False)
    test_text = test_text.str.replace("!", ".", regex=False).tolist()
curr_train_emb = []
curr_test_emb = []
batch_size = 4
ind = 0
while ind * batch_size < len(train_text):
curr_train_emb.append(
embed(train_text[ind * batch_size : (ind + 1) * batch_size])[
"outputs"
].numpy()
)
ind += 1
ind = 0
while ind * batch_size < len(test_text):
curr_test_emb.append(
embed(test_text[ind * batch_size : (ind + 1) * batch_size])[
"outputs"
].numpy()
)
ind += 1
embeddings_train[text + "_embedding"] = np.vstack(curr_train_emb)
embeddings_test[text + "_embedding"] = np.vstack(curr_test_emb)
del embed
gc.collect()
embeddings_train_df = pd.DataFrame()
embeddings_test_df = pd.DataFrame()
l2_dist = lambda x, y: np.power(x - y, 2).sum(axis=1)
cos_dist = lambda x, y: (x * y).sum(axis=1)
abs_dist = lambda x, y: np.abs(x - y).sum(axis=1)
sum_dist = lambda x, y: (x + y).sum(axis=1)
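# Note: cos_dist is a raw dot product; it matches cosine similarity only when the
# embeddings are (approximately) unit-normalized, which is assumed for the USE vectors.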
dist_columns = [
["question_title_embedding", "answer_embedding"],
["question_body_embedding", "answer_embedding"],
["question_body_embedding", "question_title_embedding"],
]
for i, columns in enumerate(dist_columns):
embeddings_train_df[f"l2_dist_embedding_{i}"] = l2_dist(
embeddings_train[columns[0]], embeddings_train[columns[1]]
)
embeddings_train_df[f"cos_dist_embedding_{i}"] = cos_dist(
embeddings_train[columns[0]], embeddings_train[columns[1]]
)
embeddings_train_df[f"abs_dist_embedding_{i}"] = abs_dist(
embeddings_train[columns[0]], embeddings_train[columns[1]]
)
embeddings_train_df[f"l2_dist_embedding_{i}"] = sum_dist(
embeddings_train[columns[0]], embeddings_train[columns[1]]
)
embeddings_test_df[f"l2_dist_embedding_{i}"] = l2_dist(
embeddings_test[columns[0]], embeddings_test[columns[1]]
)
embeddings_test_df[f"cos_dist_embedding_{i}"] = cos_dist(
embeddings_test[columns[0]], embeddings_test[columns[1]]
)
embeddings_test_df[f"abs_dist_embedding_{i}"] = abs_dist(
embeddings_test[columns[0]], embeddings_test[columns[1]]
)
embeddings_test_df[f"l2_dist_embedding_{i}"] = sum_dist(
embeddings_test[columns[0]], embeddings_test[columns[1]]
)
x_train = np.hstack(
[item for k, item in embeddings_train.items()]
+ [
train_question_title_dense,
train_question_body_dense,
train_answer_dense,
embeddings_train_df.values,
train_feature.values,
]
)
x_test = np.hstack(
[item for k, item in embeddings_test.items()]
+ [
test_question_title_dense,
test_question_body_dense,
test_answer_dense,
embeddings_test_df.values,
test_feature.values,
]
)
y_train = train_df[target_columns].values
del train_feature, test_feature
del embeddings_train, embeddings_test
del embeddings_train_df, embeddings_test_df
del train_question_title_dense, train_question_body_dense, train_answer_dense
del test_question_title_dense, test_question_body_dense, test_answer_dense
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
# ### Model
# 'Cyclical Learning Rates for Training Neural Networks'- Leslie N. Smith, arxiv 2017
# https://arxiv.org/abs/1506.01186
# https://github.com/bckenstler/CLR
class CyclicScheduler1:
def __init__(self, min_lr=0.001, max_lr=0.01, period=10):
super(CyclicScheduler1, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
def __call__(self, time):
# sawtooth
# r = (1-(time%self.period)/self.period)
# cosine
time = time % self.period
r = (np.cos(time / self.period * np.pi) + 1) / 2
lr = self.min_lr + r * (self.max_lr - self.min_lr)
return lr
def __str__(self):
string = "CyclicScheduler\n" + "min_lr=%0.3f, max_lr=%0.3f, period=%8.1f" % (
self.min_lr,
self.max_lr,
self.period,
)
return string
class CyclicScheduler2:
def __init__(
self, min_lr=0.001, max_lr=0.01, period=10, max_decay=1.0, warm_start=0
):
super(CyclicScheduler2, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time < self.warm_start:
return self.max_lr
# cosine
self.cycle = (time - self.warm_start) // self.period
time = (time - self.warm_start) % self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr * (self.max_decay**self.cycle)
r = (np.cos(time / period * np.pi) + 1) / 2
lr = min_lr + r * (max_lr - min_lr)
return lr
def __str__(self):
string = "CyclicScheduler\n" + "min_lr=%0.4f, max_lr=%0.4f, period=%8.1f" % (
self.min_lr,
self.max_lr,
self.period,
)
return string
# tanh curve
class CyclicScheduler3:
def __init__(
self, min_lr=0.001, max_lr=0.01, period=10, max_decay=1.0, warm_start=0
):
super(CyclicScheduler3, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.period = period
self.max_decay = max_decay
self.warm_start = warm_start
self.cycle = -1
def __call__(self, time):
if time < self.warm_start:
return self.max_lr
# cosine
self.cycle = (time - self.warm_start) // self.period
time = (time - self.warm_start) % self.period
period = self.period
min_lr = self.min_lr
max_lr = self.max_lr * (self.max_decay**self.cycle)
r = (np.tanh(-time / period * 16 + 8) + 1) * 0.5
lr = min_lr + r * (max_lr - min_lr)
return lr
def __str__(self):
string = "CyclicScheduler\n" + "min_lr=%0.3f, max_lr=%0.3f, period=%8.1f" % (
self.min_lr,
self.max_lr,
self.period,
)
return string
class NullScheduler:
def __init__(self, lr=0.01):
super(NullScheduler, self).__init__()
self.lr = lr
self.cycle = 0
def __call__(self, time):
return self.lr
def __str__(self):
string = "NullScheduler\n" + "lr=%0.5f " % (self.lr)
return string
# net ------------------------------------
# https://github.com/pytorch/examples/blob/master/imagenet/main.py ###############
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def get_learning_rate(optimizer):
lr = []
for param_group in optimizer.param_groups:
lr += [param_group["lr"]]
assert len(lr) == 1 # we support only one param_group
lr = lr[0]
return lr
# ### Model
def mean_spearmanr_correlation_score(y_true, y_pred):
num_labels = y_pred.shape[1]
return np.nanmean(
[
spearmanr(y_pred[:, idx], y_true[:, idx]).correlation
for idx in range(num_labels)
]
)
class QuestDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
if self.y is not None:
return self.x[idx], self.y[idx]
else:
return self.x[idx]
class NeuralNet(nn.Module):
def __init__(self, num_features, num_labels):
super(NeuralNet, self).__init__()
self.num_features = num_features
self.num_labels = num_labels
self.classifier = nn.Sequential(
nn.Linear(self.num_features, self.num_features),
nn.Dropout(0.2),
nn.Linear(self.num_features, self.num_features),
# nn.Dropout( 0.1 ),
nn.Linear(self.num_features, self.num_labels),
)
def forward(self, features):
return self.classifier(features)
test_pred_datas = {}
test_pred_weights = {}
def init_test_pred_data():
for column in target_columns:
test_pred_datas[column] = np.zeros(len(test_df))
test_pred_weights[column] = 0.0
def add_test_pred_data(prediction, columns, weight):
for column_idx, column in enumerate(columns):
test_pred_datas[column] += weight * prediction[:, column_idx]
test_pred_weights[column] += weight
validation_datas = {}
validation_counts = {}
def init_validation_data():
for column in target_columns:
validation_datas[column] = np.zeros(len(train_df))
validation_counts[column] = np.zeros(len(train_df))
def add_validation_data(prediction, columns, idx):
for column_idx, column in enumerate(columns):
validation_datas[column][idx] += prediction[:, column_idx]
validation_counts[column][idx] += 1.0
init_test_pred_data()
init_validation_data()
n_splits = 5
# cv = KFold(n_splits=n_splits, random_state=SEED)
# kfold_split = list( cv.split( x_train, y_train ) )
cv = GroupKFold(n_splits=n_splits)
kfold_split = list(cv.split(X=train_df.question_body, groups=train_df.question_body))
n_epochs = 50
patience = 5
scores = []
batch_size = 32
num_features = x_train.shape[1]
num_labels = y_train.shape[1]
for k, (train_idx, valid_idx) in enumerate(kfold_split):
print("k:", k + 1)
# train
x_train_train = x_train[train_idx]
y_train_train = y_train[train_idx]
train_dataset = QuestDataset(x_train_train, y_train_train)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, drop_last=True
)
# valid
x_train_valid = x_train[valid_idx]
y_train_valid = y_train[valid_idx]
valid_dataset = QuestDataset(x_train_valid, y_train_valid)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False
)
model = NeuralNet(num_features, num_labels)
model.cuda()
best_weights = copy.deepcopy(model.state_dict())
loss_fn = nn.BCEWithLogitsLoss()
# optimizer = optim.Adam( model.parameters(), lr=2e-5 )
optimizer = RAdam(model.parameters(), lr=2e-5)
    scheduler = CyclicScheduler2(
min_lr=2e-6, max_lr=2e-5, period=20, warm_start=0, max_decay=0.9
)
min_loss = np.inf
counter = 0
for epoch in range(n_epochs):
# for epoch in tqdm( range(n_epochs) ):
        lr = scheduler(epoch)
adjust_learning_rate(optimizer, lr)
# lr = get_learning_rate( optimizer )
model.train()
train_loss = []
for features, labels in train_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
labels = torch.tensor(labels, dtype=torch.float32).cuda()
y_pred = model(features)
loss = loss_fn(y_pred, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.append(loss.item())
avg_train_loss = np.mean(train_loss)
model.eval()
val_loss = []
with torch.no_grad():
for features, labels in valid_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
labels = torch.tensor(labels, dtype=torch.float32).cuda()
y_pred = model(features)
loss = loss_fn(y_pred, labels)
val_loss.append(loss.item())
avg_val_loss = np.mean(val_loss)
if avg_val_loss < min_loss:
min_loss = avg_val_loss
counter = 0
best_weights = copy.deepcopy(model.state_dict())
else:
counter += 1
# print('Early stopping: %i / %i' % (counter, self.patience))
if counter >= patience and epoch > 12:
# print('Early stopping at epoch', epoch + 1)
break
model.load_state_dict(best_weights)
model.eval()
valid_preds_fold = []
valid_true_fold = []
with torch.no_grad():
for features, labels in valid_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
labels = torch.tensor(labels, dtype=torch.float32).cuda()
y_pred = model(features)
valid_preds_fold.extend(torch.sigmoid(y_pred).cpu().data.numpy().tolist())
valid_true_fold.extend(labels.cpu().data.numpy().tolist())
valid_preds_fold = np.array(valid_preds_fold)
valid_true_fold = np.array(valid_true_fold)
add_validation_data(valid_preds_fold, target_columns, valid_idx)
score = mean_spearmanr_correlation_score(valid_preds_fold, valid_true_fold)
print("Score:", score)
scores.append(score)
test_preds_fold = []
test_dataset = QuestDataset(x_test, None)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size, shuffle=False
)
with torch.no_grad():
for features in test_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(features)
test_preds_fold.extend(torch.sigmoid(y_pred).cpu().data.numpy().tolist())
test_preds_fold = np.array(test_preds_fold)
add_test_pred_data(test_preds_fold, target_columns, 1.0)
del model, optimizer, loss_fn, best_weights
del train_dataset, train_loader
del valid_dataset, valid_loader
del test_dataset, test_loader
torch.cuda.empty_cache()
gc.collect()
print("================")
print("Mean score:", np.mean(scores))
# ### validation
validation0 = pd.DataFrame()
for column in target_columns:
preds = validation_datas[column]
count = validation_counts[column]
count = np.where(count < 0.5, 1.0, count)
validation0[column] = preds / count
validation0.head()
mean_spearmanr_correlation_score(train_df[target_columns].values, validation0.values)
# ### test
test_preds0 = pd.read_csv(f"{folder}sample_submission.csv")
for column in target_columns:
output = test_pred_datas[column] / test_pred_weights[column]
test_preds0[column] = output
test_preds0.head()
# # QUEST_005_07_DISTILBERT+LSTM (LSTM)
LSTM_UNITS = 512
# LSTM_UNITS = 1024
DENSE_HIDDEN_UNITS = 6 * LSTM_UNITS
class MODEL_v001(nn.Module):
def __init__(self, num_features, num_labels):
super().__init__()
self.lstm1 = nn.LSTM(
num_features, LSTM_UNITS, bidirectional=True, batch_first=True
)
self.lstm2 = nn.LSTM(
LSTM_UNITS * 2,
LSTM_UNITS,
bidirectional=True,
batch_first=True,
dropout=0.2,
)
self.linear1 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS)
self.linearnorm = nn.LayerNorm(DENSE_HIDDEN_UNITS)
self.linear2 = nn.Linear(DENSE_HIDDEN_UNITS, DENSE_HIDDEN_UNITS)
self.linearnorm2 = nn.LayerNorm(DENSE_HIDDEN_UNITS)
self.linear_sub_out = nn.Linear(DENSE_HIDDEN_UNITS, num_labels)
def forward(self, x, lengths=None):
h_lstm1, _ = self.lstm1(x)
h_lstm2, _ = self.lstm2(h_lstm1)
avg_pool1 = torch.mean(h_lstm1, 1)
avg_pool2 = torch.mean(h_lstm2, 1)
max_pool2, _ = torch.max(h_lstm2, 1)
h_conc = torch.cat((avg_pool1, max_pool2, avg_pool2), 1)
h_conc_linear1 = self.linearnorm(F.relu(self.linear1(h_conc)))
h_conc_linear2 = self.linearnorm2(F.relu(self.linear2(h_conc)))
hidden = h_conc + h_conc_linear1 + h_conc_linear2
out = self.linear_sub_out(hidden)
return out
x_train = np.expand_dims(x_train, 1)
x_test = np.expand_dims(x_test, 1)
init_test_pred_data()
init_validation_data()
n_splits = 5
# cv = KFold(n_splits=n_splits, random_state=SEED)
# kfold_split = list( cv.split( x_train, y_train ) )
cv = GroupKFold(n_splits=n_splits)
kfold_split = list(cv.split(X=train_df.question_body, groups=train_df.question_body))
n_epochs = 8
patience = 4
scores = []
batch_size = 8
num_features = x_train.shape[2]
num_labels = y_train.shape[1]
for k, (train_idx, valid_idx) in enumerate(kfold_split):
print("k:", k + 1)
# train
x_train_train = x_train[train_idx]
y_train_train = y_train[train_idx]
train_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train[train_idx]), torch.from_numpy(y_train[train_idx])
)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, drop_last=True
)
# valid
valid_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train[valid_idx]), torch.from_numpy(y_train[valid_idx])
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False
)
model = MODEL_v001(num_features, num_labels)
model.cuda()
best_weights = copy.deepcopy(model.state_dict())
loss_fn = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters())
# optimizer = optim.Adam( model.parameters(), lr=2e-5 )
# optimizer = RAdam( model.parameters(), lr=2e-5 )
min_loss = np.inf
counter = 0
for epoch in range(n_epochs):
# for epoch in tqdm( range(n_epochs) ):
model.train()
train_loss = []
for features, labels in train_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
labels = torch.tensor(labels, dtype=torch.float32).cuda()
y_pred = model(features)
loss = loss_fn(y_pred, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.append(loss.item())
avg_train_loss = np.mean(train_loss)
model.eval()
val_loss = []
with torch.no_grad():
for features, labels in valid_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
labels = torch.tensor(labels, dtype=torch.float32).cuda()
y_pred = model(features)
loss = loss_fn(y_pred, labels)
val_loss.append(loss.item())
avg_val_loss = np.mean(val_loss)
if avg_val_loss < min_loss:
min_loss = avg_val_loss
counter = 0
best_weights = copy.deepcopy(model.state_dict())
else:
counter += 1
# print('Early stopping: %i / %i' % (counter, self.patience))
if counter >= patience:
# print('Early stopping at epoch', epoch + 1)
break
model.load_state_dict(best_weights)
model.eval()
valid_preds_fold = []
valid_true_fold = []
with torch.no_grad():
for features, labels in valid_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
labels = torch.tensor(labels, dtype=torch.float32).cuda()
y_pred = model(features)
valid_preds_fold.extend(torch.sigmoid(y_pred).cpu().data.numpy().tolist())
valid_true_fold.extend(labels.cpu().data.numpy().tolist())
valid_preds_fold = np.array(valid_preds_fold)
valid_true_fold = np.array(valid_true_fold)
add_validation_data(valid_preds_fold, target_columns, valid_idx)
score = mean_spearmanr_correlation_score(valid_preds_fold, valid_true_fold)
print("Score:", score)
scores.append(score)
test_preds_fold = []
test_dataset = QuestDataset(x_test, None)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=batch_size, shuffle=False
)
with torch.no_grad():
for features in test_loader:
features = torch.tensor(features, dtype=torch.float32).cuda()
y_pred = model(features)
test_preds_fold.extend(torch.sigmoid(y_pred).cpu().data.numpy().tolist())
test_preds_fold = np.array(test_preds_fold)
add_test_pred_data(test_preds_fold, target_columns, 1.0)
del model, optimizer, loss_fn, best_weights
del train_dataset, train_loader
del valid_dataset, valid_loader
del test_dataset, test_loader
torch.cuda.empty_cache()
gc.collect()
print("================")
print("Mean score:", np.mean(scores))
# ### validation
validationLSTM = pd.DataFrame()
for column in target_columns:
preds = validation_datas[column]
count = validation_counts[column]
count = np.where(count < 0.5, 1.0, count)
validationLSTM[column] = preds / count
validationLSTM.head()
mean_spearmanr_correlation_score(validationLSTM.values, train_df[target_columns].values)
# ### submission
test_predsLSTM = pd.read_csv(f"{folder}sample_submission.csv")
for column in target_columns:
output = test_pred_datas[column] / test_pred_weights[column]
test_predsLSTM[column] = output
test_predsLSTM.head()
del train_df, test_df
del x_train, x_test, y_train
del validation_datas, validation_counts
del test_pred_datas, test_pred_weights
# # post processing
# | Name | valid | test_preds |
# |-------------------------|----------------|----------------|
# | QUEST_005_07-distilbert | validation0 | test_preds0 |
# | QUEST-003-55 | validationT | test_predsT |
# | QUEST-006-55 -GPT2 | validationTG | test_predsTG |
# | QUEST-005-04-1 | validationT2 | test_predsT2 |
# | QUEST-005_07-lstm | validationLSTM | test_predsLSTM |
data_dir = "../input/google-quest-challenge/"
train = pd.read_csv(path_join(data_dir, "train.csv"))
test = pd.read_csv(path_join(data_dir, "test.csv"))
sample = pd.read_csv(path_join(data_dir, "sample_submission.csv"))
print(train.shape, test.shape)
train.head()
targets = [
"question_asker_intent_understanding",
"question_body_critical",
"question_conversational",
"question_expect_short_answer",
"question_fact_seeking",
"question_has_commonly_accepted_answer",
"question_interestingness_others",
"question_interestingness_self",
"question_multi_intent",
"question_not_really_a_question",
"question_opinion_seeking",
"question_type_choice",
"question_type_compare",
"question_type_consequence",
"question_type_definition",
"question_type_entity",
"question_type_instructions",
"question_type_procedure",
"question_type_reason_explanation",
"question_type_spelling",
"question_well_written",
"answer_helpful",
"answer_level_of_information",
"answer_plausible",
"answer_relevance",
"answer_satisfaction",
"answer_type_instructions",
"answer_type_procedure",
"answer_type_reason_explanation",
"answer_well_written",
]
def compute_spearmanr_ignore_nan(trues, preds):
rhos = []
for tcol, pcol in zip(np.transpose(trues), np.transpose(preds)):
rhos.append(spearmanr(tcol, pcol).correlation)
return np.nanmean(rhos)
from functools import partial
import scipy as sp
class OptimizedRounder(object):
def __init__(self, correlation):
self.correlation = correlation
self.coef_ = 0
self.score = 0
def _kappa_loss(self, coef, X, y):
a = X.copy()
b = y.copy()
X_p = pd.cut(a, [-np.inf] + list(np.sort(coef)) + [np.inf], labels=[0, 1, 2])
a[X_p == 0] = 0
a[X_p == 2] = 1
# print("validation score = {}".format(spearmanr(a, b).correlation))
if spearmanr(a, b).correlation < self.correlation:
self.score = spearmanr(a, b).correlation
return (
-spearmanr(a, b).correlation
+ (self.correlation - spearmanr(a, b).correlation + 1) ** 10
)
else:
self.score = spearmanr(a, b).correlation
return -spearmanr(a, b).correlation
def fit(self, X, y, coef_ini):
loss_partial = partial(self._kappa_loss, X=X, y=y)
initial_coef = coef_ini
self.coef_ = sp.optimize.minimize(
loss_partial, initial_coef, method="nelder-mead"
)
def coefficients(self):
return self.coef_["x"]
def score_fc(self):
return self.score
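# Minimal usage sketch of OptimizedRounder on made-up values (not part of the original
# post-processing): start from the baseline Spearman rho of the raw predictions, then
# let Nelder-Mead search the two clipping thresholds.
_demo_y = np.array([0.0, 0.0, 0.3, 0.5, 0.7, 1.0, 1.0, 1.0])
_demo_x = np.array([0.05, 0.10, 0.35, 0.50, 0.65, 0.80, 0.90, 0.95])
_demo_base = spearmanr(_demo_y, _demo_x).correlation
_demo_opt = OptimizedRounder(_demo_base)
_demo_opt.fit(_demo_x, _demo_y, [0.2, 0.9])
print(_demo_base, _demo_opt.coefficients(), _demo_opt.score_fc())
del _demo_y, _demo_x, _demo_base, _demo_opt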
from collections import Counter
def optimize_preds(y_df, x_df, preds_df):
for title in targets:
y1 = np.asarray(y_df[title].copy())
X1 = np.asarray(x_df[title].copy())
x_original = np.asarray(x_df[title].copy())
preds_original = np.asarray(preds_df[title].copy())
correlation = spearmanr(y1, X1).correlation
print(title)
print(correlation)
coefficients_target = [0, 0]
correlation = spearmanr(y1, X1).correlation
okcor = spearmanr(y1, X1).correlation
maxi = correlation
liste = [
[0.1, 0.9],
[0.2, 0.9],
[0.3, 0.9],
[0.3, 0.8],
[0.2, 0.7],
[0.3, 0.7],
[0.2, 0.6],
[0.5, 0.9],
[0.3, 0.6],
[0.4, 0.8],
[0.7, 0.9],
[0.8, 0.9],
]
for L in liste:
optR = OptimizedRounder(correlation)
optR.fit(X1, y1, L)
coefficients = optR.coefficients()
if optR.score_fc() > maxi:
maxi = optR.score_fc()
coefficients_target = coefficients
if maxi != spearmanr(y1, X1).correlation:
oof = X1.copy()
oof[oof > coefficients_target[1]] = 1
oof[oof <= coefficients_target[0]] = 0
X1 = np.asarray(preds_df[title].copy())
oof_test = X1.copy()
oof_test[oof_test > coefficients_target[1]] = 1
oof_test[oof_test <= coefficients_target[0]] = 0
score = spearmanr(y1, oof).correlation
if score - okcor > 0.001:
print("difference validation score = {}".format(score - okcor))
x_df[title] = oof
dist = Counter(x_df[title])
if 0 in list(dist.keys()):
dist[0] /= len(x_df)
if 1 in list(dist.keys()):
dist[1] /= len(x_df)
acum = 0
bound = {}
if 0 in list(dist.keys()):
acum = dist[0]
bound[0] = np.percentile(preds_df[title], acum * 100)
if 1 in list(dist.keys()):
acum = 1 - dist[1]
bound[1] = np.percentile(preds_df[title], acum * 100)
def classify(x):
if 0 in list(dist.keys()):
if 1 in list(dist.keys()):
if x <= bound[0]:
return 0
elif x <= bound[1]:
return x
else:
return 1
else:
if x <= bound[0]:
return 0
else:
return x
else:
if 1 in list(dist.keys()):
if x <= bound[1]:
return x
else:
return 1
else:
return x
final_pred = np.array(list(map(classify, preds_df[title])))
if len(np.unique(oof_test)) != 1:
print(coefficients_target)
preds_df[title] = final_pred
"""
validationN=pd.read_csv('../input/nirjharbert003/validation_preds_df_003.csv')
test_predsN=pd.read_csv('../input/nirjharbert003/submission_003.csv')
print("Nirjhar model : ")
compute_spearmanr_ignore_nan(np.asarray(train[targets].copy()), np.asarray(validationN[targets].copy()))
"""
"""
validationN1=pd.read_csv('../input/dataset-nirjhar/validation_preds_df_002.csv')
test_predsN1=pd.read_csv('../input/dataset-nirjhar/submission_002.csv')
print("Nirjhar model : ")
compute_spearmanr_ignore_nan(np.asarray(train[targets].copy()), np.asarray(validationN1[targets].copy()))
"""
data_table = []
data_table.append(
{
"Name": "QUEST-003-55",
"valid": copy.deepcopy(validationT),
"test_preds": copy.deepcopy(test_predsT),
}
)
# data_table.append( {'Name': 'QUEST-005-04-1', 'valid': copy.deepcopy( validationT2 ), 'test_preds': copy.deepcopy( test_predsT2 ) } )
data_table.append(
{
"Name": "QUEST-006-03-GPT2",
"valid": copy.deepcopy(validationTG),
"test_preds": copy.deepcopy(test_predsTG),
}
)
data_table.append(
{
"Name": "QUEST-005-07-distilbert",
"valid": copy.deepcopy(validation0),
"test_preds": copy.deepcopy(test_preds0),
}
)
data_table.append(
{
"Name": "QUEST-005-07-lstm",
"valid": copy.deepcopy(validationLSTM),
"test_preds": copy.deepcopy(test_predsLSTM),
}
)
# data_table.append( {'Name': 'QUEST-Bert-Nirjhar', 'valid': copy.deepcopy( validationN ), 'test_preds': copy.deepcopy( test_predsN ) } )
# data_table.append( {'Name': 'QUEST-Bert-Nirjhar1', 'valid': copy.deepcopy( validationN1 ), 'test_preds': copy.deepcopy( test_predsN1 ) } )
n_original = len(data_table)
score_list = []
for i in range(n_original):
print("=========================")
print(data_table[i]["Name"])
print("=========================")
data_table[i]["valid"] = pd.DataFrame(data_table[i]["valid"]).rank() / len(
data_table[i]["valid"]
)
data_table[i]["test_preds"] = pd.DataFrame(
data_table[i]["test_preds"]
).rank() / len(data_table[i]["test_preds"])
data_table.append(copy.deepcopy(data_table[i]))
score1 = compute_spearmanr_ignore_nan(
np.asarray(train[targets].copy()), np.asarray(data_table[i]["valid"].copy())
)
optimize_preds(train, data_table[i]["valid"], data_table[i]["test_preds"])
score2 = compute_spearmanr_ignore_nan(
np.asarray(train[targets].copy()), np.asarray(data_table[i]["valid"].copy())
)
score_list.append([score1, score2])
for i in range(len(score_list)):
score1, score2 = score_list[i][0], score_list[i][1]
print(
"{:.6f} -> {:.6f} ({:.6f}) : {}".format(
score1, score2, score2 - score1, data_table[i]["Name"]
)
)
validation_totale = train[targets].copy()
test_preds_totale = sample.copy()
for title in targets:
y1 = np.asarray(train[title].copy())
X_list = [np.asarray(x["valid"][title].copy()) for x in data_table]
test_list = [np.asarray(x["test_preds"][title].copy()) for x in data_table]
corr_list = [spearmanr(y1, x).correlation for x in X_list]
i = np.argmax(corr_list)
corr_best = copy.deepcopy(corr_list[i])
x_best = copy.deepcopy(X_list[i])
test_best = copy.deepcopy(test_list[i])
for weight_range in np.arange(0.45, 0.95, 0.005):
corr_list2 = [c * c for c in corr_list]
cmin = np.min(corr_list2)
cmax = np.max(corr_list2)
weights = (corr_list2 - cmin) / (cmax - cmin)
weights *= weight_range
weights += 1.0 - weight_range
# print( weight_range, weights, title )
x = np.average(X_list, axis=0, weights=weights)
x = rankdata(x) / len(x)
corr = spearmanr(y1, x).correlation
if corr > corr_best:
corr_best = copy.deepcopy(corr)
x_best = copy.deepcopy(x)
t = np.average(test_list, axis=0, weights=weights)
t = rankdata(t) / len(t)
test_best = copy.deepcopy(t)
validation_totale[title] = copy.deepcopy(x_best)
test_preds_totale[title] = copy.deepcopy(test_best)
compute_spearmanr_ignore_nan(
np.asarray(train[targets].copy()), np.asarray(validation_totale[targets].copy())
)
def function_spelling(row):
if row in "CULTURE":
return 1
else:
return 0
validation_totale["question_type_spelling"] = (
train["category"].apply(function_spelling)
* validation_totale["question_type_spelling"]
)
# def function_spelling2(row):
# if 'english.stackexchange' in str(row):
# return 1
# if 'ell.stackexchange' in str(row):
# return 1
# else:
# return 0
# #validation_totale['question_type_spelling'] = train['question_user_page'].apply(function_spelling2)*validation_totale['question_type_spelling']
compute_spearmanr_ignore_nan(
np.asarray(train[targets].copy()), np.asarray(validation_totale[targets].copy())
)
submission = pd.read_csv(path_join(data_dir, "sample_submission.csv"))
for title in targets:
for i, row in test_preds_totale.iterrows():
submission.loc[submission["qa_id"] == row["qa_id"], title] = row[title]
submission["question_type_spelling"] = (
test["category"].apply(function_spelling) * submission["question_type_spelling"]
)
# submission['question_type_spelling'] = test['question_user_page'].apply(function_spelling2)*validation['question_type_spelling']
compute_spearmanr_ignore_nan(
np.asarray(train[targets].copy()), np.asarray(validation_totale[targets].copy())
)
for title in targets:
submission[title] = submission[title].apply(lambda x: 1 if x > 1 else x)
submission.to_csv("submission.csv", index=False)
submission.head()
submission.describe()
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.datasets import load_diabetes
x, y = load_diabetes(return_X_y=True)
x
y
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2, random_state=2)
# Let's First Check scikit-learn's LinearRegression class
# # Scikit-learn's Linear Regression
xtrain
slr = LinearRegression()
slr.fit(xtrain, ytrain)
y_pred = slr.predict(xtest)
slr.coef_
slr.intercept_
r2_score(ytest, y_pred)
# # Simple Linear Regression
class LinearR:
def __init__(self):
self.m = None
self.b = None
def fit(self, X_train, Y_train):
den = 0
num = 0
for i in range(X_train.shape[0]):
num = num + ((X_train[i] - X_train.mean()) * (Y_train[i] - Y_train.mean()))
den = den + ((X_train[i] - X_train.mean()) * (X_train[i] - X_train.mean()))
self.m = num / den
self.b = Y_train.mean() - (self.m * X_train.mean())
def predict(self, X_test):
        return self.m * X_test + self.b
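# A minimal usage sketch (added for illustration, not part of the original run):
# LinearR expects a single 1-D feature, so we assume fitting on just one diabetes
# column here (index 2, the BMI feature, is a hypothetical choice).
x_bmi_train, x_bmi_test = xtrain[:, 2], xtest[:, 2]
lr_simple = LinearR()
lr_simple.fit(x_bmi_train, ytrain)
r2_score(ytest, lr_simple.predict(x_bmi_test))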
# # Multiple Linear Regression
xtrain.shape
ytrain.shape
class MLR:
def __init__(self):
self.coeff_ = None
self.intercept_ = None
def fit(self, X_train, y_train):
X_train = np.insert(X_train, 0, 1, axis=1)
betas = np.linalg.inv(np.dot(X_train.T, X_train)).dot(X_train.T).dot(y_train)
self.intercept_ = betas[0]
self.coeff_ = betas[1:]
def predict(self, X_test):
y_pred = np.dot(X_test, self.coeff_) + self.intercept_
return y_pred
mlr = MLR()
mlr.fit(xtrain, ytrain)
ypred = mlr.predict(xtest)
mlr.coeff_
mlr.intercept_
r2_score(ytest, ypred)
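# Sanity-check sketch (added for illustration): with the same train split, the
# normal-equation estimates above should closely match scikit-learn's LinearRegression.
print(np.allclose(mlr.coeff_, slr.coef_), np.isclose(mlr.intercept_, slr.intercept_))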
# # Linear Regression Using Gradient Descent (Batch Gradient Descent)
class GD:
def __init__(
self,
):
self.coeff_ = None
self.intercept_ = None
def fit(self, x_train, y_train, lr=0.01, epochs=100):
self.lr = lr
self.epochs = epochs
self.intercept_ = 0
self.coeff_ = np.ones(x_train.shape[1])
for i in range(self.epochs):
y_pred = self.intercept_ + np.dot(x_train, self.coeff_)
der = -2 * np.mean(y_train - y_pred)
self.intercept_ = self.intercept_ - (self.lr * der)
# coeffs
            # mean over the samples (shape[0]), matching the intercept update above
            coeff_der = -2 * (np.dot((y_train - y_pred), x_train) / (x_train.shape[0]))
self.coeff_ = self.coeff_ - (self.lr * coeff_der)
print(self.intercept_, self.coeff_)
def predict(self, x):
return np.dot(x, self.coeff_) + self.intercept_
ytrain
gdlr = GD()
gdlr.fit(xtrain, ytrain, lr=0.1, epochs=1000)
Ypred = gdlr.predict(xtest)
gdlr.intercept_
r2_score(ytest, Ypred)
# df  # leftover cell: df is not defined in this notebook
# lr=LinearR()
# from sklearn.model_selection import train_test_split
# x=df.iloc[:,0:1]
# y=df.iloc[:,-1]
# X_train,X_test,y_train,y_test=train_test_split(x,y,random_state=2)
|
# # overview
# #### This kernel is based on last year's [Basic Starter Kernel](https://www.kaggle.com/addisonhoward/basic-starter-kernel-ncaa-men-s-dataset-2019).
# #### I added a total score feature calculated on a yearly basis :)
# #### Hi everyone, welcome to this kernel. In this kernel I am building a basic LightGBM model. This notebook was built on top of this [kernel](https://www.kaggle.com/hiromoon166/2020-basic-starter-kernel). Thanks to the author of the kernel, [hiromu](https://www.kaggle.com/hiromoon166).
# #### If you find this kernel useful please consider upvoting it 😊. Also don't forget to upvote the original kernel.
# ## Import Library & Load Data
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV
tourney_result = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv"
)
tourney_seed = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv"
)
# deleting unnecessary columns
tourney_result = tourney_result.drop(
["DayNum", "WScore", "LScore", "WLoc", "NumOT"], axis=1
)
tourney_result
# ## Merge Seed
tourney_result = pd.merge(
tourney_result,
tourney_seed,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Seed": "WSeed"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result = pd.merge(
tourney_result,
tourney_seed,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Seed": "LSeed"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result
def get_seed(x):
return int(x[1:3])
tourney_result["WSeed"] = tourney_result["WSeed"].map(lambda x: get_seed(x))
tourney_result["LSeed"] = tourney_result["LSeed"].map(lambda x: get_seed(x))
tourney_result
# ## Merge Score
season_result = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv"
)
season_win_result = season_result[["Season", "WTeamID", "WScore"]]
season_lose_result = season_result[["Season", "LTeamID", "LScore"]]
season_win_result.rename(columns={"WTeamID": "TeamID", "WScore": "Score"}, inplace=True)
season_lose_result.rename(
columns={"LTeamID": "TeamID", "LScore": "Score"}, inplace=True
)
season_result = pd.concat((season_win_result, season_lose_result)).reset_index(
drop=True
)
season_result
season_score = season_result.groupby(["Season", "TeamID"])["Score"].sum().reset_index()
season_score
tourney_result = pd.merge(
tourney_result,
season_score,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Score": "WScoreT"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result = pd.merge(
tourney_result,
season_score,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Score": "LScoreT"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result
tourney_win_result = tourney_result.drop(["Season", "WTeamID", "LTeamID"], axis=1)
tourney_win_result.rename(
columns={
"WSeed": "Seed1",
"LSeed": "Seed2",
"WScoreT": "ScoreT1",
"LScoreT": "ScoreT2",
},
inplace=True,
)
tourney_win_result
tourney_lose_result = tourney_win_result.copy()
tourney_lose_result["Seed1"] = tourney_win_result["Seed2"]
tourney_lose_result["Seed2"] = tourney_win_result["Seed1"]
tourney_lose_result["ScoreT1"] = tourney_win_result["ScoreT2"]
tourney_lose_result["ScoreT2"] = tourney_win_result["ScoreT1"]
tourney_lose_result
# ## Prepare Training Data
tourney_win_result["Seed_diff"] = (
tourney_win_result["Seed1"] - tourney_win_result["Seed2"]
)
tourney_win_result["ScoreT_diff"] = (
tourney_win_result["ScoreT1"] - tourney_win_result["ScoreT2"]
)
tourney_lose_result["Seed_diff"] = (
tourney_lose_result["Seed1"] - tourney_lose_result["Seed2"]
)
tourney_lose_result["ScoreT_diff"] = (
tourney_lose_result["ScoreT1"] - tourney_lose_result["ScoreT2"]
)
tourney_win_result["result"] = 1
tourney_lose_result["result"] = 0
tourney_result = pd.concat((tourney_win_result, tourney_lose_result)).reset_index(
drop=True
)
tourney_result
# # Preparing testing data
test_df = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv"
)
test_df["Season"] = test_df["ID"].map(lambda x: int(x[:4]))
test_df["WTeamID"] = test_df["ID"].map(lambda x: int(x[5:9]))
test_df["LTeamID"] = test_df["ID"].map(lambda x: int(x[10:14]))
test_df
test_df = pd.merge(
test_df,
tourney_seed,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Seed": "Seed1"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df = pd.merge(
test_df,
tourney_seed,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Seed": "Seed2"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df = pd.merge(
test_df,
season_score,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Score": "ScoreT1"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df = pd.merge(
test_df,
season_score,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Score": "ScoreT2"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df
test_df["Seed1"] = test_df["Seed1"].map(lambda x: get_seed(x))
test_df["Seed2"] = test_df["Seed2"].map(lambda x: get_seed(x))
test_df["Seed_diff"] = test_df["Seed1"] - test_df["Seed2"]
test_df["ScoreT_diff"] = test_df["ScoreT1"] - test_df["ScoreT2"]
test_df = test_df.drop(["ID", "Pred", "Season", "WTeamID", "LTeamID"], axis=1)
test_df
# ## Train
X = tourney_result.drop("result", axis=1)
y = tourney_result.result
from sklearn.model_selection import KFold
import lightgbm as lgb
params = {
"num_leaves": 70,
"min_child_weight": 0.034,
"feature_fraction": 0.379,
"bagging_fraction": 0.418,
"min_data_in_leaf": 106,
"objective": "binary",
"max_depth": -1,
"learning_rate": 0.0068,
"boosting_type": "gbdt",
"bagging_seed": 11,
"metric": "logloss",
"verbosity": -1,
"reg_alpha": 0.3899,
"reg_lambda": 0.648,
"random_state": 47,
}
import gc
NFOLDS = 10
folds = KFold(n_splits=NFOLDS)
columns = X.columns
splits = folds.split(X, y)
y_preds = np.zeros(test_df.shape[0])
y_oof = np.zeros(X.shape[0])
feature_importances = pd.DataFrame()
feature_importances["feature"] = columns
for fold_n, (train_index, valid_index) in enumerate(splits):
print("Fold:", fold_n + 1)
X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
dtrain = lgb.Dataset(X_train, label=y_train)
dvalid = lgb.Dataset(X_valid, label=y_valid)
clf = lgb.train(
params, dtrain, 10000, valid_sets=[dtrain, dvalid], verbose_eval=200
)
feature_importances[f"fold_{fold_n + 1}"] = clf.feature_importance()
y_pred_valid = clf.predict(X_valid)
y_oof[valid_index] = y_pred_valid
y_preds += clf.predict(test_df) / NFOLDS
del X_train, X_valid, y_train, y_valid
gc.collect()
# ## Predict & Make Submission File
submission_df = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv"
)
submission_df["Pred"] = y_preds
submission_df
submission_df["Pred"].hist()
submission_df.to_csv("submission.csv", index=False)
# # Feature Importances:
import seaborn as sns
feature_importances["average"] = feature_importances[
[f"fold_{fold_n + 1}" for fold_n in range(folds.n_splits)]
].mean(axis=1)
feature_importances.to_csv("feature_importances.csv")
plt.figure(figsize=(6, 6))
sns.barplot(
data=feature_importances.sort_values(by="average", ascending=False).head(50),
x="average",
y="feature",
)
plt.title("50 TOP feature importance over {} folds average".format(folds.n_splits))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import math
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train_data = pd.read_csv("/kaggle/input/home-data-for-ml-course/train.csv")
# print(train_data.columns)
y = train_data["SalePrice"]
features = [
"LotArea",
"OverallQual",
"OverallCond",
"YearBuilt",
"YearRemodAdd",
"BsmtUnfSF",
"1stFlrSF",
"2ndFlrSF",
"GrLivArea",
"FullBath",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageArea",
]
X = train_data[features]
# print(X)
train_X, val_X, train_y, val_y = train_test_split(X, y, test_size=0.3)
def evaluate(prediction, val_y):
score = 0
for i in range(len(prediction)):
distance = (prediction[i] - list(val_y)[i]) ** 2
score += math.sqrt(distance)
return score
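# An equivalent vectorized form of the score above (a sum of absolute errors), added
# only for illustration; the loop-based definition above is kept unchanged.
def evaluate_vectorized(prediction, val_y):
    return np.abs(np.asarray(prediction) - np.asarray(val_y)).sum()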
test_data = pd.read_csv("/kaggle/input/home-data-for-ml-course/test.csv")
test = test_data[features].fillna(0)
regressor = RandomForestRegressor(max_depth=50)
regressor.fit(X, y)
prediction = regressor.predict(test)
print(prediction)
submission = pd.DataFrame({"Id": test_data["Id"], "SalePrice": prediction})
submission.to_csv("submission.csv", index=False)
pd.read_csv("submission.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Importing Libraries
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import plotly.express as px
import plotly.graph_objects as go
import folium
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
# Setting up plotly
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot, plot_mpl
import plotly.offline as py
init_notebook_mode(connected=True)
# importing data
ncv_data = pd.read_csv(
"/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv"
)
ncv_deaths = pd.read_csv(
"/kaggle/input/novel-corona-virus-2019-dataset/time_series_2019_ncov_deaths.csv"
)
ncv_confirmed = pd.read_csv(
"/kaggle/input/novel-corona-virus-2019-dataset/time_series_2019_ncov_confirmed.csv"
)
ncv_recovered = pd.read_csv(
"/kaggle/input/novel-corona-virus-2019-dataset/time_series_2019_ncov_recovered.csv"
)
ncv_data.head()
ncv_confirmed.head()
ncv_recovered.head()
ncv_confirmed_long = ncv_confirmed.melt(
id_vars=["Province/State", "Country/Region", "Lat", "Long"],
var_name="Date",
value_name="Confirmed",
)
ncv_recovered_long = ncv_recovered.melt(
id_vars=["Province/State", "Country/Region", "Lat", "Long"],
var_name="Date",
value_name="Recovered",
)
ncv_deaths_long = ncv_deaths.melt(
id_vars=["Province/State", "Country/Region", "Lat", "Long"],
var_name="Date",
value_name="Deaths",
)
full_data = pd.concat(
[ncv_confirmed_long, ncv_deaths_long["Deaths"], ncv_recovered_long["Recovered"]],
axis=1,
sort=False,
)
full_data.head()
# filling missing values with 0 if null values exists in columns of recovered, confirmed and deaths
full_data[["Confirmed", "Deaths", "Recovered"]] = full_data[
["Confirmed", "Deaths", "Recovered"]
].fillna(0)
full_data.head()
China = full_data[full_data["Country/Region"] == "Mainland China"]
Outsidechina = full_data[full_data["Country/Region"] != "Mainland China"]
full_latest = full_data[full_data["Date"] == max(full_data["Date"])].reset_index()
china_latest = full_latest[full_latest["Country/Region"] == "Mainland China"]
outsidechina_latest = full_latest[full_latest["Country/Region"] != "Mainland China"]
full_latest_grouped = (
full_latest.groupby("Country/Region")["Confirmed", "Deaths", "Recovered"]
.sum()
.reset_index()
)
china_latest_grouped = (
china_latest.groupby("Province/State")["Confirmed", "Deaths", "Recovered"]
.sum()
.reset_index()
)
outsidechina_latest_grouped = (
outsidechina_latest.groupby("Country/Region")["Confirmed", "Deaths", "Recovered"]
.sum()
.reset_index()
)
# EDA and Data visualisations
# confirmed cases country wise
conf_ncv = (
full_latest_grouped[["Country/Region", "Confirmed"]]
.sort_values(by="Confirmed", ascending=False)
.reset_index(drop=True)
)
conf_ncv.head()
# total no.of countries affected
print(len(conf_ncv))
# Affected Country/Region
print(f"Affected countries are : {full_data['Country/Region'].unique()}")
print(f"Total Affected countries are : {len(full_data['Country/Region'].unique())}")
fig = px.bar(
    full_data,
x="Date",
y="Confirmed",
hover_data=["Province/State", "Deaths", "Recovered"],
color="Country/Region",
)
annotations = []
annotations.append(
dict(
xref="paper",
yref="paper",
x=0.0,
y=1.05,
xanchor="left",
yanchor="bottom",
text="Confirmed bar plot for each Country",
font=dict(family="Arial", size=30, color="rgb(37,37,37)"),
showarrow=False,
)
)
fig.update_layout(annotations=annotations)
fig.show()
# Provinces in china with most reported cases
china_rep = (
china_latest_grouped[["Province/State", "Confirmed"]]
.sort_values(by="Confirmed", ascending=False)
.reset_index(drop=True)
)
china_rep.head()
fig = px.bar(
full_data.loc[full_data["Country/Region"] == "Mainland China"],
x="Date",
y="Confirmed",
hover_data=["Province/State", "Deaths", "Recovered"],
color="Province/State",
)
annotations = []
annotations.append(
dict(
xref="paper",
yref="paper",
x=0.0,
y=1.05,
xanchor="left",
yanchor="bottom",
text="Confirmed bar plot for Mainland China",
font=dict(family="Arial", size=30, color="rgb(37,37,37)"),
showarrow=False,
)
)
fig.update_layout(annotations=annotations)
fig.show()
# deaths as per country wise
cou_dea = (
full_latest_grouped[["Country/Region", "Deaths"]]
.sort_values(by="Deaths", ascending=False)
.reset_index(drop=True)
)
cou_dea = cou_dea[cou_dea["Deaths"] > 0]
print(cou_dea)
fig = px.bar(
full_data,
x="Date",
y="Deaths",
hover_data=["Province/State", "Confirmed", "Recovered"],
color="Country/Region",
)
annotations = []
annotations.append(
dict(
xref="paper",
yref="paper",
x=0.0,
y=1.05,
xanchor="left",
yanchor="bottom",
text="Death bar plot for each country",
font=dict(family="Arial", size=30, color="rgb(37,37,37)"),
showarrow=False,
)
)
fig.update_layout(annotations=annotations)
fig.show()
# **Maps visualisations**
pxdf = px.data.gapminder()
country_isoAlpha = pxdf[["country", "iso_alpha"]].drop_duplicates()
country_isoAlpha.rename(columns={"country": "Country"}, inplace=True)
country_isoAlpha.set_index("Country", inplace=True)
country_map = country_isoAlpha.to_dict("index")
def getCountryIsoAlpha(Country):
    try:
        return country_map[Country]["iso_alpha"]
    except KeyError:
        return Country
full_data.replace({"Country/Region": "Mainland China"}, "China", inplace=True)
full_data["iso_alpha"] = full_data["Country/Region"].apply(getCountryIsoAlpha)
# confirmed cases on map
df_plot = full_data.groupby("iso_alpha").max().reset_index()
fig = px.choropleth(
df_plot,
locations="iso_alpha",
color="Confirmed",
hover_data=["Confirmed", "Deaths", "Recovered"],
color_continuous_scale="Viridis",
)
fig.update_geos(fitbounds="locations", visible=True)
fig.update_layout(
    margin={"r": 0, "t": 0, "l": 0, "b": 0}, title_text="Confirmed Cases in World"
)
fig.show()
fig = px.scatter_geo(
full_data,
locations="iso_alpha",
color="Confirmed",
size="Confirmed",
hover_name="Country/Region",
hover_data=["Confirmed", "Deaths", "Recovered"],
projection="natural earth",
animation_frame="Date",
)
fig.show()
# Death cases map
fig = px.choropleth(
df_plot,
locations="iso_alpha",
color="Deaths",
hover_data=["Confirmed", "Deaths", "Recovered"],
color_continuous_scale="Viridis",
)
fig.update_geos(fitbounds="locations", visible=True)
fig.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0}, title_text="Deaths Cases in World"
)
fig.show()
fig = px.scatter_geo(
full_data,
locations="iso_alpha",
color="Deaths",
size="Deaths",
hover_name="Country/Region",
hover_data=["Confirmed", "Deaths", "Recovered"],
projection="natural earth",
animation_frame="Date",
)
fig.show()
|
# # Kensho Derived Wikimedia Dataset - Checking out Hugging Face Tokenizers
# Hugging Face [recently announced](https://twitter.com/huggingface/status/1215746098201014272?lang=en) fast [Rust](https://www.rust-lang.org/) implementations of its tokenizers. Let's see what kind of performance we can get out of the new [huggingface tokenizers package](https://github.com/huggingface/tokenizers) compared to the tokenizers included in the [huggingface transformers package](https://github.com/huggingface/transformers).
from collections import Counter
import json
import os
from pprint import pprint
import string
import time
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from tqdm import tqdm
sns.set()
sns.set_context("talk")
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# All of the KDWD files have one "thing" per line. We'll hard code the number of lines in the files we're going to use so we can have nice progress bars when streaming through them.
NUM_KLAT_LINES = 5_343_564
kdwd_path = os.path.join("/kaggle/input", "kensho-derived-wikimedia-data")
vocab_path = os.path.join("/kaggle/input", "hugging-face-tokenizer-vocabs")
# # Install the Hugging Face tokenizers and transformers packages
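# The install cell is empty in this export; a typical install command (an assumption,
# not taken from the original notebook) would be:
# !pip install tokenizers transformers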
# # Example Usage of Tokenizers
import tokenizers # Rust implementations
import transformers # Python implementations
# Let's create some tokenizer classes and see how they work. We'll use Bert Word Pieces as our benchmark.
vocab_file = os.path.join(vocab_path, "bert-base-uncased-vocab.txt")
rust_bert_wp = tokenizers.BertWordPieceTokenizer(vocab_file)
pyth_bert_wp = transformers.BertTokenizer.from_pretrained("bert-base-uncased")
pprint("Rust tokenizer class: {}".format(rust_bert_wp))
print()
pprint("Python tokenizer class: {}".format(pyth_bert_wp))
# The `BertWordPieceTokenizer` class from the [huggingface tokenizers package](https://github.com/huggingface/tokenizers) works like this,
encoded = rust_bert_wp.encode("Do you feel like I feel?")
pprint("result={}".format(encoded))
pprint("tokens={}".format(encoded.tokens))
# The `BertTokenizer` class from the [huggingface transformers package](https://github.com/huggingface/transformers) works like this,
tokens = pyth_bert_wp.convert_ids_to_tokens(
pyth_bert_wp.encode("Do you feel like I feel?")
)
pprint("tokens={}".format(tokens))
# # Speed Test
# Let's create a class to iterate through the link annotated text of the Kensho Derived Wikimedia Dataset (KDWD).
class KdwdLinkAnnotatedText:
def __init__(self, file_path, max_pages):
self.num_lines = NUM_KLAT_LINES
self.file_path = file_path
self.max_pages = max_pages
self.pages_to_parse = min(self.num_lines, self.max_pages)
def __iter__(self):
with open(self.file_path) as fp:
for ii_line, line in enumerate(fp):
if ii_line == self.pages_to_parse:
break
yield json.loads(line)
# Let's see how long it takes to tokenize 5,000 pages from our Wikipedia sample. We'll use both huggingface tokenizers and a simple function that splits on whitespace, lowercases, and removes punctuation.
table = str.maketrans("", "", string.punctuation)
def simple_tokenizer(text):
tokens = [tok.lower().strip() for tok in text.split()]
tokens = [tok.translate(table) for tok in tokens]
tokens = [tok for tok in tokens if tok != ""]
return tokens
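# Quick illustration (added): the simple whitespace tokenizer applied to the same
# example sentence used with the BERT tokenizers above.
print(simple_tokenizer("Do you feel like I feel?"))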
file_path = os.path.join(kdwd_path, "link_annotated_text.jsonl")
klat = KdwdLinkAnnotatedText(file_path, max_pages=5_000)
# To begin, we'll see how long it takes to simply iterate through the pages.
t0 = time.time()
for page in tqdm(klat, total=klat.pages_to_parse, desc="just iteration"):
for section in page["sections"]:
first = section["text"][0]
dt_iter = time.time() - t0
print("dt: {}".format(dt_iter))
# Next we'll count unigrams produced by our 3 tokenizers.
unigrams_simple = Counter()
unigrams_hf_rust = Counter()
unigrams_hf_pyth = Counter()
# # Simple Tokenizer
t0 = time.time()
for page in tqdm(klat, total=klat.pages_to_parse, desc="simple tokenizer"):
for section in page["sections"]:
tokens = simple_tokenizer(section["text"])
unigrams_simple.update(tokens)
dt_simple = time.time() - t0
print("dt: {}".format(dt_simple))
# # Hugging Face - Rust Tokenizer
t0 = time.time()
for page in tqdm(klat, total=klat.pages_to_parse, desc="hugging face Rust tokenizer"):
for section in page["sections"]:
encoded = rust_bert_wp.encode(section["text"])
unigrams_hf_rust.update(encoded.tokens)
dt_hf_rust = time.time() - t0
print("dt: {}".format(dt_hf_rust))
# # Hugging Face - Python Tokenizer
t0 = time.time()
for page in tqdm(klat, total=klat.pages_to_parse, desc="hugging face Python tokenizer"):
for section in page["sections"]:
tokens = pyth_bert_wp.convert_ids_to_tokens(
pyth_bert_wp.encode(section["text"])
)
unigrams_hf_pyth.update(tokens)
dt_hf_pyth = time.time() - t0
print("dt: {}".format(dt_hf_pyth))
# # Plot Results
labels = ["just iteration", "simple", "hugging rust", "hugging python"]
times = [dt_iter, dt_simple, dt_hf_rust, dt_hf_pyth]
rates = np.array(
[
sum(unigrams_simple.values()) / dt_simple,
sum(unigrams_hf_rust.values()) / dt_hf_rust,
sum(unigrams_hf_pyth.values()) / dt_hf_pyth,
]
)
yy = np.arange(len(labels))
width = 0.5
figsize = (16, 16)
fig, axes = plt.subplots(1, 2, figsize=figsize, sharey=True)
ax = axes[0]
rects1 = ax.barh(yy, times, width)
ax.set_yticks(yy)
ax.set_yticklabels(labels)
ax.set_xlabel("Time to Parse 5k pages [s]")
ax.set_ylabel("Tokenizer")
ax = axes[1]
rects2 = ax.barh(yy[1:], rates / 1000, width, color="orange")
ax.set_xlabel("Thousands of Tokens / s")
fig.suptitle("Tokenizer Performance on 5k Wikipedia Pages")
# # Results
# Note: execution time may vary between runs, but we can get a sense of how large the differences are. It takes about 1s just to iterate over the pages. Our super simple (split on whitespace, lowercase, remove punctuation) Python tokenizer takes about 30 seconds. The fast Rust implementation from Hugging Face gets through the pages in about 70 seconds and the original Hugging Face Python implementation takes about 1000 seconds.
# # Check Tokens
unigrams_simple.most_common(25)
unigrams_hf_rust.most_common(25)
unigrams_hf_pyth.most_common(25)
|
import seaborn
import pandas as pd
import numpy
import matplotlib.pyplot as plt
seaborn.set_style("white")
# allows charts to be displayed in a notebook
WGI_raw = pd.read_csv("/kaggle/input/wgi-data/WGI_Data.csv")
WGI_raw["Value"] = [None if v == ".." else float(v) for v in WGI_raw["Value"]]
defcols = pd.DataFrame(
data={
"Code": ["CC.EST", "GE.EST", "PV.EST", "RQ.EST", "RL.EST", "VA.EST"],
"Serie": [
"CorruptionControl",
"GovEffectiveness",
"PoliticalStability",
"RegulatoryQuality",
"RuleLaw",
"VoiceAccountability",
],
}
)
WGI_raw = WGI_raw.merge(defcols, left_on="Series Code", right_on="Code")
Continent = pd.read_csv("/kaggle/input/continent/continent.csv", sep=";")
Continent.head()
Continent = Continent.drop(
columns=["Continent_Code", "Two_Letter_Country_Code", "Country_Number"]
)
# Continent = Continent.query("not(Three_Letter_Country_Code.isin(['ARM', 'CYP', 'AZE', 'TUR', 'KAZ', 'GEO', 'RUS']) & Continent_Name == 'Asia')")
WGI_raw = WGI_raw.merge(
Continent, how="left", left_on="Country Code", right_on="Three_Letter_Country_Code"
)
WGI = pd.pivot_table(
WGI_raw.reset_index(),
index=["Country Name", "Continent_Name"],
columns="Serie",
values="Value",
)
WGI = WGI.reset_index()
WGI
print(WGI.isnull().sum())
WGI.dropna(inplace=True)
print(WGI.isnull().sum())
# ## PCA
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
WGI["Country Name"] = label_encoder.fit_transform(WGI["Country Name"])
WGI["Continent_Name"] = label_encoder.fit_transform(WGI["Continent_Name"])
from sklearn import decomposition
pca = decomposition.PCA(n_components=8)
pca.fit(WGI)  # learning (fitting) step
# ### Numeric WGI
WGI_num = WGI.drop(["Country Name", "Continent_Name"], axis=1)
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# standardize the data
WGI_num_scaled = scale(WGI_num)
# apply PCA
pcanum = PCA(n_components=6)
WGI_nume_pca = pcanum.fit_transform(WGI_num_scaled)
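# Added illustration: share of variance captured by the PCA on the standardized
# numeric indicators (complements the PCA on the raw table inspected below).
print(pcanum.explained_variance_ratio_)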
# ## Explained variance
print(pca.explained_variance_)
print(pca.explained_variance_ratio_)
dict = {
"dimension": ["Dim" + str(x + 1) for x in range(8)],
"la variance expliquée": pca.explained_variance_,
"le pourcentage de la proportion de variance expliquée": numpy.round(
numpy.cumsum(pca.explained_variance_ratio_) * 100
),
}
output = pd.DataFrame.from_dict(dict)
output.head()
plt.bar(
output["dimension"], output["le pourcentage de la proportion de variance expliquée"]
)
plt.xlabel("dimension")
plt.ylabel("le pourcentage de la proportion de variance expliquée")
plt.title(
"Diagramme en barre des différentes dimensions en fonction du pourcentage de la variance expliquée"
)
plt.show()
# ## Representation of the individuals
WGI_pca = pca.transform(WGI)
# Convert to a pandas DataFrame
WGI_pca_df = pd.DataFrame(
{
"Dim1": WGI_pca[:, 0],
"Dim2": WGI_pca[:, 1],
"Country": WGI["Country Name"],
"Continent": WGI["Continent_Name"],
}
)
# Result (first rows)
WGI_pca_df.head()
import matplotlib.pyplot as plt
# create the figure
fig, ax = plt.subplots()
# scatter plot
ax.scatter(WGI_pca_df["Dim1"], WGI_pca_df["Dim2"])
# labels
ax.set_xlabel("Dimension 1")
ax.set_ylabel("Dimension 2")
ax.set_title("ACP des indicateurs de gouvernance dans le monde")
# display the figure
plt.show()
# subplots are needed here because we annotate the chart
fig, ax = plt.subplots()
WGI_pca_df.plot.scatter(
    "Dim1", "Dim2", ax=ax
)  # the ax option places the points and the text on the same figure
# loop over each country
for k in WGI_pca_df.iterrows():
    # annotate only when the absolute value on one of the 2 key dimensions is large (thresholds chosen empirically)
if (abs(k[1]["Dim1"]) > 3.5) | (abs(k[1]["Dim2"]) > 1.5):
ax.annotate(k[1]["Country"], (k[1]["Dim1"], k[1]["Dim2"]), fontsize=9)
plt.xlabel("Dimension 1 (83%)")
plt.ylabel("Dimension 2 (8 %)")
plt.suptitle("Premier plan factoriel (91%)")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import seaborn as sns
sns.set_style("whitegrid")
import matplotlib.pyplot as plt
# # Reading Dataset
df_fifa = pd.read_csv("/kaggle/input/fifa19/data.csv", index_col=0)
df_fifa.head()
df_fifa.info()
df_fifa["Position"].unique()
# # Major attributes for Forwards
df_fifa_fwd = df_fifa[df_fifa["Position"].isin(["RF", "ST", "LF", "RS", "LS", "CF"])]
df_fifa_fwd_imp = df_fifa_fwd[
[
"Finishing",
"HeadingAccuracy",
"ShortPassing",
"Volleys",
"Dribbling",
"Curve",
"FKAccuracy",
"LongPassing",
"BallControl",
"Acceleration",
"SprintSpeed",
"Agility",
"Reactions",
"Balance",
"ShotPower",
"Jumping",
"Stamina",
"Strength",
"LongShots",
"Aggression",
"Interceptions",
"Positioning",
"Vision",
"Penalties",
"Composure",
"Marking",
"StandingTackle",
"SlidingTackle",
"Overall",
]
]
# ## RF Model to find importance of features
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100)
rf_model = rf.fit(df_fifa_fwd_imp.drop(["Overall"], axis=1), df_fifa_fwd_imp["Overall"])
df_fifa_fwd_feat_imp = pd.DataFrame(columns=["attributes", "imp_values"])
df_fifa_fwd_feat_imp["imp_values"] = rf_model.feature_importances_
df_fifa_fwd_feat_imp["attributes"] = df_fifa_fwd_imp.drop(["Overall"], axis=1).columns
fig = plt.figure(figsize=(15, 5))
sns.barplot(
data=df_fifa_fwd_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10],
x=df_fifa_fwd_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"imp_values"
],
y=df_fifa_fwd_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"attributes"
],
)
plt.title("Important Attributes for Forwards")
plt.xlabel("Importance")
plt.ylabel("Attributes")
# # Major attributes for Midfielders
df_fifa["Position"].unique()
df_fifa_mid = df_fifa[
df_fifa["Position"].isin(
["RCM", "LCM", "LDM", "RDM", "CAM", "CDM", "RM", "LM", "RAM", "LAM", "CM"]
)
]
df_fifa_mid_imp = df_fifa_mid[
[
"Finishing",
"HeadingAccuracy",
"ShortPassing",
"Volleys",
"Dribbling",
"Curve",
"FKAccuracy",
"LongPassing",
"BallControl",
"Acceleration",
"SprintSpeed",
"Agility",
"Reactions",
"Balance",
"ShotPower",
"Jumping",
"Stamina",
"Strength",
"LongShots",
"Aggression",
"Interceptions",
"Positioning",
"Vision",
"Penalties",
"Composure",
"Marking",
"StandingTackle",
"SlidingTackle",
"Overall",
]
]
# ## RF Model to find importance of features
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100)
rf_model = rf.fit(df_fifa_mid_imp.drop(["Overall"], axis=1), df_fifa_mid_imp["Overall"])
df_fifa_mid_feat_imp = pd.DataFrame(columns=["attributes", "imp_values"])
df_fifa_mid_feat_imp["imp_values"] = rf_model.feature_importances_
df_fifa_mid_feat_imp["attributes"] = df_fifa_mid_imp.drop(["Overall"], axis=1).columns
fig = plt.figure(figsize=(15, 5))
sns.barplot(
data=df_fifa_mid_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10],
x=df_fifa_mid_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"imp_values"
],
y=df_fifa_mid_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"attributes"
],
)
plt.title("Important Attributes for Midfielders")
plt.xlabel("Importance")
plt.ylabel("Attributes")
# # Major attributes for Wingers
df_fifa["Position"].unique()
df_fifa_wing = df_fifa[df_fifa["Position"].isin(["LW", "RW"])]
df_fifa_wing_imp = df_fifa_wing[
[
"Finishing",
"HeadingAccuracy",
"ShortPassing",
"Volleys",
"Dribbling",
"Curve",
"FKAccuracy",
"LongPassing",
"BallControl",
"Acceleration",
"SprintSpeed",
"Agility",
"Reactions",
"Balance",
"ShotPower",
"Jumping",
"Stamina",
"Strength",
"LongShots",
"Aggression",
"Interceptions",
"Positioning",
"Vision",
"Penalties",
"Composure",
"Marking",
"StandingTackle",
"SlidingTackle",
"Overall",
]
]
# ## RF Model to find importance of features
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100)
rf_model = rf.fit(
df_fifa_wing_imp.drop(["Overall"], axis=1), df_fifa_wing_imp["Overall"]
)
df_fifa_wing_feat_imp = pd.DataFrame(columns=["attributes", "imp_values"])
df_fifa_wing_feat_imp["imp_values"] = rf_model.feature_importances_
df_fifa_wing_feat_imp["attributes"] = df_fifa_wing_imp.drop(["Overall"], axis=1).columns
fig = plt.figure(figsize=(15, 5))
sns.barplot(
data=df_fifa_wing_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10],
x=df_fifa_wing_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"imp_values"
],
y=df_fifa_wing_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"attributes"
],
)
plt.title("Important Attributes for Wingers")
plt.xlabel("Importance")
plt.ylabel("Attributes")
# # Major attributes for Defenders
df_fifa["Position"].unique()
df_fifa_def = df_fifa[
df_fifa["Position"].isin(["LWB", "RWB", "CB", "LCB", "RCB", "RB", "LB"])
]
df_fifa_def_imp = df_fifa_def[
[
"Finishing",
"HeadingAccuracy",
"ShortPassing",
"Volleys",
"Dribbling",
"Curve",
"FKAccuracy",
"LongPassing",
"BallControl",
"Acceleration",
"SprintSpeed",
"Agility",
"Reactions",
"Balance",
"ShotPower",
"Jumping",
"Stamina",
"Strength",
"LongShots",
"Aggression",
"Interceptions",
"Positioning",
"Vision",
"Penalties",
"Composure",
"Marking",
"StandingTackle",
"SlidingTackle",
"Overall",
]
]
# ## RF Model to find importance of features
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100)
rf_model = rf.fit(df_fifa_def_imp.drop(["Overall"], axis=1), df_fifa_def_imp["Overall"])
df_fifa_def_feat_imp = pd.DataFrame(columns=["attributes", "imp_values"])
df_fifa_def_feat_imp["imp_values"] = rf_model.feature_importances_
df_fifa_def_feat_imp["attributes"] = df_fifa_def_imp.drop(["Overall"], axis=1).columns
fig = plt.figure(figsize=(15, 5))
sns.barplot(
data=df_fifa_def_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10],
x=df_fifa_def_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"imp_values"
],
y=df_fifa_def_feat_imp.sort_values(by=["imp_values"], ascending=False)[0:10][
"attributes"
],
)
plt.title("Important Attributes for Defenders")
plt.xlabel("Importance")
plt.ylabel("Attributes")
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
print("setup completed")
# ### Loading the dataset into pandas
df = pd.read_csv("/kaggle/input/heart-disease-statlog/Heart_disease_statlog.csv")
df.head(10)
df.age.hist()
plt.xlabel("Age", size=20)
import plotly.express as px
male_count = df[df["sex"] == 1]["sex"].count()
female_count = df[df["sex"] == 0]["sex"].count()
labels = ["Male", "Female"]
data = [male_count, female_count]
colors = ["green", "blue"]
px.pie(values=data, names=labels)
sns.histplot(data=df, x="cp", hue="sex")
plt.figure(figsize=(12, 6), dpi=100)
sns.kdeplot(df["trestbps"], shade=True)
df[["age", "sex", "cp", "trestbps"]].groupby("trestbps").describe()
plt.figure(figsize=(12, 6), dpi=100)
df["trestbps"].value_counts().sort_values(ascending=False).head(10).plot(kind="bar")
plt.title("Top Ten Plessure Blood", size=20)
df.isnull().sum()
sns.regplot(x="age", y="trestbps", data=df)
plt.title("Relationship between Age and trestbps", size=15)
plt.xlabel("Age", size=20)
plt.ylabel("Trestpbs", size=20)
plt.show()
df.head(3)
sns.histplot(df, x="chol", kde=True, hue="sex")
sns.regplot(x="age", y="chol", data=df)
plt.title("Relationship between Age and chohol", size=15)
plt.xlabel("Age", size=20)
plt.ylabel("chol", size=20)
plt.show()
sns.boxplot(x="sex", y="age", data=df)
# ### fbs: fasting blood sugar > 120 mg/dl, represented as 1 if true and 0 if false (Nominal)
#
df["fbs"].value_counts()
corr = df.corr()
sns.heatmap(corr)
top_3 = df["fbs"].value_counts().sort_values(ascending=False).head(3).index.values
sns.boxplot(y="age", x="fbs", data=df[df["fbs"].isin(top_3)])
df.describe()
categorical = []
numerical = []
for feature in df.columns:
if df[feature].dtype == object:
categorical.append(feature)
else:
numerical.append(feature)
df[numerical].hist(figsize=(20, 12), bins=100, color="lightgreen")
# ### thal: A blood disorder called thalassemia
#
df["thal"].value_counts()
df.groupby("thal").mean().round(2)
sns.countplot(x="thal", data=df)
# ## target: It is the target variable which we have to predict; 1 means the patient is suffering from heart disease and 0 means the patient is normal.
#
df["target"].value_counts()
sns.countplot(x=df["target"])
df.groupby("target").mean().round(1)
# people with heart diseases
sns.scatterplot(y="age", x="target", data=df)
sns.histplot(x="age", hue="target", data=df)
sns.pairplot(df[["ca", "thal", "target", "trestbps"]])
|
# ### Description: This program detects breast cancer based on data. Breast Cancer (BC) is a common cancer for women around the world.
# ### Early detection of BC can greatly improve prognosis and survival chances by promoting clinical treatment to patients.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # load the breast-cancer-wisconsin-data
import pandas as pd
df = pd.read_csv("../input/breast-cancer-wisconsin-data/data.csv")
df.head()
# M - malignant means harmful
# B - benign means not harmful
df["diagnosis"].unique()
# count the number of rows and columns in the dataset
df.shape
# count the number of empty (Nan, NAN, na) values in each columns
df.isna().sum()
# drop the column with all missing values
df = df.dropna(axis=1)
df.shape
# Get the count of diagnosis categorical data which have malignant(M) and benign(B)
df["diagnosis"].value_counts()
# visualize the count
sns.countplot(df["diagnosis"], label="count")
# look at the data types
df.dtypes
# encode the categorical data values
from sklearn.preprocessing import LabelEncoder
labelencoder_Y = LabelEncoder()
df.iloc[:, 1] = labelencoder_Y.fit_transform(df.iloc[:, 1].values)
df.iloc[:, 1].value_counts()
# create a pair plot
sns.pairplot(df.iloc[:, 1:6], hue="diagnosis")
# print the first five rows of data
df.head()
# get the correlation of the columns
df.iloc[:, 1:12].corr()
# correlation with the %
plt.figure(figsize=(10, 10))
sns.heatmap(df.iloc[:, 1:12].corr(), annot=True, fmt=".0%")
# split the data set into independent (X) and dependent (Y) data sets
X = df.iloc[:, 2:32].values
Y = df.iloc[:, 1].values
# split the data set into 75% training and 25% testing
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.25, random_state=0
)
# check the shape of all the train test split
print(
"X_train, X_test, Y_train, Y_test",
X_train.shape,
X_test.shape,
Y_train.shape,
Y_test.shape,
)
# scale the data (Feature scaling)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
X_train[10:]
# create a function for models
def models(X_train, Y_train):
# Logistic Regression
from sklearn.linear_model import LogisticRegression
log = LogisticRegression(random_state=0)
log.fit(X_train, Y_train)
# Decision Tree
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion="entropy", random_state=0)
tree.fit(X_train, Y_train)
# Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(
n_estimators=10, criterion="entropy", random_state=0
)
forest.fit(X_train, Y_train)
# print the model accuracy on the training data
print("[0]Logistic Regression Training Accuracy:", log.score(X_train, Y_train))
print(
"[5]Decision Tree Classifier Training Accuracy:", tree.score(X_train, Y_train)
)
print(
"[6]Random Forest Classifier Training Accuracy:", forest.score(X_train, Y_train)
)
return log, tree, forest
# Train the models
model = models(X_train, Y_train)
# test the mode accuracy on the test data on the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, model[0].predict(X_test))
print(cm)
# get the testing accuracy of all the model
for i in range(len(model)):
print("Model ", i)
cm = confusion_matrix(Y_test, model[i].predict(X_test))
    TN = cm[0][0]
    FP = cm[0][1]
    FN = cm[1][0]
    TP = cm[1][1]
print(cm)
print("Testing Accuracy = ", (TP + TN) / (TP + TN + FP + FN))
print()
# another way to get metrics of the models
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
for i in range(len(model)):
print("Model ", i)
print(classification_report(Y_test, model[i].predict(X_test)))
print(accuracy_score(Y_test, model[i].predict(X_test)))
print()
# Print the prediction of the Random Forest Classifier model
pred = model[2].predict(X_test)
print(pred)
print()
print(Y_test)
|
# ### __As explained in class, the Visualization and Feature Extraction parts are provided only as examples; they are outside the scope of this course, so you do not need to read them.__
# Import the required libraries
import random
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
import warnings
warnings.filterwarnings(action="ignore")
# Fix the random seed
seed = 42
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
# Check the file paths inside the data (Input) folder
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ### **_Visualization_**
from pathlib import Path
from PIL import Image
import matplotlib.pyplot as plt
IMAGE_PATH = "/kaggle/input/galaxys"
galaxys = os.listdir(IMAGE_PATH)
title = [Path(galaxy).stem for galaxy in galaxys]
image_size = (300, 300)
images = list()
for galaxy in galaxys:
image_path = os.path.join(IMAGE_PATH, galaxy)
with Image.open(image_path) as image:
image = image.resize(image_size)
image_np = np.array(image)
images.append(image_np)
# Show
fig, ax = plt.subplots(1, len(galaxys), figsize=(len(galaxys) * 3, 3))
for i, image in enumerate(images):
ax[i].imshow(image)
ax[i].set_title(title[i])
ax[i].axis("off")
# ### **_Feature Extraction_**
import torch
import torchvision
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from sklearn.decomposition import PCA
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
warnings.filterwarnings(action="ignore")
transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
model = torchvision.models.vgg16(pretrained=True)
model.classifier = nn.Sequential(*list(model.classifier.children())[:-3])
model = model.to(device)
def preprocess_image(base_path, image_path):
image = Image.open(os.path.join(base_path, image_path)).convert("RGB")
image = transforms.ToTensor()(image)
image = transforms.Resize((224, 224))(image)
image = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(
image
)
return image.unsqueeze(0)
def extract_features(base_path, image_path):
image = preprocess_image(base_path, image_path)
image = torch.as_tensor(image, device=device)
with torch.no_grad():
features = model(image)
features = features.cpu().numpy().flatten()
return features
galaxys = galaxys * 25
features = list()
for galaxy in tqdm(galaxys):
features.append(extract_features(IMAGE_PATH, galaxy))
feature_df = pd.DataFrame(features)
display(feature_df.head())
# PCA (Principal Component Analysis): a principal component is the component that best explains the variance of the whole data (the independent variables)
pca = PCA(n_components=64)
features_pca = pca.fit_transform(feature_df)
features_pca = pd.DataFrame(features_pca)
print(features_pca.shape)
display(features_pca.head())
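# Added illustration: total share of the VGG16 feature variance retained by the 64 components.
print(f"explained variance ratio (sum): {pca.explained_variance_ratio_.sum():.3f}")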
# ### **_data preview_**
# Load the training set (trainX, trainY), the test set (testX), and the submission format (submit)
trainX = pd.read_csv("/kaggle/input/2023-ml-w5p1/trainX.csv")
trainY = pd.read_csv("/kaggle/input/2023-ml-w5p1/trainY.csv")
testX = pd.read_csv("/kaggle/input/2023-ml-w5p1/testX.csv")
submit = pd.read_csv("/kaggle/input/2023-ml-w5p1/submit.csv")
# Inspect the dataset (trainX), processed with VGG16 into 64-dimensional 1-D vectors
print(f"shape of train data: {trainX.shape}")
trainX.head()
# Inspect the training label dataset (trainY)
display(trainY.head())
print(f"columns: {trainY.columns}")
print(f"labels : {trainY.Category.unique()}")
# Drop the unnecessary column (Id)
trainY.drop(["Id"], axis=1, inplace=True)
# ### **preprocessing**
print(type(trainY["Category"]))
print(type(trainY["Category"].values))
# Convert the training labels ('edge','smooth','spiral') to integers (LabelEncoder)
le = LabelEncoder()
# Got an error: "ValueError: y should be a 1d array, got an array of shape (6000, 2) instead." when transform trainY['Category']
# labels = le.fit_transform(trainY['Category'].values)
labels = pd.DataFrame(le.fit_transform(trainY["Category"].values))
labels = labels.values.ravel() # labels.squeeze()
# To validate on the training data, split it again into training and validation subsets (the final model should be fit on the full training data (trainX))
x_train, x_val, y_train, y_val = train_test_split(
trainX, labels, test_size=0.2, shuffle=True, random_state=seed, stratify=labels
)
print(f"shape of train data, x_train: {x_train.shape}, y_train: {y_train.shape}")
print(f"shape of test data, x_val : {x_val.shape}, y_val : {y_val.shape}")
# ### **model learning (Logistic Regression) & evaluation: validation data**
# Define the learning model
# Logistic Regression: a statistical technique that uses a linear combination of independent variables to predict the probability that an event occurs
clf = LogisticRegression(random_state=seed)
# clf = LogisticRegression(solver='liblinear', random_state=seed)
# Fit on the training data (x_train)
clf.fit(x_train, y_train)
# Predict on the validation data (x_val)
pred_val = clf.predict(x_val)
# Check the general performance of the model with the validation labels (y_val)
print(f"accuracy for validation data accuracy: {accuracy_score(y_val, pred_val)}")
print("confusion matrix:")
print(confusion_matrix(y_val, pred_val))
decoded_labels = le.inverse_transform(labels)
for encoded_label, decoded_label in zip(set(labels), set(decoded_labels)):
print(f"Label: {decoded_label:>6} -> Encoding: {encoded_label}")
from sklearn.preprocessing import StandardScaler
scaling = True
if scaling:
scaler = StandardScaler()
x_train_scale = scaler.fit_transform(x_train)
x_val_scale = scaler.transform(x_val)
# Define the learning model
clf = LogisticRegression(random_state=seed)
# Fit on the training data (x_train)
clf.fit(x_train_scale, y_train)
# Predict on the validation data (x_val)
pred_val = clf.predict(x_val_scale)
# Check the general performance of the model with the validation labels (y_val)
print(f"accuracy for validation data accuracy: {accuracy_score(y_val, pred_val)}")
print("confusion matrix:")
print(confusion_matrix(y_val, pred_val))
# ### **model learning (Logistic Regression) & prediction: train data**
# Fit on the full training data (trainX)
clf = LogisticRegression(max_iter=1000, random_state=seed)
clf.fit(trainX, labels)
# Predict on the evaluation data (testX)
pred_result = clf.predict(testX)
# Check the predictions
pred_result
# The predictions are currently integers from the LabelEncoder, so convert them back to strings and check
pred_result = le.inverse_transform(pred_result)
pred_result
# ## **submit**
# Check the submission format
submit.head()
# Insert the predictions into the submission format and save
submit["Category"] = pred_result
submit.to_csv("Baseline.csv", index=False)
# Check the submission data
submit.head()
|
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_excel("/kaggle/input/arketing-campaign/marketing_campaign.xlsx")
df.head()
# ## Basic Data Overview
df.describe()
df.info()
df.isnull().sum()
# ### Changing Object to Datetime dtypes
df.Dt_Customer = pd.to_datetime(df.Dt_Customer, format="%Y-%m-%d")
df.Dt_Customer.dtypes
df["year"] = pd.to_datetime(df.Dt_Customer, format="%Y-%m-%d").dt.year
df["month"] = pd.to_datetime(df.Dt_Customer, format="%Y-%m-%d").dt.month
df.head()
# #### How many people have completed each level of education?
x = df.Education.value_counts()
fig = px.pie(df, values=x, names=x.index, title="Education")
fig.show()
# ### How many people fall into each Marital_Status category (married, together, etc.)?
x = df.Marital_Status.value_counts()
fig = px.pie(df, values=x, names=x.index, title="Martial Status")
fig.show()
df.Income = df.Income.fillna(df.Income.median())
y = df.loc[:, df.columns.str.contains("Mnt")].columns
y
ms_group = df.Marital_Status.unique()
for group in ms_group:
x = df.loc[df.Marital_Status == group].pivot_table(
index="Marital_Status", values=y, aggfunc="mean"
)
x.unstack().plot(kind="bar")
plt.title("Marital Status Group: {}".format(group))
plt.xlabel("x")
plt.ylabel("Mean")
plt.show()
# ### By marital status, in which year did customers have the highest average income?
x = df.groupby(["year", "Marital_Status"])["Income"].mean().unstack()
x.unstack().plot(kind="bar")
x
# ### By marital status, in which year did customers make the most purchases or visits?
y = df.loc[:, df.columns.str.contains("Num")].columns
df.pivot_table(index=["year", "Marital_Status"], values=y, aggfunc="mean")
# ### Which education group has the most small children and teenagers in the customer's household?
z = df.loc[:, df.columns.str.contains("home")].columns
df.pivot_table(index=["Education"], values=z, aggfunc="mean").plot(kind="bar")
# ### For each education level, how does income vary by month?
import matplotlib.pyplot as plt
sns.lineplot(x="month", y="Income", hue="Education", data=df)
plt.title("Monthly vs Income")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from numpy.random import rand
from numpy.linalg import solve, inv
a = np.array([[1, 2, 3], [3, 4, 6.7], [5, 9.0, 5]])
a.transpose()
c = rand(3, 3)
print(c)
c.transpose()
from numpy.random import rand
c = rand(3, 3)
print(c)
c.transpose()
from numpy.random import rand
c = rand(3, 3)
print("first matrix", c)
d = rand(3, 3)
print("second matrix", d)
print("sum of two matrices is", c + d)
print("difference of two matrices", c - d)
print("multiplication of two matrices", c * d)
import numpy as np
a = np.zeros((2, 2)) # Create an array of all zeros
print(a) # Prints "[[ 0. 0.]
# [ 0. 0.]]"
b = np.ones((2, 2)) # Create an array of all ones
print(b)  # Prints "[[ 1.  1.]
#                    [ 1.  1.]]"
e = np.random.random((2, 2)) # Create an array filled with random values
print(e)
import numpy as np
from scipy import stats
# X is a Python List
X = [32.32, 56.98, 21.52, 44.32, 55.63, 13.75, 43.47, 43.34]
# Sorting the data and printing it.
X.sort()
print(X)
# [13.75, 21.52, 32.32, 43.34, 43.47, 44.32, 55.63, 56.98]
# Using NumPy's built-in functions to Find Mean, Median, SD and Variance
mean = np.mean(X)
median = np.median(X)
mode = stats.mode(X)
sd = np.std(X)
variance = np.var(X)
# Printing the values
print("Mean", mean) # 38.91625
print("Median", median) # 43.405
print("Standard Deviation", sd) # 14.3815654029
print("Variance", variance) # 206.829423437
print("mode", mode)
import numpy as np
array = np.array([1, 2, 2, 3, 3, 4, 5, 6, 6, 6, 6, 7, 8, 9])
unique = np.unique(array, axis=0)
print(unique)
import numpy as np
array1 = np.array([0, 10, 20, 40, 60, 80])
print("Array1: ", array1)
array2 = [10, 30, 40, 50, 70]
print("Array2: ", array2)
print("Unique sorted array of values that are in either of the two input arrays:")
print(np.union1d(array1, array2))
print("Common values between two arrays:")
print(np.intersect1d(array1, array2))
import numpy as np
a = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]
b = np.asarray(a)
print("Diagonal (sum): ", np.trace(b))
print("Diagonal (elements): ", np.diagonal(b))
|
# # Breast Cancer Detection
# 
# ## Problem Introduction:
# This notebook classifies images from **BreakHis** (the Breast Cancer Histopathological Image Dataset). Breast cancer occurs mainly in women and is a major cause of death among them: in **2018**, breast cancer caused roughly **627,000** deaths out of **2.1 million** diagnosed cases. Histopathology slides, such as those used to diagnose **Invasive Ductal Carcinoma (IDC)**, are increasingly digitized thanks to advances in slide-scanning technology and falling storage costs, and deep learning on these digitized slides has become a powerful aid for detecting and monitoring breast cancer early.
# ## Dataset Information
# The breast cancer histopathological image dataset (BreakHis) contains **9,109** microscopic images of breast tumor tissue collected from **82** patients, labelled as malignant or benign. The tissue is imaged at four magnification factors (**40X**, **100X**, **200X**, **400X**), and the released set contains **2,480** benign and **5,429** malignant samples. Each image is **700x460** pixels, **3-channel RGB** with **8**-bit depth per channel, stored in PNG format. The dataset is intended to serve as a benchmark for future breast cancer classification studies.
# ## Import Python Libraries
# First, we will import the necessary libraries for our notebook.
# importing libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os, time, tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import *
import tensorflow as tf
from functools import partial
# import keras
from tensorflow.keras import layers
import albumentations as A
import tensorflow_hub as hub
from tensorflow.keras.layers import Input, Dense, Flatten, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.metrics import Precision, Recall
from sklearn.metrics import classification_report, f1_score
from sklearn.model_selection import KFold
# ## GPU Confirmation
# Let's make sure that we have a GPU installed.
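# A minimal check (a sketch using TensorFlow's device-listing API): confirm that
# TensorFlow can see a GPU before training.
gpus = tf.config.list_physical_devices("GPU")
print("GPUs detected:", gpus if gpus else "none - running on CPU")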
# ## Loading the Dataset
# First, we load the data, which consists of a CSV file (Folds.csv) with information about all the images, and a directory containing the breast cancer tissue images themselves.
image_dir = "../input/breakhis/BreaKHis_v1/"
data_path = "../input/breakhis/Folds.csv"
# tf.data's AUTOTUNE constant, used for num_parallel_calls / prefetch further below
tf.data.experimental.AUTOTUNE
# defining the class names
class_names = ["malignant", "benign"]
print(class_names)
# loading the data
data = pd.read_csv(data_path)
data.head(5)
data["fold"].value_counts()
# ### Data Structuring
# We will structure the CSV data for easier handling and better understanding.
# renaming and structuring the columns for better data understanding
data = data.rename(columns={"filename": "path"})
data["label"] = data.path.apply(lambda x: x.split("/")[3])
data["label_int"] = data.label.apply(lambda x: class_names.index(x))
data["filename"] = data.path.apply(lambda x: x.split("/")[-1])
# view the first few rows of the structured data
data.head(6)
data.shape
# Our dataset has been restructured the way we wanted. Let's perform some analysis on it, and then move on to the images themselves.
# ## Analysis on Data
# making a plot to see data distribution
# sns.figure()
sns.set_theme()
sns.displot(x="label", data=data)
# The plot shows that most samples in the data are malignant tumors, and fewer are benign.
sns.countplot(x=data["label"], data=data)
# ## Dataset Modelling for Deep Learning
# We will model the data for our training, validation and testing sets.
# sorting out training, validation and testing images
test_images = data.groupby(by="label").sample(3000)
train_images = data.drop(test_images.index).reset_index(drop=True)
test_images = test_images.reset_index(drop=True)
# making splits of training & validation datasets
validation_images = train_images.sample(frac=0.3)
train_images = train_images.drop(validation_images.index).reset_index(drop=True)
validation_images = validation_images.reset_index(drop=True)
print("Total training images: % s" % str(train_images.shape[0]))
print("Total validation images: % s" % str(validation_images.shape[0]))
print("Total testing images: % s" % str(test_images.shape[0]))
train_images["set"] = "train"
validation_images["set"] = "validation"
test_images["set"] = "test"
new_data = pd.concat([train_images, validation_images, test_images])
new_data.head(5)
# ### Visualizing Train, Validation & Test Splits
sns.set(rc={"figure.figsize": (10.4, 5.4)})
sns.countplot(x=new_data["label"], hue=new_data["set"])
# ## Upsampling the Training Data
max_count = np.max(train_images.label.value_counts())
min_count = np.min(train_images.label.value_counts())
train_images = train_images.groupby("label").sample(n=max_count, replace=True)
train_images = train_images.reset_index(drop=True)
train_images.head(5)
# ## Making the Deep Learning Model
model_handle_map = {
"efficientnetv2-b0": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b0/feature_vector/2",
"inception_v3": "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/4",
"inception_resnet_v2": "https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/5",
"resnet_50": "https://tfhub.dev/tensorflow/resnet_50/classification/1",
}
model_image_size = {
"efficientnetv2-b0": 224,
"inception_v3": 299,
"inception_resnet_v2": 299,
"resnet_50": 224,
}
# function to decode a PNG image into a tf tensor
def load_image(path, label):
image = tf.io.read_file(path)
image = tf.io.decode_png(image, channels=3)
return image, label
# resize the image and rescale pixel values to the range [0, 1]
def image_reshape(image, label):
    image = tf.cast(image, tf.float32)
    image = tf.image.resize(image, [224, 224]) / 255
    return image, label
# image augmentation (albumentations) to improve generalization during training
def argument_image(image):
transform = A.Compose(
[
A.HorizontalFlip(p=0.5),
A.Rotate(p=0.5, limit=15),
A.RandomBrightnessContrast(
p=0.5,
brightness_limit=(-0.2, 0.2),
contrast_limit=(-0.1, 0.1),
brightness_by_max=True,
),
A.RandomResizedCrop(
p=0.8,
height=IMG_SIZE,
width=IMG_SIZE,
scale=(0.9, 1.1),
ratio=(0.05, 1.1),
interpolation=0,
),
A.Blur(blur_limit=(1, 1)),
]
)
data = {"image": image}
argumented_data = transform(**data)
argumented_image = argumented_data["image"]
argumented_image = tf.cast(argumented_image, tf.float32)
argumented_image = tf.image.resize(argumented_image, [IMG_SIZE, IMG_SIZE]) / 255
return argumented_image
def argumentor_function(image, label):
argumented_image = tf.numpy_function(
func=argument_image, inp=[image], Tout=tf.float32
)
return argumented_image, label
# function to view sample of images
def view_image(ds, col=5, row=5, size=(30, 10)):
plt.figure(figsize=(10, 5))
plt.subplots_adjust(wspace=0.05, hspace=0.15)
for images, labels in ds.take(1):
for i in range(col * row):
ax = plt.subplot(row, col, i + 1)
shape = str(images[i].numpy().shape)
plt.imshow(images[i].numpy())
plt.title(class_names[labels[i].numpy()])
plt.axis("off")
    plt.tight_layout()
return None
# function to view model predictions (expects `test_image` and `predicted_label`
# to be prepared globally before it is called)
def view_model_predictions():
plt.figure(figsize=(30, 8))
plt.rcParams.update({"font.size": 10})
plt.subplots_adjust(wspace=0.05, hspace=0.15)
for i in range(30):
ax = plt.subplot(3, 10, i + 1)
shape = str(test_image[i].numpy().shape)
plt.imshow(test_image[i].numpy())
plt.title(predicted_label[i][0])
plt.axis("off")
    plt.tight_layout()
return None
# making a function to calculate & show model history
def model_hist(history):
accuracy = history["accuracy"]
loss = history["loss"]
val_accuracy = history["val_accuracy"]
val_loss = history["val_loss"]
# setting the epochs
n_epochs = range(len(history["loss"]))
# saving models logs
# csv_logger = CSVLogger('cnn_model_logs.csv', append=True)
# making plots for accuracy
plt.figure(figsize=(16, 5))
plt.subplot(1, 2, 1)
plt.plot(n_epochs, accuracy, label="training accuracy")
plt.plot(n_epochs, val_accuracy, label="validation accuracy")
plt.legend()
plt.savefig("inception_v3_acc.png")
# making plots for loss
plt.figure(figsize=(16, 5))
plt.subplot(1, 2, 2)
plt.plot(n_epochs, loss, label="training loss (binary crossentropy)")
plt.plot(n_epochs, val_loss, label="validation loss (binary crossentropy)")
plt.legend()
plt.savefig("inception_v3_loss.png")
return None
# function for decoding a test image
def decode_test_img(path):
image = tf.io.read_file(path)
image = tf.image.decode_png(image, channels=3)
image = tf.cast(image, tf.float32)
image = tf.image.resize(image, [224, 224])
return image
# function for building a NN
def make_nn_model(image_size):
print("Making our deep cnn model.....")
cnn_model = tf.keras.Sequential(
[
layers.InputLayer(input_shape=(image_size, image_size, 3)),
hub.KerasLayer(model_handle, trainable=True, name="base"),
layers.Dense(512, activation="relu", name="fc1"),
layers.BatchNormalization(),
layers.Dropout(0.4, name="dropout"),
layers.Dense(128, activation="relu", name="fc2"),
layers.BatchNormalization(),
# layers.Dropout(0.4, name='dropout2'),
layers.Dense(1, activation="sigmoid", name="output"),
],
name=model_name,
)
cnn_model.build((None, image_size, image_size, 3))
cnn_model.summary()
print("model built!")
return cnn_model
# ### Model Configuration Parameters
# defining model configuration parameters
# model_name = "efficientnetv2-b0"
model_name = "inception_v3"
# model_name = "inception_resnet_v2"
# model_name = "resnet_50"
model_handle = model_handle_map.get(model_name)
IMG_SIZE = model_image_size.get(model_name, 224)
BATCH_SIZE = 32
EPOCHS = 10
sample_size = len(train_images)
print(f"Selected model: {model_name} : {model_handle}")
print(f"Input size of model: {IMG_SIZE}")
IMG_SIZE
# ## Loading the Dataset
# We have defined the model parameters and configuration in the sections above. Now we define and load the dataset into memory. We will define three datasets: train, validation and test.
(image_dir + train_images.path)[0]
# loading the train & validation dataets
load_train = tf.data.Dataset.from_tensor_slices(
(image_dir + train_images.path, train_images.label_int)
)
load_valid = tf.data.Dataset.from_tensor_slices(
(image_dir + validation_images.path, validation_images.label_int)
)
load_test = tf.data.Dataset.from_tensor_slices(
(image_dir + test_images.path, test_images.label_int)
)
load_train
train_dataset = (
load_train.shuffle(len(train_images))
.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
.map(partial(argumentor_function), num_parallel_calls=tf.data.experimental.AUTOTUNE)
.batch(BATCH_SIZE)
.prefetch(tf.data.experimental.AUTOTUNE)
)
val_dataset = (
load_valid.shuffle(len(validation_images))
.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
.map(partial(argumentor_function), num_parallel_calls=tf.data.experimental.AUTOTUNE)
.batch(BATCH_SIZE)
.prefetch(tf.data.experimental.AUTOTUNE)
)
test_dataset = (
load_test.shuffle(len(test_images))
.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
.map(partial(argumentor_function), num_parallel_calls=tf.data.experimental.AUTOTUNE)
.batch(BATCH_SIZE)
.prefetch(tf.data.experimental.AUTOTUNE)
)
train_dataset
# checking the path of images
train_images.path[5]
start = time.time()
view_image(train_dataset)
end = time.time()
print("Time Taken: %.3f seconds" % (end - start))
start = time.time()
view_image(val_dataset)
end = time.time()
print("Time Taken: %.3f seconds" % (end - start))
# We have successfully loaded the images, preprocessed them, applied image augmentations, and visualized what they look like.
# ## Deep Learning Model Training Using K Fold Cross Validation
# This is the step where we train our deep learning model. We have defined the model configuration, modelled the dataset, and prepared the data loaders. **K-Fold Cross Validation** could be used to search for the best fit (a sketch follows below); the training run here uses the single train/validation split defined earlier.
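# A minimal sketch of how the KFold import above could split the training dataframe
# into folds; the training run below instead uses the single split defined earlier:
kf = KFold(n_splits=5, shuffle=True, random_state=42)
for fold, (tr_idx, va_idx) in enumerate(kf.split(train_images)):
    print(f"Fold {fold}: {len(tr_idx)} training rows / {len(va_idx)} validation rows")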
def get_labels_from_tfdataset(tfdataset, batched=False):
labels = list(map(lambda x: x[1], tfdataset)) # Get labels
if not batched:
return tf.concat(labels, axis=0) # concat the list of batched labels
return labels
get_labels_from_tfdataset(train_dataset)
print("Size of Image being used: %d" % (IMG_SIZE))
# starting a new session for TF (clear any previous Keras state)
image_size = 224
tf.keras.backend.clear_session()
model_nn = make_nn_model(IMG_SIZE)
# making model checkpoints
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
"inception_v3.h5", save_best_only=True
)
# model logs
csv_logger = CSVLogger("cnn_model_logs.csv", append=True)
metrics = ["accuracy", Precision(name="Precision"), Recall(name="Recall")]
# compiling the model
model_nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=metrics)
# fit the model
train_history = model_nn.fit(
train_dataset,
epochs=10,
batch_size=BATCH_SIZE,
verbose=1,
callbacks=[model_checkpoint, csv_logger],
validation_data=val_dataset,
)
# ## Visualize Model Performance (Loss/Accuracy)
# visualize model performance
history = train_history.history
model_hist(history)
# ## Evaluation on Validation Data
model_performance = model_nn.evaluate(val_dataset)
print("Net loss on validation data: %.3f" % model_performance[0])
print("Net accuracy on validation data: %.3f" % model_performance[1])
print("Net precision on validation data: %.3f" % model_performance[2])
print("Net recall on validation data: %.3f" % model_performance[3])
# ## Evaluation on Test Data
from tensorflow.keras.models import load_model
my_reloaded_model = load_model(
("/kaggle/input/models/inception_v3.h5"),
custom_objects={"KerasLayer": hub.KerasLayer},
)
from tensorflow.keras.metrics import (
Precision,
Recall,
BinaryAccuracy,
BinaryCrossentropy,
)
# harmonic mean of precision and recall (note: this redefines the sklearn f1_score imported earlier)
def f1_score(pr, re):
return 2 * ((pr * re) / (pr + re))
pre = Precision()
re = Recall()
acc = BinaryAccuracy()
bce = BinaryCrossentropy()
for batch in test_dataset.as_numpy_iterator():
X, y = batch
yhat = my_reloaded_model.predict(X)
pre.update_state(y, yhat)
re.update_state(y, yhat)
acc.update_state(y, yhat)
bce.update_state(y, yhat)
print(pre.result(), re.result(), acc.result(), bce.result())
f1_score(pre.result().numpy(), re.result().numpy())
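# A hedged follow-up sketch reusing the classification_report import from above:
# collect thresholded predictions over the test batches and print a per-class report.
y_true_all, y_pred_all = [], []
for X, y in test_dataset.as_numpy_iterator():
    probs = my_reloaded_model.predict(X, verbose=0)
    y_true_all.extend(y.tolist())
    y_pred_all.extend((probs.ravel() > 0.5).astype(int).tolist())
print(classification_report(y_true_all, y_pred_all, target_names=class_names))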
|
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
tweets = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
# tweets = pd.read_csv(r'C:\Users\TeYan\OneDrive\Work\Kaggle\Tweets_Disaster\Data\train.csv')
# tweets = pd.read_csv('/Users/teyang/OneDrive/Work/Kaggle/Tweets_Disaster/Data/train.csv')
tweets.head()
# ## Summary Statistics and Distribution of Class
tweets.isnull().sum().plot(kind="bar")
# Location has lots of NaN values and would not be a good/useful feature, unless we have a priori knowledge of where a disaster occurred. Furthermore, some of the values are not in a consistent format, so it would be quite time-consuming to clean.
# Keyword has NaNs as well, but can be imputed with 'None'.
import seaborn as sns
import matplotlib.pyplot as plt
color = [sns.xkcd_rgb["pale red"], sns.xkcd_rgb["medium blue"]]
sns.countplot("target", data=tweets, palette=color)
plt.gca().set_ylabel("Samples")
# ## Exploratory Data Analysis of Tweets
# ### Distribution of Character, Word and Sentence Frequency
# import nltk
# nltk.download('punkt')
from nltk import word_tokenize, sent_tokenize
# count number of characters in each tweet
tweets["char_len"] = tweets.text.str.len()
# count number of words in each tweet
word_tokens = [len(word_tokenize(tweet)) for tweet in tweets.text]
tweets["word_len"] = word_tokens
# count number of sentence in each tweet
sent_tokens = [len(sent_tokenize(tweet)) for tweet in tweets.text]
tweets["sent_len"] = sent_tokens
plot_cols = ["char_len", "word_len", "sent_len"]
plot_titles = ["Character Length", "Word Length", "Sentence Length"]
plt.figure(figsize=(20, 4))
for counter, i in enumerate([0, 1, 2]):
plt.subplot(1, 3, counter + 1)
sns.distplot(
tweets[tweets.target == 1][plot_cols[i]], label="Disaster", color=color[1]
).set_title(plot_titles[i])
sns.distplot(
tweets[tweets.target == 0][plot_cols[i]], label="Non-Disaster", color=color[0]
)
plt.legend()
# Investigate the Outliers
tweets[tweets.sent_len > 8]
tweets[tweets.word_len > 50]
# Some of the outliers, such as tweets with sentence length > 8, contain a lot of punctuation
# ### Top Most Common Stopwords
## Plot most common stopwords
# nltk.download('stopwords')
from nltk.corpus import stopwords
stop = set(stopwords.words("english"))
# Get all the word tokens in dataframe for Disaster and Non-Disaster
corpus0 = [] # Non-Disaster
[
corpus0.append(word.lower())
for tweet in tweets[tweets.target == 0].text
for word in word_tokenize(tweet)
]
corpus1 = [] # Disaster
[
corpus1.append(word.lower())
for tweet in tweets[tweets.target == 1].text
for word in word_tokenize(tweet)
]
# Function for counting top stopwords in a corpus
def count_top_stopwords(corpus):
stopwords_freq = {}
for word in corpus:
if word in stop:
if word in stopwords_freq:
stopwords_freq[word] += 1
else:
stopwords_freq[word] = 1
topwords = sorted(stopwords_freq.items(), key=lambda item: item[1], reverse=True)[
:10
] # get the top 10 stopwords
x, y = zip(*topwords) # get key and values
return x, y
x0, y0 = count_top_stopwords(corpus0)
x1, y1 = count_top_stopwords(corpus1)
# Plot bar plot of top stopwords for each class
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
plt.bar(x0, y0, color=color[0])
plt.title("Top Stopwords for Disaster Tweets")
plt.subplot(1, 2, 2)
plt.bar(x1, y1, color=color[1])
plt.title("Top Stopwords for Non-Disaster Tweets")
# There are lots of occurrences of stopwords. These should be removed as they do not help predict the target.
# ### Top Most Common Punctuations
## Plot most common punctuations
from string import punctuation
# Get all the punctuations in dataframe for Disaster and Non-Disaster
corpus0 = [] # Non-Disaster
[corpus0.append(c) for tweet in tweets[tweets.target == 0].text for c in tweet]
corpus0 = list(
filter(lambda x: x in punctuation, corpus0)
) # use filter to select only punctuations
corpus1 = [] # Disaster
[corpus1.append(c) for tweet in tweets[tweets.target == 1].text for c in tweet]
corpus1 = list(filter(lambda x: x in punctuation, corpus1))
from collections import Counter
x0, y0 = zip(*Counter(corpus0).most_common())
x1, y1 = zip(*Counter(corpus1).most_common())
# Plot bar plot of top punctuations for each class
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
plt.bar(x0, y0, color=color[0])
plt.title("Top Punctuations for Disaster Tweets")
plt.subplot(1, 2, 2)
plt.bar(x1, y1, color=color[1])
plt.title("Top Punctuations for Non-Disaster Tweets")
# The most common punctuation is the slash, which usually comes from links ('http://t.co/'). URLs should be removed, as should most punctuation, with the exception of '!' and '?', which signal the intensity or tone of a tweet.
# ### Top Most Common Words
## Plot most common words
import re
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
stop = ENGLISH_STOP_WORDS.union(stop) # combine stop words from different sources
# function for removing URLs and non-alphanumeric characters from text
def remove_url(txt):
return " ".join(re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt).split())
# Get all the word tokens in dataframe for Disaster and Non-Disaster
# - remove url, tokenize tweet into words, lowercase words
corpus0 = [] # Non-Disaster
[
corpus0.append(word.lower())
for tweet in tweets[tweets.target == 0].text
for word in word_tokenize(remove_url(tweet))
]
corpus0 = list(
filter(lambda x: x not in stop, corpus0)
) # use filter to unselect stopwords
corpus1 = [] # Disaster
[
corpus1.append(word.lower())
for tweet in tweets[tweets.target == 1].text
for word in word_tokenize(remove_url(tweet))
]
corpus1 = list(
filter(lambda x: x not in stop, corpus1)
) # use filter to unselect stopwords
# Create df for word counts to use sns plots
a = Counter(corpus0).most_common()
df0 = pd.DataFrame(a, columns=["Word", "Count"])
a = Counter(corpus1).most_common()
df1 = pd.DataFrame(a, columns=["Word", "Count"])
# Plot for Disaster and Non-Disaster
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
sns.barplot(x="Word", y="Count", data=df0.head(10)).set_title(
"Most Common Words for Non-Disasters"
)
plt.xticks(rotation=45)
plt.subplot(1, 2, 2)
sns.barplot(x="Word", y="Count", data=df1.head(10)).set_title(
"Most Common Words for Disasters"
)
plt.xticks(rotation=45)
# Disaster tweets contain more words related to actual disasters, but the text still needs more cleaning. The word 'amp' is left over from HTML-escaped ampersands ('&amp;'), and contractions such as 'im' will need to be expanded as well.
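# One way (a small sketch) to handle the '&amp;' artefact is to unescape HTML
# entities before tokenizing, so '&amp;' becomes a plain '&':
import html
print(html.unescape("Fire &amp; rescue teams deployed"))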
# ## Wordcloud for Hashtags (Just for fun)
def clean(word):
for p in punctuation:
word = word.replace(p, "")
return word
from wordcloud import WordCloud
def wc(target):
hashtag = [
clean(w[1:].lower())
for tweet in tweets[tweets.target == target].text
for w in tweet.split()
if "#" in w and w[0] == "#"
]
hashtag = " ".join(hashtag)
my_cloud = WordCloud(background_color="white", stopwords=stop).generate(hashtag)
plt.subplot(1, 2, target + 1)
plt.imshow(my_cloud, interpolation="bilinear")
plt.axis("off")
plt.figure(figsize=(15, 4))
wc(0)
wc(1)
# # Meta-Feature Engineering
# * polarity - range of [-1,1] where 1 denotes positivity and -1 denotes negativity
# * subjectivity - range of [0,1] where 1 denotes personal opinions and 0 denotes factual info
# * exclaimation_num - number of exclamation marks in tweet
# * questionmark_num - number of question marks in tweet
# * hash_num - number of hashtags (#) in tweet
# * mention_num - number of mentions (@) in tweet
from textblob import TextBlob
# polarity and subjectivity
tweets["polarity"] = [TextBlob(tweet).sentiment.polarity for tweet in tweets.text]
tweets["subjectivity"] = [
TextBlob(tweet).sentiment.subjectivity for tweet in tweets.text
]
# exclaimation and question marks
tweets["exclaimation_num"] = [tweet.count("!") for tweet in tweets.text]
tweets["questionmark_num"] = [tweet.count("?") for tweet in tweets.text]
# count number of hashtags
# Function for counting number of hashtags and mentions
def count_url_hashtag_mention(text):
urls_num = len(
re.findall(
"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
text,
)
)
word_tokens = text.split()
hash_num = len(
[word for word in word_tokens if word[0] == "#" and word.count("#") == 1]
) # only appears once in front of word
mention_num = len(
[word for word in word_tokens if word[0] == "@" and word.count("@") == 1]
) # only appears once in front of word
return urls_num, hash_num, mention_num
url_num, hash_num, mention_num = zip(
*[count_url_hashtag_mention(tweet) for tweet in tweets.text]
)
tweets = tweets.assign(url_num=url_num, hash_num=hash_num, mention_num=mention_num)
# # Data Cleaning
# * Replace NaNs with 'None'
# * Expand Contractions
## Replace NaNs with 'None'
tweets.keyword.fillna("None", inplace=True)
## Expand Contractions
# Function for expanding most common contractions https://stackoverflow.com/questions/19790188/expanding-english-language-contractions-in-python
def decontraction(phrase):
# specific
phrase = re.sub(r"won\'t", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
tweets.text = [decontraction(tweet) for tweet in tweets.text]
# Count Number of Hashtags
# Function for counting number of hashtags and mentions
def count_hashtag_mention(text):
word_tokens = text.split()
hash_num = len(
[word for word in word_tokens if word[0] == "#" and word.count("#") == 1]
) # only appears once in front of word
mention_num = len(
[word for word in word_tokens if word[0] == "@" and word.count("@") == 1]
) # only appears once in front of word
return hash_num, mention_num
hash_num, mention_num = zip(*[count_hashtag_mention(tweet) for tweet in tweets.text])
tweets = tweets.assign(hash_num=hash_num, mention_num=mention_num)
print(tweets.text[6])
|
# Spam Detection using Naive Bayes
# **Importing Important Library**
import pandas as pd
# **Reading CSV File**
df = pd.read_csv(
"/kaggle/input/spam-text-message-classification/SPAM text message 20170820 - Data.csv"
)
df.head()
# **Using GroupBy() Function to Categorize the Data**
df.groupby("Category").describe()
# **Changing Category Value from Text to Number**
df["spam"] = df["Category"].apply(lambda x: 1 if x == "spam" else 0)
df.head()
# **Dropping the Category Column**
df.drop(["Category"], axis="columns", inplace=True)
df.head()
# **Creating Dependent and Independent Variable**
X = df.Message
y = df.spam
# **Train Test Split**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# **Importing Multinomial Naive Bayes Model**
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
# **Importing CountVectorizer() Method**
from sklearn.feature_extraction.text import CountVectorizer
v = CountVectorizer()  # standalone instance for reference; the Pipeline below builds its own
# **Importing Sklearn Pipeline**
from sklearn.pipeline import Pipeline
clf = Pipeline([("vectorizer", CountVectorizer()), ("nb", MultinomialNB())])
# **Fitting the Model**
clf.fit(X_train, y_train)
# **Checking the Score of Model**
clf.score(X_test, y_test)
# **Predicting the E-Mails as Spam or Ham**
emails = [
"Hey mohan, can we get together to watch footbal game tomorrow?",
"Upto 20% discount on parking, exclusive offer just for you. Dont miss this reward!",
]
clf.predict(emails)
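# **Follow-up (a sketch): Class Probabilities**
# The same pipeline also exposes predict_proba (MultinomialNB supports it), which gives
# spam probabilities rather than hard 0/1 labels:
clf.predict_proba(emails)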
|
# # Imports
import time
import requests
import os
from zipfile import ZipFile
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.layers import (
Dense,
Dropout,
Input,
Activation,
Conv2D,
MaxPooling2D,
Flatten,
)
from tensorflow.python.keras import Sequential
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import *
# # HTTPS Requests, to send status via TelegramBot
def telegram_bot_sendtext(bot_message):
bot_token = "862297446:AAFooZ12e_PUVe1Nv59uzl3ceO_TqiTCnoc"
bot_chatID = "660201452"
send_text = (
"https://api.telegram.org/bot"
+ bot_token
+ "/sendMessage?chat_id="
+ bot_chatID
+ "&parse_mode=Markdown&text="
+ bot_message
)
response = requests.get(send_text)
return response.json()
telegram_bot_sendtext("Start Script")
# # Load Data
# 1. Add the *Dogs vs. Cats Redux: Kernels Edition* on the top right __+ Add Data__
#
# Now we have the following folder structure:
#
# ├── Dogs vs. Cats Redux: Kernels Edition
# │ ├── test.zip
# │ ├── train.zip
# │ └── sample_submission.csv
#
#
# 2. Next we need to extract train and test data. We will use __ZipFile__ for that. Since the input directory is a __read only__ directory, we have to extract the zip files into the output directory.
with ZipFile("../input/dogs-vs-cats-redux-kernels-edition/train.zip") as zipobj:
zipobj.extractall("../working")
with ZipFile("../input/dogs-vs-cats-redux-kernels-edition/test.zip") as zipobj:
zipobj.extractall("../working")
train_data = os.listdir("../working/train")
test_data = os.listdir("../working/test")
os.mkdir("../working/train/cats")
os.mkdir("../working/train/dogs")
os.mkdir("../working/test/cats")
os.mkdir("../working/test/dogs")
os.mkdir("../working/models")
for i in train_data:
# print(i)
if (i != "cats") and (i != "dogs"):
if "cat" in i:
os.replace("../working/train/" + i, "../working/train/cats/" + i)
elif "dog" in i:
os.replace("../working/train/" + i, "../working/train/dogs/" + i)
# 3. After extraction and sorting we have the following folder structure:
#
# ├── working
# │ ├── train
# │ │ ├── cats
# │ │ └── dogs
# │ ├── test
# │ └── sample_submission.csv
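# A quick sanity check (a sketch) that the images were sorted into the class folders:
print("train/cats images:", len(os.listdir("../working/train/cats")))
print("train/dogs images:", len(os.listdir("../working/train/dogs")))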
# # Data Generator
bs = 32
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.2,
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(
"../working/train",
target_size=(128, 128),
batch_size=bs,
class_mode="binary",
subset="training",
) # set as training data
validation_generator = train_datagen.flow_from_directory(
"../working/train",
target_size=(128, 128),
batch_size=bs,
class_mode="binary",
subset="validation",
) # set as validation data
test_generator = test_datagen.flow_from_directory(
"../working", target_size=(128, 128), batch_size=1, classes=["test"]
)
# # Define and Train model
# these values must match the model defined in vgg() below, because the
# checkpoint filename is built before the model is created
lr = 0.001
modelname = "VGG"
model_destination = "../working/models/"
optimizer = "SGD"
# Callbacks
checkpoint = ModelCheckpoint(
f"{model_destination}{modelname}_bs_{bs}_{optimizer}_{lr}.h5",
monitor="val_loss",
verbose=0,
save_best_only=True,
save_weights_only=False,
)
es = EarlyStopping(
monitor="val_loss", patience=3, baseline=None, restore_best_weights=False
)
callbacks = [checkpoint, es]
# define cnn model
def vgg():
model = Sequential()
model.add(
Conv2D(
32,
(3, 3),
activation="relu",
kernel_initializer="he_uniform",
padding="same",
input_shape=(128, 128, 3),
)
)
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128, activation="relu", kernel_initializer="he_uniform"))
model.add(Dense(1, activation="sigmoid"))
    # compile model (modelname/optimizer/lr are already set at module level above,
    # so nothing needs to be recorded here)
    opt = SGD(learning_rate=0.001, momentum=0.9)
    model.compile(optimizer=opt, loss="binary_crossentropy", metrics=["accuracy"])
return model
classification_model = vgg()
print("Done!")
training = classification_model.fit(
train_generator, epochs=2, validation_data=validation_generator, callbacks=callbacks
)
history = training.history
telegram_bot_sendtext("Training finished!")
# # Save Model and training history
# classification_model.save(f"{model_destination}{modelname}_bs_{bs}_{optimizer}_{lr}.h5")
hist_df = pd.DataFrame(history)
hist_df.to_csv(f"history{modelname}_bs_{bs}_{optimizer}_{lr}.csv")
telegram_bot_sendtext(str(history))
telegram_bot_sendtext("Model saved!")
# # Evaluate Training
plt.style.use("dark_background")
# history['loss'] = [1,2,3,4,5]
# history['accuracy'] = [5,4,3,2,1]
# history['val_loss'] = [2,4,6,8,10]
# history['val_accuracy'] = [10,8,6,4,2]
# history = training.history
plt.figure(num=None, figsize=(8, 6), dpi=80)
plt.title("Loss")
plt.plot(history["loss"], color="red")
plt.plot(history["val_loss"], color="green")
plt.legend(["Training Loss", "Validation Loss"])
plt.show()
plt.figure(num=None, figsize=(8, 6), dpi=80)
plt.title("Accuracy")
plt.plot(history["accuracy"], color="red")
plt.plot(history["val_accuracy"], color="green")
plt.legend(["Training accuracy", "Validation accuracy"])
plt.show()
from sklearn.metrics import confusion_matrix
import seaborn as sn
import numpy as np
validation_generator.shuffle = False
validation_generator.index_array = None
y_pred = classification_model.predict(validation_generator)
y_label = validation_generator.classes
# Save y_pred and y_label into Dataframe
# pd.DataFrame({'y_pred':y_pred, 'y_label':y_label})
# Confusion MATRIX cat=0 dog=1
# roundpredictions
y_pred_bin = np.where(y_pred > 0.5, 1, 0)
[[tn, fp], [fn, tp]] = confusion_matrix(
y_label, y_pred_bin, labels=[0, 1]
) # normalize='all')
array = [[tn / (tn + fp), fp / (tn + fp)], [fn / (tp + fn), tp / (tp + fn)]]
df_cm = pd.DataFrame(array, index=["Cat", "Dog"], columns=["Cat", "Dog"])
plt.figure(figsize=(10, 7))
sn.heatmap(df_cm, annot=True)
# # Prepare Submission
predicted = classification_model.predict(test_generator)
sample_submission = pd.read_csv(
"../input/dogs-vs-cats-redux-kernels-edition/sample_submission.csv"
)
sample_submission.label = predicted
sample_submission.to_csv("next_submission.csv", index=False)
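# Optional (a hedged sketch): this competition is scored with log loss, which punishes
# confident wrong predictions heavily, so clipping the extremes before submitting is a
# common safeguard. The clipping bounds here are an arbitrary illustration.
clipped = np.clip(predicted, 0.02, 0.98)
sample_submission.label = clipped
sample_submission.to_csv("clipped_submission.csv", index=False)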
# # Delete Dataset from working directory
# We delete it in order to keep our output section clean
import shutil
shutil.rmtree("../working/train")
shutil.rmtree("../working/test")
# print(test_generator.__getitem__(0))
# import base64
# data = open(f"{model_destination}{modelname}_bs_{bs}_{optimizer}_{lr}.h5", "rb").read()
# data = open(f"158d.jpg", "rb").read()
# no_enc = repr(data)
# encoded = base64.b64encode(no_enc)
# telegram_bot_sendtext(data)
# print(encoded)
# telegram_bot_sendtext('Model as Text (BASE64)')
|