import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# 1. Package installation and loading
# Packages for data manipulation
import numpy as np
import pandas as pd
# Packages for visualization
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import missingno
# Statistics
import scipy
from scipy.stats import normaltest
from scipy.stats import chi2_contingency
# Feature Engineering
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
import category_encoders as ce
# Packages to suppress warnings
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
print("Package imports - OK!")
# Versions of the packages used in this notebook
# 2. Loading and getting to know the data
# Load the dataset
df = pd.read_csv(
"/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv"
)
print(
    "Number of rows: " + str(df.shape[0]) + "\nNumber of columns: " + str(df.shape[1])
)
# Dataframe column names
df.columns
# Data sample
df.head()
# Dataframe information
df.info()
# 3. Exploratory Data Analysis
df.head()
# A quick look at the data shows that the columns can be split into categorical and numerical ones:
# * Categorical
# 1. city
# 3. gender
# 4. relevent_experience
# 5. enrolled_university
# 6. education_level
# 7. major_discipline
# 9. company_size
# 10. company_type
# 11. last_new_job
# 13. target
# * Numerical
# 2. city_development_index
# 8. experience
# 12. training_hours
#
# Checking the distribution of the categorical variables
df.describe(include=object).drop(columns=["experience"])
# Plot
# Figure size
plt.figure(figsize=(18, 30))
# List of columns
column_list = list(df.columns.values)[3:12]
# Counter
A = 0
# Loop
for i in column_list:
A += 1
plt.subplot(5, 2, A)
ax = sns.countplot(data=df.fillna("NaN"), x=i)
plt.title(i, fontsize=15)
for p in ax.patches:
ax.annotate(
f"\n{p.get_height()}",
(p.get_x() + 0.4, p.get_height()),
ha="center",
color="black",
size=12,
)
if A >= 7:
plt.xticks(rotation=45)
# Layout
plt.tight_layout(h_pad=2)
# Checking the distribution of the numerical variables
# To describe the numerical columns we first need to convert the 'experience' column
# to the float type.
df.experience.unique()
replace_dict = {">20": 21, "<1": 1, "nan": 0}
df["experience"] = df["experience"].replace(replace_dict)
df["experience"] = df["experience"].astype("float64")
# Describing only the numerical data
df.describe().drop(columns=["enrollee_id", "target"])
# Figure
plt.figure(figsize=(17, 12))
# Subplots with histograms
plt.subplot(221)
sns.color_palette("hls", 8)
sns.histplot(df["city_development_index"], kde=True, color="green")
plt.title("CDI Histogram", fontsize=20)
plt.subplot(222)
sns.histplot(df["training_hours"], kde=True, color="magenta")
plt.title("Training Hours Histogram", fontsize=20)
# Subplots with boxplots
plt.subplot(223)
sns.boxplot(df["city_development_index"], orient="h", color="green")
plt.subplot(224)
sns.boxplot(df["training_hours"], orient="h", color="magenta")
plt.show()
# Figure
plt.figure(figsize=(10, 8))
# Subplots with histograms
plt.subplot(221)
sns.color_palette("hls", 8)
sns.histplot(df["experience"], kde=True, color="green")
plt.title("Histogram of experience", fontsize=20)
# Subplots with boxplots
plt.subplot(222)
sns.boxplot(df["experience"], orient="h", color="green")
plt.show()
# Run the D'Agostino-Pearson normality test for each column
for coluna in ["city_development_index", "experience", "training_hours"]:
    stat, p_valor = normaltest(df[coluna].dropna())
    print(f"Column {coluna}:")
    print(f"Test statistic: {stat:.4f}")
    print(f"p-value: {p_valor:.4f}")
    if p_valor < 0.05:
        print("The data do not follow a normal distribution.\n")
    else:
        print("The data follow a normal distribution.\n")
# So far we can say that:
# * None of the three numerical columns (city_development_index, experience, training_hours) follows a normal (Gaussian) distribution, so we will have to rely on other, non-parametric methods for the analysis (a short sketch follows below).
# * Regarding training_hours, the distribution has a long tail toward higher values; since those values sit far from the mean, we can infer that some people take a very long time to finish the training or do not finish it within a typical time frame. Possible causes include, for example, frustrated expectations about the course content.
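# A minimal sketch (assuming we want to compare the non-normal training_hours
# distribution between the two target classes): the non-parametric Mann-Whitney U
# test from scipy can be used instead of a parametric t-test.
from scipy.stats import mannwhitneyu

group0 = df.loc[df["target"] == 0, "training_hours"].dropna()
group1 = df.loc[df["target"] == 1, "training_hours"].dropna()
u_stat, u_pvalue = mannwhitneyu(group0, group1)
print(f"Mann-Whitney U: statistic={u_stat:.1f}, p-value={u_pvalue:.4f}")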
# Checking the correlation between the variables
df_numerical = df.copy()
df_numerical.head()
df_numerical["experience"].value_counts()
df_numerical["city"].value_counts()
df_numerical["city"] = df["city"].str.replace("city_", "")
df_numerical["city"].value_counts()
df_numerical["last_new_job"].value_counts()
# Convert the last_new_job variable to numeric
df_numerical["last_new_job"] = np.where(
df_numerical["last_new_job"] == "never", 0, df_numerical["last_new_job"]
)
df_numerical["last_new_job"] = np.where(
df_numerical["last_new_job"] == ">4", 5, df_numerical["last_new_job"]
)
df_numerical["last_new_job"] = df_numerical["last_new_job"].astype(float)
df_numerical["last_new_job"].value_counts()
df_numerical["education_level"].value_counts()
conversion_dict = {
"Primary School": 0,
"High School": 1,
"Graduate": 2,
"Masters": 3,
"Phd": 4,
}
# apply the conversion to the 'education_level' column
df_numerical["education_level"] = df_numerical["education_level"].map(conversion_dict)
df_numerical["education_level"].value_counts()
df_numerical.info()
df_numerical.drop("enrollee_id", axis=1).corr("spearman")
# Heatmap
plt.figure(figsize=(5, 5))
sns.heatmap(
df_numerical.drop("enrollee_id", axis=1).corr("spearman"), annot=True, cmap="YlGnBu"
)
plt.title("Correlation Map of the Numerical Variables\n", fontsize=15)
plt.show()
# Points to note about the variable correlations:
# * The "city_development_index" column shows a moderate negative correlation with the target variable "target", suggesting that the lower the city development index, the higher the probability that the candidate is looking for a job change.
# * The "experience" column also shows a moderate negative correlation with the target variable "target", suggesting that the less experience a candidate has, the higher the probability that they are looking for a job change.
# * The "last_new_job" column shows a weak negative correlation with the target variable "target", suggesting that the longer the time since the last job change, the lower the probability that the candidate is looking for a job change.
# * The "education_level" and "enrolled_university" columns do not show a strong correlation with the target variable "target", suggesting that education and the type of university enrollment are not decisive factors in the decision to change jobs.
# * The remaining columns show weak or no correlation with the target variable "target".
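# For reference, the quantities computed in the loop below are the Weight of Evidence
# and the Information Value: for each category c of a feature,
# WoE_c = ln( P(c | target=1) / P(c | target=0) ), and
# IV = sum_c ( P(c | target=1) - P(c | target=0) ) * WoE_c.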
# Loop
for i in df.drop(
columns=[
"target",
"enrollee_id",
"city",
"city_development_index",
"training_hours",
"experience",
"last_new_job",
"company_size",
]
).columns:
df_woe_iv = (
pd.crosstab(df[i], df["target"], normalize="columns")
.assign(woe=lambda dfx: np.log(dfx[1] / dfx[0]))
.assign(iv=lambda dfx: np.sum(dfx["woe"] * (dfx[1] - dfx[0])))
)
print(df_woe_iv, "\n------------------------------------------------------------")
# Information Value plot
# Categorical variables
columns_cat = df.drop(
columns=[
"target",
"enrollee_id",
"city",
"city_development_index",
"training_hours",
"experience",
"last_new_job",
"company_size",
]
).columns
# List for the IV values
iv = []
# Loop
for i in columns_cat:
df_woe_iv = (
pd.crosstab(df[i], df["target"], normalize="columns")
.assign(woe=lambda dfx: np.log(dfx[1] / dfx[0]))
.assign(iv=lambda dfx: np.sum(dfx["woe"] * (dfx[1] - dfx[0])))
)
iv.append(df_woe_iv["iv"][0])
# Dataframe
df_iv = (
pd.DataFrame({"Features": columns_cat, "iv": iv})
.set_index("Features")
.sort_values(by="iv")
)
# Plot
# Figure
plt.figure(figsize=(10, 12))
df_iv.plot(
    kind="barh", title="Information Value of the Categorical Variables", colormap="Dark2"
)
for index, value in enumerate(list(round(df_iv["iv"], 3))):
plt.text((value), index, str(value))
plt.legend(loc="lower right")
plt.show()
# * Based on its IV value, `enrolled_university` is a medium predictor, `relevent_experience` and `education_level` are weak predictors, and the others are useless for prediction (rule-of-thumb IV bands are sketched below).
# * Most candidates are not currently enrolled in any university course (enrolled_university = "no_enrollment").
# * Candidates with relevant experience (relevent_experience = "Has relevent experience") show a higher proportion of job seeking compared with those without relevant experience.
# * Most candidates have a graduate education level (education_level = "Graduate"), followed by a master's degree (education_level = "Masters").
# * Candidates with a high-school education level (education_level = "High School") show a lower proportion of job seeking compared with those with higher education levels.
# * Based on this information, we can infer that candidates currently enrolled in university courses are less likely to be looking for a job, while those without relevant experience are more likely to be looking for one. In addition, candidates with higher education levels, such as postgraduates, are more likely to be looking for a job than those with lower education levels, such as high school.
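# A rough rule-of-thumb sketch (assumption: the IV bands commonly used in credit
# scoring; the exact cut-offs are conventions, not something derived from this dataset).
def iv_strength(iv_value):
    # Map an Information Value to a qualitative predictive-power label.
    if iv_value < 0.02:
        return "useless"
    elif iv_value < 0.1:
        return "weak"
    elif iv_value < 0.3:
        return "medium"
    return "strong"

print(df_iv["iv"].apply(iv_strength))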
# Round of missing-value identification and data cleaning
# Missing values per column
null_df = df.isna().sum().reset_index()
print(null_df)
# Figure
plt.figure(figsize=(15, 5))
# Barplot
ax = sns.barplot(x="index", y=0, data=null_df, palette="husl")
plt.xlabel("Attributes", fontsize=12)
plt.ylabel("Missing Value Count", fontsize=12)
plt.xticks(rotation=45)
plt.title("Missing Values Plot", fontsize=15)
for p in ax.patches:
ax.annotate(
f"\n{p.get_height()}",
(p.get_x() + 0.4, (p.get_height())),
ha="center",
color="black",
size=11,
)
plt.show()
# Generate the visualization
# Dataframe
df_nan = pd.DataFrame(df.isna().sum())
# Plot - Missing Values Map
if df.isna().any(axis=None):
missingno.matrix(df[df_nan[df_nan[0] > 0].index])
plt.show()
df["enrollee_id"].duplicated().sum()
# Figure
plt.figure(figsize=(17, (100) / 20))
plt.subplot(121)
plt.pie(
round(df["target"].value_counts() / len(df) * 100, 2),
labels=list(df["target"].value_counts().index),
autopct="%.2f%%",
explode=(0, 0.1),
)
plt.axis("equal")
plt.title("Target Imbalance Ratio", size=15)
plt.subplot(122)
ax = sns.countplot(data=df, x="target")
plt.title("Barplot Target Label", fontsize=15)
for p in ax.patches:
ax.annotate(
f"\n{p.get_height()}",
(p.get_x() + 0.4, p.get_height()),
ha="center",
va="top",
color="white",
size=12,
)
# Imbalanced data are data in which the distribution of the target class is disproportionate, i.e., one class has significantly more examples than the other. This imbalance can cause problems during modeling, because the learning algorithm may struggle to learn the minority class, leading to low precision and recall for that class. Moreover, metrics such as accuracy can be misleading on imbalanced datasets, since simply predicting the majority class can yield high accuracy even if the model is useless for the minority class. A common mitigation, class weighting, is sketched below.
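# A minimal sketch (assumption: one common way to handle this imbalance is to weight
# the classes inversely to their frequency; scikit-learn's compute_class_weight does
# this, and the result can be passed to many estimators via a class_weight parameter).
from sklearn.utils.class_weight import compute_class_weight

classes = np.unique(df["target"])
weights = compute_class_weight(class_weight="balanced", classes=classes, y=df["target"])
print(dict(zip(classes, weights)))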
# Data Cleaning and Processing
df.columns
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import re
import chardet
import operator
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from nltk import pos_tag
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import LSTM, Dense, Bidirectional
from tensorflow.keras.layers import Dropout, SpatialDropout1D
from tqdm import tqdm
from wordcloud import WordCloud, STOPWORDS
from sklearn.model_selection import train_test_split
with open("../input/covid-19-nlp-text-classification/Corona_NLP_train.csv", "rb") as f:
result = chardet.detect(f.read())
print(result)
f.close()
# other encodings such as cp1254, latin-1 and iso-8859-1 can also be tried
train_data = pd.read_csv(
"../input/covid-19-nlp-text-classification/Corona_NLP_train.csv",
encoding="iso-8859-1",
)
test_data = pd.read_csv(
"../input/covid-19-nlp-text-classification/Corona_NLP_test.csv",
encoding="iso-8859-1",
)
train_data.head()
train_data.tail()
test_data.head()
train_data.isna().sum()
train_data.shape, test_data.shape
train_data.drop(["UserName", "ScreenName", "Location", "TweetAt"], axis=1, inplace=True)
test_data.drop(["UserName", "ScreenName", "Location", "TweetAt"], axis=1, inplace=True)
train_data.head()
train_data.Sentiment.value_counts().plot(kind="bar")
pd.set_option("display.max_colwidth", None)
train_data.OriginalTweet[0:5]
# extract hashtag
train_data["hashtag"] = train_data["OriginalTweet"].apply(
lambda x: re.findall(r"#(\w+)", x)
)
# data pre processing
# extract url used
train_data["uri"] = train_data["OriginalTweet"].apply(
lambda x: re.findall(
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+", x
)
)
train_data["handler"] = train_data["OriginalTweet"].apply(
lambda x: re.findall(r"@(\w+)", x)
)
# Lemmatize Words
def get_pos_tag(tag):
if tag.startswith("J"):
return wordnet.ADJ
elif tag.startswith("V"):
return wordnet.VERB
elif tag.startswith("N"):
return wordnet.NOUN
elif tag.startswith("R"):
return wordnet.ADV
else:
# As default pos in lemmatization is Noun
return wordnet.NOUN
lemmatizer = WordNetLemmatizer()
pos_tag(["going"])
# clean the data now
regex = [
r"<[^>]+>", # HTML tags
r"@(\w+)", # @-mentions
r"#(\w+)", # hashtags
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+", # URLs
r"[^0-9a-z #+_\\r\\n\\t]", # BAD SYMBOLS
]
REPLACE_URLS = re.compile(
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+"
)
REPLACE_HASH = re.compile(r"#(\w+)")
REPLACE_AT = re.compile(r"@(\w+)")
REPLACE_HTML_TAGS = re.compile(r"<[^>]+>")
# REPLACE_DIGITS = re.compile(r'\d+')
# REPLACE_BY = re.compile(r"[/(){}\[\]\|,;.:?\-\'\"$]")
REPLACE_BY = re.compile(r"[^a-z0-9\-]")
STOPWORDS = set(stopwords.words("english"))
# tokens_re = re.compile(r'('+'|'.join(regex)+')', re.VERBOSE | re.IGNORECASE)
sentences = [] # for Word2Vec model
def clean_text(text):
text = text.lower()
text = REPLACE_HTML_TAGS.sub(" ", text)
text = REPLACE_URLS.sub("", text)
text = REPLACE_HASH.sub("", text)
text = REPLACE_AT.sub("", text)
# text = REPLACE_DIGITS.sub(' ', text)
text = REPLACE_BY.sub(" ", text)
text = " ".join(
lemmatizer.lemmatize(word.strip(), get_pos_tag(pos_tag([word.strip()])[0][1]))
for word in text.split()
if word not in STOPWORDS and len(word) > 3
)
# sentences.append(text.split())
return (text, text.split())
train_data["Tweet"], train_data["sentences"] = zip(
*train_data["OriginalTweet"].apply(clean_text)
)
# from itertools import izip
test_data["Tweet"], test_data["sentences"] = zip(
*test_data["OriginalTweet"].apply(clean_text)
)
# Now let's do some EDA on the data:
# how the sentiments relate to hashtags and user handles
# ext_pos = train_data[train_data['Sentiment'] == 'Extremely Positive']
pos = train_data[train_data["Sentiment"] == "Positive"]
neu = train_data[train_data["Sentiment"] == "Neutral"]
neg = train_data[train_data["Sentiment"] == "Negative"]
# ext_neg = train_data[train_data["Sentiment"] == "Extremely Negative"]
fig, axs = plt.subplots(2, 3, figsize=(20, 20), subplot_kw=dict(aspect="equal"))
for i, param in enumerate(["hashtag", "handler"]): # , "uri"]):
for j, df in enumerate([pos, neu, neg]): # , ext_neg, ext_pos,]:
hash_count = {}
for tag in df[param]:
for value in tag:
hash_count[value] = hash_count.get(value, 0) + 1
data = sorted(hash_count.items(), key=operator.itemgetter(1))[-10:]
axs[i, j].pie(
[value[1] for value in data],
labels=[value[0] for value in data],
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
axs[i, j].axis("equal")
axs[i, j].set_title(
"--Top {0} in {1}--".format(param, df["Sentiment"].values[0].upper()),
fontsize=22,
)
plt.show()
pd.set_option("display.max_colwidth", None)
train_data.Tweet[50:55]
# word Cloud
# for color, df in {"green":ext_pos, "yellow":pos, "white":neu, "pink":neg, "orange":ext_neg}.items():
for color, df in {"yellow": pos, "white": neu, "pink": neg}.items():
plt.figure(figsize=(18, 18))
wc_pos = WordCloud(
width=400, height=250, min_font_size=5, background_color=color, max_words=10000
).generate(" ".join(df["Tweet"]))
plt.title("word cloud for {0}".format(df["Sentiment"].values[0]), fontsize=25)
plt.imshow(wc_pos, interpolation="bilinear")
# max length (in characters) of the cleaned tweets, used as the padding length below
import numpy as np
max_len = np.max(train_data["Tweet"].apply(lambda x: len(x)))
max_len
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_data["Tweet"].values)
vocab_size = len(tokenizer.word_index) + 1
X = tokenizer.texts_to_sequences(train_data["Tweet"].values)
X = pad_sequences(X, maxlen=max_len, padding="post")
tokenizer.word_index
# pre-process the test data with the tokenizer fitted on the train data
X_test = tokenizer.texts_to_sequences(test_data["Tweet"].values)
X_test = pad_sequences(X_test, maxlen=max_len, padding="post")
# load the GloVe vectors in a dictionary:
# splitting on a single space avoids "ValueError: could not convert string to float: '.'" on some GloVe lines
embeddings_index = {}
glovefile = open("../input/glove42b300dtxt/glove.42B.300d.txt", "r", encoding="utf-8")
for line in tqdm(glovefile):
values = line.split(" ")
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
glovefile.close()
print("Found %s word vectors." % len(embeddings_index))
# create an embedding matrix for the words we have in the dataset
embedding_matrix = np.zeros((len(tokenizer.word_index) + 1, 300))
for word, index in tqdm(tokenizer.word_index.items()):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
X.shape
X_test.shape
encoding = {
"Extremely Negative": 0,
"Negative": 0,
"Neutral": 1,
"Positive": 2,
"Extremely Positive": 2,
}
labels = ["Negative", "Neutral", "Positive"]
train_data["Sentiment"].replace(encoding, inplace=True)
test_data["Sentiment"].replace(encoding, inplace=True)
labels = pd.get_dummies(train_data["Sentiment"]).columns
y = pd.get_dummies(train_data["Sentiment"]).values
y
y_test = pd.get_dummies(test_data["Sentiment"]).values
y_test.shape
# train valid split
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.20, random_state=12
)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
vector_features = 300
model = Sequential()
model.add(
Embedding(
vocab_size,
vector_features,
input_length=X.shape[1],
weights=[embedding_matrix],
trainable=False,
)
)
model.add(SpatialDropout1D(0.2))
model.add(
Bidirectional(
LSTM(300, activation="relu", dropout=0.3, recurrent_dropout=0.3),
input_shape=(vector_features, vocab_size),
)
)
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.8))
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.8))
model.add(Dense(3, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
epochs = 100
batch_size = 512
history = model.fit(
X_train,
y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(X_valid, y_valid),
callbacks=[EarlyStopping(monitor="val_loss", patience=3, min_delta=0.0001)],
)
# Model Accuracy
accuracy = history.history["accuracy"]
loss = history.history["loss"]
val_accuracy = history.history["val_accuracy"]
val_loss = history.history["val_loss"]
print(
    "training accuracy {0}% and training loss {1}%".format(
        accuracy[-1] * 100, loss[-1] * 100
    )
)
print(
    "validation accuracy {0}% and validation loss {1}%".format(
        val_accuracy[-1] * 100, val_loss[-1] * 100
    )
)
# plot
plt.plot(accuracy, "g", label="training accuracy")
plt.plot(val_accuracy, "r", label="validation accuracy")
plt.legend()
plt.show()
plt.plot(loss, "g", label="training loss")
plt.plot(val_loss, "r", label="validation loss")
plt.legend()
plt.show()
# y_pred = model.predict_classes(X_test)
# y_pred = np.argmax(model.predict(X_test), axis=-1)
y_pred = model.predict(X_test)
print(labels[np.argmax(y_test, 1)][100:120])
print(labels[np.argmax(y_pred, 1)][100:120])
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
cm = confusion_matrix(np.argmax(y_test, 1), np.argmax(y_pred, 1))
print(cm)
sns.heatmap(cm, annot=True)
# Let's go for Word2Vec
import time
import multiprocessing
from gensim.models import Word2Vec
cores = multiprocessing.cpu_count()
cores
w2v_model = Word2Vec(
min_count=5,
window=5,
size=300,
sample=6e-5,
alpha=0.01,
min_alpha=0.0007,
negative=7,
workers=cores - 1,
)
# pass the tokenized sentences (a nested list of words) to build the vocabulary
t = time.time()
w2v_model.build_vocab(train_data["sentences"], progress_per=10000)
print("Time to build vocab: {} mins".format(round((time.time() - t) / 60, 2)))
t = time.time()
w2v_model.train(
train_data["sentences"],
total_examples=w2v_model.corpus_count,
epochs=30,
report_delay=1,
)
print("Time to train the model: {} mins".format(round((time.time() - t) / 60, 2)))
# memory efficient
w2v_model.init_sims(replace=True)
w2v_model.wv.most_similar(positive=["walmart"])
w2v_model.wv.most_similar(positive=["covid"])
w2v_model.wv.most_similar(positive=["food"])
word_vectors = w2v_model.wv
print("length of word vector vocab {}".format(len(word_vectors.vocab)))
"walmart" in word_vectors.vocab.keys()
def word2idx(sent):
sent_id = []
for word in sent:
if word in w2v_model.wv.vocab.keys():
sent_id.append(w2v_model.wv.vocab[word].index)
return sent_id
def idx2word(sent_id):
sent = []
for idx in sent_id:
sent.append(w2v_model.wv.index2word[idx])
return sent
X_train_w2v = train_data["sentences"].apply(word2idx)
X_test_w2v = test_data["sentences"].apply(word2idx)
X_train_w2v_pad = pad_sequences(X_train_w2v, maxlen=max_len, padding="post")
X_test_w2v_pad = pad_sequences(X_test_w2v, maxlen=max_len, padding="post")
X_train_w2v_pad.shape, X_test_w2v_pad.shape
weights_w2v = w2v_model.wv.syn0
# w2v_model.wv.vectors.shape
vocab_size_w2v, vector_features_w2v = weights_w2v.shape
vocab_size_w2v, vector_features_w2v
# train valid split
X_train_w2v, X_valid_w2v, y_train_w2v, y_valid_w2v = train_test_split(
X_train_w2v_pad, y, test_size=0.20, random_state=12
)
print(X_train_w2v.shape, y_train_w2v.shape)
print(X_valid_w2v.shape, y_valid_w2v.shape)
model_w2v = Sequential()
model_w2v.add(
Embedding(
vocab_size_w2v,
vector_features_w2v,
input_length=X_train_w2v_pad.shape[1],
weights=[weights_w2v],
trainable=False,
)
)
model_w2v.add(SpatialDropout1D(0.2))
model_w2v.add(
Bidirectional(
LSTM(300, activation="relu", dropout=0.3, recurrent_dropout=0.3),
input_shape=(vector_features_w2v, vocab_size_w2v),
)
)
model_w2v.add(Dense(1024, activation="relu"))
model_w2v.add(Dropout(0.8))
model_w2v.add(Dense(1024, activation="relu"))
model_w2v.add(Dropout(0.8))
model_w2v.add(Dense(3, activation="softmax"))
model_w2v.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model_w2v.summary()
epochs = 100
batch_size = 512
history = model_w2v.fit(
X_train_w2v,
y_train_w2v,
epochs=epochs,
batch_size=batch_size,
validation_data=(X_valid_w2v, y_valid_w2v),
callbacks=[EarlyStopping(monitor="val_loss", patience=3, min_delta=0.0001)],
)
# It seems the Word2Vec embeddings were not trained well enough,
# possibly due to the limited vocabulary
|
# # Predicting Antibiotic Minimum Inhibitory Concentration using Artificial Neural Networks and Public Genomic Databases
# Artificial Intelligence (AI) based predictions for Anti-microbial Resistance (AMR) can be beneficial for diagnosis because they can reduce the time of clinical testing. Genome databases typically include genome data and metadata which can be used to train machine learning models to predict features of genomic samples. Here, I show how to develop a simple AI model that predicts the antibiotic Minimum Inhibitory Concentration (MIC) from genomic data and metadata using artificial Neural Networks (NN). Although the data available from genome platforms cover a broad range of MIC measurements, they are highly unbalanced and disconnected in terms of serial dilutions. However, by selecting well represented classes, rounding everything to a 2-fold dilution scheme and considering a refined genomic analysis, it is possible to reach a 90% confidence level (CL). Thus, here I present the minimal binary classification model but also discuss the model with more categories.
# Among the tasks covered in this study are:
# 1. Explore the datasets and define the categorical problem.
# 2. Implement a NN as a ML model.
# 3. Compute the CL of model predictions.
# ## 1 Data analysis
# The raw data used in this study consists of genome assemblies and clinical annotations for around 7,000 bacteria selected from the Patric platform (https://www.patricbrc.org/). Following the work of Nguyen et al. (https://doi.org/10.1128/JCM.01260-18), I intend to generalize it by including more public data and using a dense neural network. The Patric database provides users with uniformly annotated genome assemblies for the bacteria of most public concern. There, the genome sequences are stored in FASTA files and the clinical annotations in data tables. Furthermore, it is possible to filter the data for precise selection and download, and the Patric Genome ID allows easy linkage of data and metadata. Here, we select the most represented antibiotic MIC measurements for Salmonella bacteria. This includes all the public data available for 14 antibiotics.
# ### 1.1 Pre-processing
# In order to provide the model with spectral data, the genome assemblies will be represented by their $k$-mer counts, which will be used as the inputs, while the corresponding antibiotic MIC measurements will be the outputs. Here, $k$-mers are subsequences of length $k$ contained in a genome assembly, composed of nucleotides (i.e. A, T, G, and C). The number of $k$-mers in a biological sequence of length $L$ is $L - k + 1$, and the number of unique $k$-mers is smaller than the total number in a sample because of repetitions throughout the sequence. These repetitions are precisely the counts carrying the information required by the model. The value of $k$ reflects how deep and refined the genomic analysis is. For the purpose of this analysis we consider $k=6$ (a small counting sketch follows below).
# The produced $k$-mer counts were made public on the Kaggle platform together with the MIC metadata, which was selected among the other AMR phenotypes available in Patric. The $k$-mer counts were computed locally using only Python routines and Unix shell scripts. This allows thousands of genomes to be pre-processed automatically in a matter of a few hours on a regular desktop computer, transforming the genomic data into much lighter data. Nevertheless, the pre-processing time and the size of the $k$-mer data depend on the length $k$.
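# A minimal sketch (assumption: a toy sequence, not a real genome) of how k-mer counts
# can be produced with a sliding window and collections.Counter; counts of this kind
# are what the 6-mers_mics.csv file used below stores for each genome.
from collections import Counter

def kmer_counts(sequence, k=6):
    # A sequence of length L yields L - k + 1 (possibly repeated) k-mers.
    return Counter(sequence[i : i + k] for i in range(len(sequence) - k + 1))

print(kmer_counts("ATGCGTATGCGTAT", k=6))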
# ### 1.2 Exploration
# First of all, run the next cell to load the dataframe containing the $k$-mers counts and MIC measurements.
# Load the kmers-mics dataset
import pandas as pd
amr_frame = pd.read_csv(
"../input/kmers-counts-and-mic-measurements/6-mers_mics.csv", index_col=0
)
amr_frame
# The above dataset shows the genome $k$-mer counts and the MIC measurements, for 14 antibiotics, in a 2-fold dilution scheme. Although for some antibiotics not all genomes have a measured MIC value (NaN values are present), it is still possible to train machine learning models by selecting only those genomes with associated MIC values.
# The total number of genomic samples with a MIC measurement for a given antibiotic is important for the global accuracy of the model, while the number of genomes in a given category or class (antibiotic and MIC) is vital for the accuracy of that particular prediction.
# Let us count the number of genomes in each category and show it in a heat map. This will be useful later for showing the confidence level obtained for each class together with the number of genomic samples available.
# Number of genomes for Antibiotic and MIC.
import numpy as np
antibiotics = amr_frame.columns[-14:]
amounts = []
for antibiotic in antibiotics:
mic_values = (
amr_frame[antibiotic].loc[amr_frame[antibiotic] > 0].sort_values().unique()
)
amount = []
for mic in mic_values:
amount.append(len(amr_frame.loc[amr_frame[antibiotic] == mic]))
amounts.append(pd.DataFrame(np.reshape(amount, (1, -1)), columns=mic_values))
amounts_dframe = amounts[0].append(amounts[1:14], sort=False, ignore_index=True)
# Heatmap
import matplotlib.pyplot as plt
import seaborn as sns
# Abbreviate the antibiotic names to 5 letters
antibiotic_abbre = {i: antibiotics[i][:5].upper() for i in range(len(antibiotics))}
# Set the width and height of the figure
plt.figure(figsize=(10, 6))
# Add title
plt.title("Number of Genomes by Antibiotic and MIC\n")
# Heatmap showing the amount of genomes with the same MIC for each MIC, by antibiotic
sns.heatmap(
data=amounts_dframe.rename(index=antibiotic_abbre),
annot=True,
fmt=".4g",
cmap="YlGnBu",
cbar_kws={"label": "Number of Genomes"},
)
# Add label for horizontal axes
plt.xlabel("MIC (micrograms per milliliter)")
plt.ylabel("Antibiotic")
# The heatmap shows the number of samples tested for each antibiotic and MIC value. The data are highly imbalanced, and for most antibiotics only a few measurements are well represented. In order to reduce the imbalance, let us reduce the data and use only the most represented classes.
# Run the next cell with $N=2$ (or $N=3$). This will define a smaller dataset containing only the most represented measurements. Then run the following cell to check it.
# Reduce data to most represented categories
from math import nan
N = 2
df = (
amr_frame.sample(frac=1, random_state=0)
.reset_index(inplace=False)
.drop(columns=["index"])
)
for antibiotic in antibiotics:
mics = df[antibiotic].unique()
samples = {mic: len(df.loc[df[antibiotic] == mic]) for mic in mics}
samples = {
k: v for k, v in sorted(samples.items(), key=lambda item: item[1], reverse=True)
}
mics = [key for key in samples.keys()][N:]
for mic in mics:
df[antibiotic].replace(mic, nan, inplace=True)
df
# Number of genomes for Antibiotic and MIC.
import numpy as np
antibiotics = df.columns[-14:]
amounts = []
for antibiotic in antibiotics:
mic_values = df[antibiotic].loc[df[antibiotic] > 0].sort_values().unique()
amount = []
for mic in mic_values:
amount.append(len(df.loc[df[antibiotic] == mic]))
amounts.append(pd.DataFrame(np.reshape(amount, (1, -1)), columns=mic_values))
amounts = amounts[0].append(amounts[1:14], sort=False, ignore_index=True)
# Heatmap
import matplotlib.pyplot as plt
import seaborn as sns
# Abbreviate the antibiotic names to 5 letters
antibiotic_abbre = {i: antibiotics[i][:5].upper() for i in range(len(antibiotics))}
# Set the width and height of the figure
plt.figure(figsize=(10, 6))
# Add title
plt.title("Number of Genomes by Antibiotic and MIC\n")
# Heatmap showing the amount of genomes with the same MIC for each MIC, by antibiotic
sns.heatmap(
data=amounts.rename(index=antibiotic_abbre),
annot=True,
fmt=".4g",
cmap="YlGnBu",
cbar_kws={"label": "Number of Genomes"},
)
# Add label for horizontal axes
plt.xlabel("MIC (micrograms per milliliter)")
plt.ylabel("Antibiotic")
# The $N=2$ selection allows defining a binary classification model with less class imbalance.
# ## 2 Model building
# When building a ML model we start by defining the validation procedure with which to measure the model performance, together with an appropriate data partition. Thus, we split the total data into test, training and validation datasets (rows). The training dataset is used to train or fit the network in steps, the validation dataset to evaluate performance metrics after each training step, and the test dataset is used at the end to evaluate the model on unseen data. In these datasets, the $k$-mer counts represent the features (the input columns), while the antibiotic MIC is the target (the output column). Since for a particular antibiotic several MIC values may be associated with a genome assembly, transforming a $k$-mer spectrum into one of these values is a classification problem. For these models, the performance is given by an accuracy metric, and the disparity between the predictions and the true values by a so-called cross-entropy loss function.
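# For reference (a standard definition, not specific to this notebook): for a one-hot
# target $y$ and predicted class probabilities $\hat{y}$, the cross-entropy loss of a
# sample is $-\sum_c y_c \log \hat{y}_c$, averaged over the samples, while the accuracy
# is the fraction of samples whose predicted class (the argmax of $\hat{y}$) matches
# the true class.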
# Measuring the performance of a classification model can be done globally or by class. For instance, the loss and accuracy functions used during training are measures of global performance; however, the accuracy obtained for a particular antibiotic and MIC value is a measure of class performance. This will be computed at the end using the test dataset.
# Run the next cells to define the analysis settings.
# Antibiotics available
antibiotics
# Select the number of categories
N = 2
# Select the antibiotic
antibiotic = "ampicillin"
# ### 2.1 Define classification variables
# Let us define the input ($k$-mers) and output (MIC) variables and separate them into test and training data. The MIC values represent a numerical category, so we encode them as a categorical (one-hot) variable. The validation data will be a fraction of the training data separated off by the Keras model during training.
# Run the next cells to define the input and output variables
# Define the features input (X) and the target output (y) variables
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
# Features
kmers_basis_dim = df.shape[1] - 14
X = df.loc[df[antibiotic] > 0].values[:, 0:kmers_basis_dim]
# Target
# list of MIC values
mic_values = df[antibiotic].loc[df[antibiotic] > 0].values
# by definition we need to reshape the list of mics
mic_reshape = np.reshape(mic_values, (-1, 1))
# define encoder function
encoder = OneHotEncoder(sparse=False)
# transform data from numerical cat to onehot code
mic_onehot = encoder.fit_transform(mic_reshape)
# define the target
y = mic_onehot
# Split into the training and test data
X_train, X_test, y_train, y_test, mic_values_train, mic_values_test = train_test_split(
X, y, mic_values, test_size=0.1, random_state=0
)
# ### 2.2 Standardize the input data
# The input data, i.e. the $k$-mer counts, are unnormalized integer counts; however, neural networks work best with normalized input data. The StandardScaler from the scikit-learn library is used here to standardize the training and test input datasets.
# Standardize the training data
from sklearn.preprocessing import StandardScaler
scaler_X = StandardScaler().fit(X_train)
X_train = scaler_X.transform(X_train)
X_test = scaler_X.transform(X_test)
# ### 2.3 Define the Neural Network
# The model architecture is a neural network implemented in Keras (Python). It includes an input dense layer 64 neurons wide and an output layer $N$ neurons wide, where $N$ is the number of classes. The Adam optimizer and the binary cross-entropy loss function are chosen to train the model, and the accuracy function to measure the model performance. We also include early stopping so that training halts once the validation loss stops improving.
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
# Define the classification model
model = keras.Sequential(
[
layers.Dense(64, activation="relu", input_shape=[X_train.shape[1]]),
layers.Dense(mic_onehot.shape[1], activation="softmax"),
]
)
# Compile the model with the cross-entropy loss and the accuracy metric
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"],
)
# Include an early stopping callback for convenience
early_stopping = keras.callbacks.EarlyStopping(
# monitor (loss or val_loss)
monitor="val_loss",
    # how many epochs without improvement to wait before stopping
patience=100,
    # minimum amount of change to count as an improvement
min_delta=0.001,
restore_best_weights=True,
)
# ### 2.4 Class weighting
# Remember we are working with unbalanced data. Before training, let us define class weights to prevent the model from learning only the most represented class. The class weighting tells the model how much attention to pay to each class. These weights are roughly inversely proportional to the number of samples in each class.
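# In formula form (matching the computation in the next cell): for class $i$ with
# $n_i$ samples, the weight is $w_i = \frac{1}{n_i} \cdot \frac{total}{N}$, where
# $total$ is the number of samples with a MIC value for this antibiotic and $N$ is
# the number of classes.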
# Class weight
mics = df[antibiotic].unique()
samples = {mic: len(df.loc[df[antibiotic] == mic]) for mic in mics}
samples = {
k: v for k, v in sorted(samples.items(), key=lambda item: item[1], reverse=True)
}
mics = sorted([key for key in samples.keys()][:N])
total = len(df.loc[df[antibiotic] > 0])
class_weight = {i: (1 / samples[mic]) * (total / N) for i, mic in enumerate(mics)}
class_weight
# ### 2.5 Training the network
# Training the network means adjusting its weights, which start as random numbers, to values such that it can transform the features into the target. This work is done by the optimizer in steps. One training step of the optimizer algorithm consists of at least 3 stages:
# 1. Sample some training data and run it through the network to make predictions.
# 2. Measure the loss between these predictions and the true values.
# 3. Finally, adjust the weights in a direction that makes the loss smaller.
# For validation purposes, the training data is shuffled and split into a training and a validation dataset before training. The optimizer algorithm uses only the resulting training set to adjust the model weights, and uses the validation dataset to keep the model weights that make the validation loss smallest. Shuffling the data before splitting makes sure that the training and validation datasets are well mixed. This way we get better insight into the model's performance.
# The number of samples run on each iteration is called the batch size, and it can influence the results when training with highly unbalanced datasets. In these cases the batch size must be large enough to have a chance of seeing the classes with fewer examples, although we also deal with this by defining the model class weights. A complete pass over the training data is called an epoch, and more than one epoch will be required to finish the training.
# Run the next cell to train the model
# Training with early stopping and class weights
import time
starttime = time.time()
history = model.fit(
X_train,
y_train,
shuffle=True,
validation_split=0.3,
# validation_data=(X_valid, y_valid),
batch_size=1024,
epochs=1000,
callbacks=[early_stopping],
verbose=0, # hide the output because we have so many epochs
class_weight=class_weight,
)
print("Time: {:0.2f} seconds".format(time.time() - starttime))
# ### 2.6 Plot the loss and accuracy functions
# During training, the loss function represents the difference between the predictions and their true values, and the accuracy function is a measure of performance. The validation loss and validation accuracy are the loss and accuracy evaluated on the validation data. The loss functions should reach small values, while the accuracy functions should increase toward values near 1.
# So, let us plot the loss and accuracy functions over the epochs and show the best validation loss and accuracy.
# Plot the loss (accuracy) and validation loss (accuracy) functions
history_df = pd.DataFrame(history.history)
# Plot the full training history
history_df.loc[:, ["loss", "val_loss"]].plot()
history_df.loc[:, ["accuracy", "val_accuracy"]].plot()
print(
("Best Validation Loss: {:0.2f}" + "\nBest Validation Accuracy: {:0.2f}").format(
history_df["val_loss"].min(), history_df["val_accuracy"].max()
)
)
# The best validation accuracy corresponds to a 90% CL. This is a global measure of performance; however, in classification problems with imbalanced data the model might not actually be working, recognizing only the class with the largest number of samples. In order to see whether this is the case, let us evaluate the model performance by category in the next section.
# ### 2.7 Model validation and exact accuracy
# Let us now compute the model performance by category. Here, we use the test dataset to evaluate the model in each category, extract the exact accuracy and compute the CL as the accuracy times 100%. The exact accuracy of the model is given by the ratio of correct predictions to total predictions. Notice that the model did not see this dataset during training, so it is appropriate for model validation.
# Run the next cell to generate a list with the reached CLs
# Exact accuracy by class
accuracy = []
for mic in mics:
X_test_class = pd.DataFrame(X_test).loc[mic_values_test == mic].to_numpy()
if len(X_test_class) != 0:
y_test_class = pd.DataFrame(y_test).loc[mic_values_test == mic].to_numpy()
scores = model.evaluate(X_test_class, y_test_class, verbose=0)
accuracy.append(100 * scores[1])
else:
accuracy.append(0)
print(accuracy)
# These results show that the model can reach balanced, high-accuracy results for the binary classification problem, based on the $k$-mer counts of around 7 thousand genomes. However, in order to see the overall behavior of the model, let us consider all antibiotics in the next section.
# ### 2.8 Model accuracy for 14 antibiotics
# In the next cell we evaluate the model for the 14 antibiotics with the same validation procedure. Different values of the classification parameter $N$, as well as the batch size and the number of epochs, can be tested in order to see how the model performance changes.
# Define dataframes by category
import time
import numpy as np
import pandas as pd
from math import nan
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
# Load the kmers-mics dataset
amr_frame = pd.read_csv(
"../input/kmers-counts-and-mic-measurements/6-mers_mics.csv", index_col=0
)
# Resampled dataset
df = (
amr_frame.sample(frac=1, random_state=0)
.reset_index(inplace=False)
.drop(columns=["index"])
)
# Antibiotic list
antibiotics = df.columns[-14:]
# Select the classification type
N = 2
#
accuracies = []
for antibiotic in antibiotics:
# Reduce data to most represented categories
mics = df[antibiotic].unique()
samples = {mic: len(df.loc[df[antibiotic] == mic]) for mic in mics}
samples = {
k: v for k, v in sorted(samples.items(), key=lambda item: item[1], reverse=True)
}
mics = [key for key in samples.keys()][N:]
for mic in mics:
df[antibiotic].replace(mic, nan, inplace=True)
# Features
kmers_basis_dim = df.shape[1] - 14
X = df.loc[df[antibiotic] > 0].values[:, 0:kmers_basis_dim]
# Target
# list of MIC values
mic_values = df[antibiotic].loc[df[antibiotic] > 0].values
# by definition we need to reshape the list of mics
mic_reshape = np.reshape(mic_values, (-1, 1))
# define encoder function
encoder = OneHotEncoder(sparse=False)
# transform data from numerical cat to onehot code
mic_onehot = encoder.fit_transform(mic_reshape)
# define the target
y = mic_onehot
# Split into the training and test data
(
X_train,
X_test,
y_train,
y_test,
mic_values_train,
mic_values_test,
) = train_test_split(X, y, mic_values, test_size=0.1, random_state=0)
# Standardize the training data
scaler_X = StandardScaler().fit(X_train)
X_train = scaler_X.transform(X_train)
X_test = scaler_X.transform(X_test)
# Define the classification model
model = keras.Sequential(
[
layers.Dense(64, activation="relu", input_shape=[X_train.shape[1]]),
layers.Dense(mic_onehot.shape[1], activation="softmax"),
]
)
    # Compile the model with the cross-entropy loss and the accuracy metric
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"],
)
# Include an early stopping callback for convenience
early_stopping = keras.callbacks.EarlyStopping(
# monitor (loss or val_loss)
monitor="val_loss",
        # how many epochs without improvement to wait before stopping
patience=100,
        # minimum amount of change to count as an improvement
min_delta=0.001,
restore_best_weights=True,
)
# Class weighting
mics = sorted([key for key in samples.keys()][:N])
total = len(df.loc[df[antibiotic] > 0])
class_weight = {i: (1 / samples[mic]) * (total / N) for i, mic in enumerate(mics)}
# Training with early stopping
starttime = time.time()
history = model.fit(
X_train,
y_train,
shuffle=True,
validation_split=0.3,
# validation_data=(X_valid, y_valid),
batch_size=1024,
epochs=1000,
callbacks=[early_stopping],
verbose=0, # hide the output because we have so many epochs
class_weight=class_weight,
)
print("Time: {:0.2f} seconds".format(time.time() - starttime))
# Exact accuracy by class
accuracy = []
for mic in mics:
X_test_class = pd.DataFrame(X_test).loc[mic_values_test == mic].to_numpy()
if len(X_test_class) != 0:
y_test_class = pd.DataFrame(y_test).loc[mic_values_test == mic].to_numpy()
scores = model.evaluate(X_test_class, y_test_class, verbose=0)
accuracy.append(100 * scores[1])
else:
accuracy.append(0)
accuracies.append(pd.DataFrame(np.reshape(accuracy, (1, -1)), columns=mics))
# Accuracy dataframe
accuracies_df = accuracies[0].append(accuracies[1:14], sort=False, ignore_index=True)
# Number of genomes for Antibiotic and MIC.
amounts = []
for antibiotic in antibiotics:
mic_values = df[antibiotic].loc[df[antibiotic] > 0].sort_values().unique()
amount = []
for mic in mic_values:
amount.append(len(df.loc[df[antibiotic] == mic]))
amounts.append(pd.DataFrame(np.reshape(amount, (1, -1)), columns=mic_values))
amounts = amounts[0].append(amounts[1:14], sort=False, ignore_index=True)
import matplotlib.pyplot as plt
import seaborn as sns
# Abbreviate the antibiotic names to 5 letters
antibiotic_abbre = {i: antibiotics[i][:5].upper() for i in range(len(antibiotics))}
# set the width and height of the figure
plt.figure(figsize=(10, 6))
# add title
plt.title("Accuracy of the MIC Prediction Model\n (6-mers analysis)")
# heatmap showing the amount of genomes with the same MIC for each MIC, by antibiotic
sns.heatmap(
data=amounts.rename(index=antibiotic_abbre),
annot=accuracies_df,
fmt=".3g",
cmap="YlGnBu",
cbar_kws={"label": "Number of Genomes"},
)
# add label for horizontal and vertical axis
plt.xlabel("Minimum Inhibitory Concentration")
plt.ylabel("Antibiotic")
|
import pyspark
# importing session method to start a new session
from pyspark.sql import SparkSession
# creating a new session
spark = SparkSession.builder.appName("intro").getOrCreate()
# **Topics Covered:**
# - PySpark DataFrame
# - Reading the Dataset
# - Checking the Datatypes of the Columns [Schema]
# - Selecting Columns and Indexing
# - Adding Columns (selecting and adding columns are sketched below, once the dataframe is loaded)
# - Dropping Columns
# without inferSchema, columns such as age and experience are read as strings
# set inferSchema=True so the column types are inferred correctly
dfps = spark.read.csv(
"/kaggle/input/sample-data/pyspark.csv", header=True, inferSchema=True
)
dfps
dfps.show()
# check the schema
dfps.printSchema()
# printSchema() is roughly like df.info() in pandas
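# A minimal sketch (assumption: the Name, Age and Experience columns exist in
# pyspark.csv, as used in the cells below) of the "selecting columns" and
# "adding columns" topics listed above.
dfps.select("Name", "Age").show()
dfps.withColumn("Experience_in_months", dfps["Experience"] * 12).show()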
# dropping a column
dfps.drop("Name").show()
# dropping null values
dfps.na.drop().show() # wherever null values - will get deleted
dfps.na.drop(how="any").show() # drop a row if it contains any nulls.
dfps.na.drop(how="all").show() # drop a row only if all its values are null.
dfps.show()
# thresh: keep only rows with at least `thresh` non-null values
dfps.na.drop(
thresh=2, how="any"
).show() # drop rows that have less than 2 non-null values.
# subset: only consider these columns when deciding which rows to drop
dfps.na.drop(how="any", subset=["Experience"]).show()
# # Filling the Missing Values
dfps.show()
dfps.na.fill("Missing values").show()
dfps.na.fill({"Age": 50, "Name": "Unknown"}).show()
# impute using stat values
from pyspark.ml.feature import Imputer
imputer = Imputer(
inputCols=["Age", "Experience", "Salary"],
outputCols=["{}_imputed".format(c) for c in ["Age", "Experience", "Salary"]],
).setStrategy("mode")
imputer.fit(dfps).transform(dfps).show()
|
# **Stock prediction using machine learning is a popular and interesting topic in the field of finance. In this task, we aim to develop a machine learning model that can accurately predict the future prices of a particular stock.**
# **We use Python for machine learning predictions. It has several libraries that can be used for stock prediction, such as Pandas, Numpy, Matplotlib, Scikit-Learn, and Keras.**
# **Steps in the pipeline used for predictions are:**
# *Data collection*: **Instead of mining and collecting data from various websites, we are using an open-source dataset for our basic model building. The dataset I have chosen is the S&P 500 stock data available on Kaggle.**
# *Data preprocessing*: **Preprocessing is an important step in machine learning; it can be defined as any processing performed on raw data to prepare it for model training.**
# *Model selection*: **There are several machine learning models that can be used for stock prediction, such as Linear Regression, Support Vector Machines (SVM), and Neural Networks.**
# *Splitting data*: **Once a model has been chosen, we have to split the data into train and test sets. The model is trained on the training set and evaluated on the testing set. The performance of the model is measured using metrics such as Mean Squared Error (MSE), Root Mean Squared Error (RMSE), and R-squared.**
# *Model evaluation*: **After training, the model needs to be evaluated on a holdout dataset to assess its performance. This is typically done by comparing the predicted values with the actual values using the metrics mentioned above.**
# *Model application*: **Once we have evaluated the model, we can use it to make predictions for future assessment and web applications.**
#
import numpy as np
import pandas as pd
import os
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
# # We are reading the data available for an S&P 500 company and loading it into a *pandas dataframe* using the **read_csv()** function
df = pd.read_csv(
"/kaggle/input/sandp500/individual_stocks_5yr/individual_stocks_5yr/AAPL_data.csv"
)
df
# **As we can see, there are "date" and "Name" columns which will not be necessary for the model. To drop these columns we will use the drop method available in pandas.**
#
dropper = ["date", "Name"]
df = df.drop(dropper, axis=1)
# **After dropping the unnecessary columns, we will calculate the correlation matrix. Correlation is a value on a scale from -1 to 1 that tells us how strongly two columns depend on each other: values close to 1 (or -1) indicate a strong relationship, while values near 0 indicate little or none.**
corr = df.corr()
corr
# **As we can see, the volume column has a very low correlation with the others, so we can drop it.**
df = df.drop("volume", axis=1)
# **We are going to predict the opening stock prices, so we will split the data into two parts: "x" is the full feature dataset and "y" contains only the opening values.**
y = df.open
x = df
y
# **In the next step, we are going to split the dataset into train and test sets. For this we will use a function from the scikit-learn library. We have set the test size to 20 percent.**
x_train, x_test, y_train, y_test = train_test_split(
x, y, random_state=42, test_size=0.2
)
# **First we are going to use the KNeighborsRegressor model. The basic idea behind the KNeighborsRegressor algorithm is to predict the target variable for a new data point based on the values of its k nearest neighbors in the training set. The algorithm calculates the distance between the new data point and all other data points in the training set and selects the k closest neighbors. It then takes the average of the target variable values of these neighbors and uses it as the predicted value for the new data point (a tiny sketch of this idea follows).**
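# A tiny illustrative sketch (assumption: 1-D toy numbers, not the stock data) of the
# idea described above: the prediction for a new point is the mean target of its k
# nearest training neighbours.
toy_x = np.array([1.0, 2.0, 3.0, 10.0])  # toy feature values
toy_y = np.array([1.1, 1.9, 3.2, 9.8])   # toy target values
new_x, k = 2.5, 3
nearest = np.argsort(np.abs(toy_x - new_x))[:k]  # indices of the k closest points
print(toy_y[nearest].mean())                     # k-NN regression prediction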
model1 = KNeighborsRegressor()
# **In the next step we are going to estimate the accuracy of the model with cross-validation, using the R2 score as the evaluation metric.**
scorer1 = cross_val_score(model1, x_train, y_train, scoring="r2")
print(scorer1)
# **For the next model we are going to use SVR. Support Vector Regression (SVR) is a machine learning algorithm used for regression analysis and is a variant of the Support Vector Machine (SVM) algorithm. It works by finding a function that stays as flat as possible while deviating from the observed targets by no more than a small margin.**
model2 = SVR()
# *Again we are going to use R2 score as an evaluation parameter.*
scorer2 = cross_val_score(model2, x_train, y_train, scoring="r2")
print(scorer2)
# **For the next model, I am choosing the Decision Tree Algorithm. A Decision Tree Regressor is a machine learning algorithm that is used for regression analysis. The algorithm works by creating a tree-like model of decisions and their possible consequences.**
model3 = DecisionTreeRegressor()
# *Since we are using a regression model, we will use the R2 score again for evaluation.*
scorer3 = cross_val_score(model3, x_train, y_train, scoring="r2")
print(scorer3)
# **Finally we are going to use the simplest algorithm available. Linear regression is a statistical method for modelling the relationship between a dependent variable and one or more independent variables. It assumes that the relationship between the variables is linear and aims to find the best-fit line that describes this relationship.**
model4 = LinearRegression()
model4.fit(x_train, y_train)
predictions = model4.predict(x)
# For linear regression, I have changed the evaluation metric from the R2 score to the Mean Squared Error, which directly measures the average squared difference between the predictions and the actual values.
metrics.mean_squared_error(y, predictions)
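# A minimal check: MSE is just the mean of the squared residuals, so the value above
# can also be computed directly with numpy.
print(np.mean((np.asarray(y) - predictions) ** 2))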
# **Here we are going to use Deep learning. Deep learning is a subfield of machine learning that involves the use of artificial neural networks to model and solve complex problems. These neural networks are composed of many layers of interconnected processing nodes that are designed to work together to process large amounts of data.**
# **Deep learning algorithms are particularly good at processing complex data, such as images, speech, and natural language. They are also used in many applications, including image recognition, speech recognition, natural language processing, and recommendation systems.**
# **The development of deep learning has led to the creation of many powerful tools and frameworks, such as TensorFlow, PyTorch, and Keras.**
# *We are using keras to build our first deep learning based neural network.*
# *We first import the Sequential model; a Sequential model is a type of deep learning model in which the layers are stacked sequentially on top of each other to form a neural network.*
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
# For the neural network, I will be using an LSTM based model.
df_lstm = pd.read_csv(
"/kaggle/input/sandp500/individual_stocks_5yr/individual_stocks_5yr/AAPL_data.csv"
)
df_lstm = df["open"]
# **We should use a MinMaxScaler to make the values in the dataset fall between 0 and 1, as LSTM and RNN models are sensitive to large values, which may cloud the results of the model.**
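# For reference, MinMaxScaler rescales each value as
# x_scaled = (x - x_min) / (x_max - x_min), which maps the column into the [0, 1] range.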
scaler = MinMaxScaler()
df_lstm = scaler.fit_transform(np.array(df_lstm).reshape(-1, 1))
df_lstm.size
# Here we are splitting the dataset into train and test sets with train_test_split from scikit-learn, using a test size of 30 percent.
train, test = train_test_split(df_lstm, test_size=0.3, random_state=54)
train.shape
test.shape
# **In the next step we split the series into sliding windows of 100 days, where each window is used to predict the following day's value. This kind of modelling is known as time series analysis: a statistical technique used to analyze and model data collected over time, often to identify patterns and trends that can be used to make predictions about future events.**
time_step = 100
x_train, y_train, x_test, y_test = [], [], [], []
for i in range(len(train) - time_step):
x = train[i : (i + time_step), 0]
x_train.append(x)
y_train.append(train[i + time_step, 0])
for i in range(len(test) - time_step):
x = test[i : (i + time_step), 0]
x_test.append(x)
y_test.append(test[i + time_step, 0])
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
# We reshape the input to (samples, time steps, features), the 3-D shape expected by the LSTM layer.
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
x_train.shape, y_train.shape
from keras.optimizers import Adam
# **We are building an LSTM model with 100 units in the input layer, 50 units in the next layer and 25 units in the penultimate layer. At the end we use a dense layer to produce the output value. We use adam as the optimizer with a learning rate of 0.001, since the inputs are scaled to the range (0, 1). For the loss function we use mean_squared_error, as this is a regression-type model that outputs continuous values.**
model_lstm = Sequential()
model_lstm.add(
LSTM(units=100, return_sequences=True, input_shape=(x_train.shape[1], 1))
)
model_lstm.add(LSTM(units=50, return_sequences=True))
model_lstm.add(LSTM(units=25))
model_lstm.add(Dense(units=1))
optimizer_adam = Adam(learning_rate=0.001, decay=0.01)
model_lstm.compile(optimizer=optimizer_adam, loss="mean_squared_error")
model_lstm.summary()
# *In the next step we train the model for 10 epochs on the training data.*
model_lstm.fit(x_train, y_train, epochs=10, batch_size=16)
# *After training, we generate predictions for both the training and test windows so we can judge how well the model fits the data.*
lstm_trainpredictions = model_lstm.predict(x_train)
lstm_testpredictions = model_lstm.predict(x_test)
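# *A minimal sketch (not part of the original notebook): since the model was trained on MinMax-scaled values, the predictions can be mapped back to the original price scale with the fitted scaler and scored with RMSE. It assumes the scaler and the arrays defined above are still in scope.*
from sklearn.metrics import mean_squared_error
# undo the MinMax scaling so the error is expressed in the original "open" price units
train_pred_prices = scaler.inverse_transform(lstm_trainpredictions)
test_pred_prices = scaler.inverse_transform(lstm_testpredictions)
train_true_prices = scaler.inverse_transform(y_train.reshape(-1, 1))
test_true_prices = scaler.inverse_transform(y_test.reshape(-1, 1))
print("Train RMSE:", np.sqrt(mean_squared_error(train_true_prices, train_pred_prices)))
print("Test RMSE:", np.sqrt(mean_squared_error(test_true_prices, test_pred_prices)))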
|
import cv2 as opencv
import os, re
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras.layers import *
from keras.models import Sequential
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.metrics import confusion_matrix, classification_report, recall_score
from sklearn.model_selection import train_test_split, cross_val_predict
from skimage.transform import resize
# # let's visualize one sample image from IMAGE FOLDER
#
main_path = "/kaggle/input"
folder_path = [x for x in os.listdir(main_path) if "test-models" not in x]
store_images = []
for x in folder_path:
for y in os.listdir(os.path.join(main_path, x)):
# first get whole path from the image
paths = os.path.join(main_path + "/" + x, y)
# convert bytes to array format
image_read = plt.imread(paths)
# just shrink the image file size into 50 x 50
resize_500 = opencv.resize(
image_read, (50, 50), interpolation=opencv.INTER_AREA
)
store_images.append(resize_500)
import numpy as np
random_images = np.random.permutation(len(store_images))[0]
plt.figure(figsize=(4, 4))
plt.imshow(store_images[random_images])
plt.xticks([])
plt.yticks([])
plt.xlabel("sample image")
plt.ylabel("w / h / color" + " => " + str(store_images[random_images].shape))
plt.show()
# # using plotly to make a bar chart of the image folders
#
image_path = "/kaggle/input"
fd_path = [x for x in os.listdir(main_path) if "test-models" not in x]
fds = []
for folder in fd_path:
for files in os.listdir(image_path + "/" + folder):
fds.append([folder, os.path.join(image_path + "/" + folder, files)])
weared_mask = []
non_mask = []
for x, y in fds:
if x == "weared-mask":
weared_mask.append(y)
else:
non_mask.append(y)
fd_str = {
"foldername": ["weared_mask", "not_weared_mask"],
"totalfiles": [len(weared_mask), len(non_mask)],
"path": [image_path] * 2,
}
import pandas as pd
import plotly.express as px
masks = pd.DataFrame(fd_str)
fig = px.bar(
masks,
x="foldername",
y="totalfiles",
width=500,
height=300,
labels={"foldername": "mask details"},
)
fig.show()
# # separate data as image array and label for classification
classifypath = "/kaggle/input"
fdss_path = [x for x in os.listdir(main_path) if "test-models" not in x]
classifymask = []
for folderin in fdss_path:
for filesin in os.listdir(os.path.join(classifypath, folderin)):
# first change bytes to array using plt
filepath = os.path.join(classifypath + "/" + folderin, filesin)
if os.path.basename(filepath).endswith(".jpg"):
bytes_toarray = plt.imread(filepath)
# next resize the image using opencv
resize_50_50 = opencv.resize(
bytes_toarray, (50, 50), interpolation=opencv.INTER_CUBIC
)
# change the image as grayscale using cvtcolor
# (50,50,3) => (50,50,1)
# 50 * 50 * 1 = > 2500 weights on grayscale
colortogray = opencv.cvtColor(resize_50_50, opencv.COLOR_BGR2GRAY)
# append the values of array and labels in classifymask
classifymask.append([np.array(colortogray.flatten()) / 255.0, folderin])
arrayofimage = []
labelofimage = []
for aa, bb in classifymask:
arrayofimage.append(aa)
labelofimage.append(bb)
# convert xxtrain and yytrain as np.array
xxtrain = np.array(arrayofimage)
# reshape the data
xxtrain = xxtrain.reshape(xxtrain.shape[0], 50, 50, 1)
# using labelencoder
masky = LabelEncoder()
masy = masky.fit_transform(labelofimage)
yytrain = masy
yytrain = keras.utils.to_categorical(yytrain, 2)
print(xxtrain.shape, yytrain.shape)
# lets implement with keras
covid_mask = Sequential(
[
Conv2D(64, (3, 3), input_shape=(50, 50, 1), activation="relu"),
MaxPooling2D(2, 2),
Conv2D(128, (3, 3), activation="relu"),
MaxPooling2D(2, 2),
Flatten(),
Dense(100, activation="linear"),
Dense(2, activation="softmax"),
]
)
covid_mask.summary()
covid_mask.compile(
optimizer="adam", metrics=["accuracy"], loss="categorical_crossentropy"
)
history = covid_mask.fit(xxtrain, yytrain, epochs=20)
# # visualize the covid_mask accuracy
pd.DataFrame(history.history)["accuracy"].plot(color="orange", figsize=(10, 6))
plt.title("covidmask n epochs")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.show()
# # predict on the files in the test-models folder to detect whether a person is wearing a covid mask or not
# test covid model
import cv2
test_mdl = "/kaggle/input/test-models"
count = 0
plt.figure(figsize=(20, 30))
for mds in os.listdir(test_mdl):
count += 1
plt.subplot(len(os.listdir(test_mdl)), len(os.listdir(test_mdl)) // 2, count)
rd_img = plt.imread(os.path.join(test_mdl, mds))
reze_50 = cv2.resize(rd_img, (50, 50), interpolation=cv2.INTER_CUBIC)
clotogry = cv2.cvtColor(reze_50, cv2.COLOR_BGR2GRAY)
    # scale the pixel values to [0, 1] to match the training preprocessing (assumes 8-bit images)
    lbzz = np.argmax(covid_mask.predict((clotogry / 255.0).reshape(1, 50, 50, 1)))
plt.imshow(rd_img)
plt.xlabel(masky.inverse_transform([lbzz])[0], fontsize=20)
plt.xticks([])
plt.yticks([])
plt.show()
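# A rough sanity check (a sketch, not part of the original notebook): the classification_report imported earlier can be applied to the training images themselves, since no validation split was made. Training-set scores only show how well the model fits the data it has seen, not how well it generalises.
train_probs = covid_mask.predict(xxtrain)
train_preds = np.argmax(train_probs, axis=1)
train_truth = np.argmax(yytrain, axis=1)
print(classification_report(train_truth, train_preds, target_names=list(masky.classes_)))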
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
# learning_curve now lives in sklearn.model_selection (it was in a separate module in older sklearn versions)
from sklearn.model_selection import train_test_split, KFold, learning_curve
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
)
df = pd.read_csv("../input/suv-nanze/suv.csv")
df.drop("User ID", axis=1, inplace=True)
df.head(5)
# pass a single column (df.Gender, a Series) rather than the whole dataframe
# this way the position of the columns in df does not change
df.Gender = pd.get_dummies(df.Gender, drop_first=True)
df.head()
X = df.to_numpy()
# random arrange
print(X.shape[0])
np.random.permutation(5)
# now we want to shuffle the data > change the place of rows > change the indexes
np.random.seed(0)
X = X[np.random.permutation(X.shape[0])]
######################################
# option 2: np.random.shuffle shuffles the rows in place
np.random.shuffle(X)
y = X[:, -1]
X = X[:, :-1]
# without this normalization the score stayed around 0.62; with it, it reaches about 0.85
X = (X - X.mean(axis=0)) / X.std(axis=0)
# # KFold
data_split = KFold(n_splits=5)
data_split
# data_split.split(X, y) yields a (train_idx, val_idx) tuple of index arrays for each fold
# these indices are then used to slice X and y into the fold's train and validation sets
train_score = 0
val_score = 0
for train_inx, val_idx in data_split.split(X, y):
print(len(train_inx), len(val_idx))
clf = LogisticRegression()
clf.fit(X[train_inx], y[train_inx])
print(
"train acc =",
clf.score(X[train_inx], y[train_inx]),
"\tval acc =",
clf.score(X[val_idx], y[val_idx]),
)
train_score += clf.score(X[train_inx], y[train_inx])
val_score += clf.score(X[val_idx], y[val_idx])
print("the train score average", train_score / data_split.n_splits)
print("the validation score average", val_score / data_split.n_splits)
"""
we can see the different outputs > decide better overfit or not > the model is ok
because in each shuffle maybe we choose the simpler data as train and harder data for validation
so with one time of training we can't for sure conclude
with normalization the scores will be better for sure
why for sure > because we have estimate salary and age > two cols ranges are way different
"""
"""
we want to overfit the model
for having polynomial features > 1. hstack(X,X**2,X**3) 2. Polynomial
x = np.arange(-10, 30, 1).reshape(-1, 1)
x_6 = np.hstack((x,x**2,x**3,x**4,x**5,x**6)) > just these degrees
polynomial > creates all of the sentences up to degree n > from 0 to n
form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]
abc > 1 a b c a2 b2 c2 ab bc ac a3 b3 c3 abc a2b a2c b2c c2a c2b abc > 20
"""
from sklearn.preprocessing import PolynomialFeatures
polynomial = PolynomialFeatures(degree=6)
# fit > train/adjust
# transfrom > predict
# fit_transform > fit + predict > the input of all of the functions are based on the type whether they need y or not > if needed we have to pass the y too
X_poly = polynomial.fit_transform(X)
print(X.shape, X_poly.shape)
train_score = 0
val_score = 0
for train_inx, val_idx in data_split.split(X, y):
print(len(train_inx), len(val_idx))
clf = LogisticRegression(max_iter=100000)
clf.fit(X_poly[train_inx], y[train_inx])
print(
"train acc =",
clf.score(X_poly[train_inx], y[train_inx]),
"\tval acc =",
clf.score(X_poly[val_idx], y[val_idx]),
)
train_score += clf.score(X_poly[train_inx], y[train_inx])
val_score += clf.score(X_poly[val_idx], y[val_idx])
print("the train score average", train_score / data_split.n_splits)
print("the validation score average", val_score / data_split.n_splits)
# a 1-2 percentage point gap between train and validation accuracy is fine, but much more than that is not
# train = 90 and validation = 90 is ok > to spot overfitting, compare the train accuracy with the validation accuracy
# now we have an overfitted model > we can fix it by adding regularization
# LogisticRegression regularizes by default > l2 penalty with C=1 > the strength is 1/C
train_score = 0
val_score = 0
for train_inx, val_idx in data_split.split(X, y):
clf = LogisticRegression(max_iter=100000, C=100) # more overfit
clf.fit(X_poly[train_inx], y[train_inx])
# print('train acc =', clf.score(X_poly[train_inx], y[train_inx]),
# '\tval acc =', clf.score(X_poly[val_idx], y[val_idx]))
train_score += clf.score(X_poly[train_inx], y[train_inx])
val_score += clf.score(X_poly[val_idx], y[val_idx])
print("the train score average", train_score / data_split.n_splits)
print("the validation score average", val_score / data_split.n_splits)
train_score = 0
val_score = 0
for train_inx, val_idx in data_split.split(X, y):
clf = LogisticRegression(max_iter=1000000, C=5e-5) # underfit model
clf.fit(X_poly[train_inx], y[train_inx])
# print('train acc =', clf.score(X_poly[train_inx], y[train_inx]),
# '\tval acc =', clf.score(X_poly[val_idx], y[val_idx]))
train_score += clf.score(X_poly[train_inx], y[train_inx])
val_score += clf.score(X_poly[val_idx], y[val_idx])
print("the train score average", train_score / data_split.n_splits)
print("the validation score average", val_score / data_split.n_splits)
"""
the penalty parameter in the function > we have only l2 > C > 1/C > solver: lbfgs
the penalty l1 > change the solver
use lasso regression for l1
GD > j=j-alpha*derivative
solvers > Each solver tries to find the parameter weights that minimize a cost function.
j'=j+alpha*(Wi**2)
improve the term of regularization > more underfit
decrease the term of regularization > more overfit
https://towardsdatascience.com/dont-sweat-the-solver-stuff-aea7cddc3451
Lasso regression changes are not to sensible regarding to the ridge regression
"""
train_score = 0
val_score = 0
for train_inx, val_idx in data_split.split(X, y):
clf = LogisticRegression(
max_iter=1000, penalty="l1", solver="saga", C=100000
) # more overfit
clf.fit(X_poly[train_inx], y[train_inx])
# print('train acc =', clf.score(X_poly[train_inx], y[train_inx]),
# '\tval acc =', clf.score(X_poly[val_idx], y[val_idx]))
train_score += clf.score(X_poly[train_inx], y[train_inx])
val_score += clf.score(X_poly[val_idx], y[val_idx])
print("the train score average", train_score / data_split.n_splits)
print("the validation score average", val_score / data_split.n_splits)
train_score = 0
val_score = 0
for train_inx, val_idx in data_split.split(X, y):
clf = LogisticRegression(
max_iter=1000, penalty="l1", solver="saga", C=5e-10
    ) # more underfit
clf.fit(X_poly[train_inx], y[train_inx])
# print('train acc =', clf.score(X_poly[train_inx], y[train_inx]),
# '\tval acc =', clf.score(X_poly[val_idx], y[val_idx]))
train_score += clf.score(X_poly[train_inx], y[train_inx])
val_score += clf.score(X_poly[val_idx], y[val_idx])
print("the train score average", train_score / data_split.n_splits)
print("the validation score average", val_score / data_split.n_splits)
C = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
for c in C:
train_score = 0
val_score = 0
for train_inx, val_idx in data_split.split(X, y):
clf = LogisticRegression(max_iter=100000, C=c)
clf.fit(X_poly[train_inx], y[train_inx])
train_score += clf.score(X_poly[train_inx], y[train_inx])
val_score += clf.score(X_poly[val_idx], y[val_idx])
print(f"C is {c}---------")
print("the train score average", train_score / data_split.n_splits)
print("the validation score average", val_score / data_split.n_splits)
clf = LogisticRegression()
clf.fit(X_poly, y)
print(clf.coef_)
# up to now > chnage the c in the parameters of another model
# now import each penalty directly as classifier
from sklearn.linear_model import Ridge, Lasso
# params: fit_intercept > bias / copy / ...
clf = Ridge(alpha=1000) # very strong regularization
clf.fit(X_poly, y)
# as you see the coef are too small
print(clf.coef_)
clf = Lasso(alpha=2) # very strong regularization
clf.fit(X_poly, y)
# coef zero
print(clf.coef_)
# each penalty is like classifier that you can fit them on your data
# # train test split
# define the x again
X = df.to_numpy()
np.random.seed(0)
X = X[np.random.permutation(X.shape[0])]
y = X[:, -1]
X = X[:, :-1]
# option 1: do it yourself > first shuffle the rows > then split X and y > then split into train/test (and validation)
split = int(X.shape[0] * 0.8)
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
# 2.
# random_state fixes the shuffle seed (e.g. random_state=1)
X_train, X_test = train_test_split(X, test_size=0.2, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
# normalization
"""
the correct way > split the data
find the norm params on train dataset > use this to normalize all train, test, val
we rarely normalize the y
"""
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
# verbose=1 prints the solver's progress; class_weight="balanced" reweights classes inversely to their frequencies
clf = LogisticRegression(verbose=1, class_weight="balanced")
clf.fit(X_train, y_train)
# # Accuracy
print("test score:", clf.score(X_test, y_test))
print("train score:", clf.score(X_train, y_train))
h_train = clf.predict(X_train)
print("ACC =", np.mean(h_train == y_train))
# for sklearn.metrics > we use the true values and predicted values
accuracy_score(y_train, h_train)
# # Precision
h_train = clf.predict(X_train)
h_test = clf.predict(X_test)
print("test precision:", precision_score(y_test, h_test))
print("train precision:", precision_score(y_train, h_train))
# TP / (TP+FP)
# # Recall
h_train = clf.predict(X_train)
h_test = clf.predict(X_test)
print("test recall:", recall_score(y_test, h_test))
print("train recall:", recall_score(y_train, h_train))
# TP / (TP+FN)
"""
we always look for the validation score > we want to kepp it high
if both the train and validation score are low > underfit
"""
y_test == 1
# * is AND in numpy array // + is OR
(y_test == 1) * (h_test == 1)
print("Test:")
print("\ttrue positive", np.mean((y_test == 1) * (h_test == 1)))
print("\tfalse positive", np.mean((y_test == 0) * (h_test == 1)))
print("\tfalse negative", np.mean((y_test == 1) * (h_test == 0)))
print("\ttrue negative", np.mean((y_test == 0) * (h_test == 0)))
print("Train:")
print("\ttrue positive", np.mean((y_train == 1) * (h_train == 1)))
print("\tfalse positive", np.mean((y_train == 0) * (h_train == 1)))
print("\tfalse negative", np.mean((y_train == 1) * (h_train == 0)))
print("\ttrue negative", np.mean((y_train == 0) * (h_train == 0)))
# the threshold is 0.5
h_train = clf.predict(X_train)
clf.predict_proba(X_train)
# belonging to class1
h_man = clf.predict_proba(X_train)[:, 1] > 0.5
h_man
# these are the same
np.mean(h_train == h_man)
# range() cannot step by floats, but np.arange can build the list of thresholds we need
np.arange(0, 1.001, 0.01)
# we can change the threshold
test_recalls = []
test_precisions = []
for th in np.arange(0, 1, 0.01):
h_train = clf.predict_proba(X_train)[:, 1] > th
h_test = clf.predict_proba(X_test)[:, 1] > th
test_recalls.append(recall_score(y_test, h_test))
test_precisions.append(precision_score(y_test, h_test))
# print("test recall: ", recall_score(y_test,h_test))
# print("test precision: ", precision_score(y_test,h_test))
# print("train recall: ", recall_score(y_train,h_train))
# print("train precision: ", precision_score(y_train,h_train))
plt.plot(np.arange(0, 1, 0.01), test_recalls, "r-", label="recall")
plt.plot(np.arange(0, 1, 0.01), test_precisions, "b-", label="precision")
plt.legend()
# precision and recall move in opposite directions as the threshold changes
# # F1-score
h_train = clf.predict(X_train)
h_test = clf.predict(X_test)
print("test f1-score:", f1_score(y_test, h_test))
print("train f1-score:", f1_score(y_train, h_train))
# we can change the threshold
test_recalls = []
test_precisions = []
test_f1 = []
for th in np.arange(0, 1, 0.01):
h_train = clf.predict_proba(X_train)[:, 1] > th
h_test = clf.predict_proba(X_test)[:, 1] > th
test_recalls.append(recall_score(y_test, h_test))
test_precisions.append(precision_score(y_test, h_test))
test_f1.append(f1_score(y_test, h_test))
# print("test recall: ", recall_score(y_test,h_test))
# print("test precision: ", precision_score(y_test,h_test))
# print("train recall: ", recall_score(y_train,h_train))
# print("train precision: ", precision_score(y_train,h_train))
plt.plot(np.arange(0, 1, 0.01), test_recalls, "r-", label="recall")
plt.plot(np.arange(0, 1, 0.01), test_precisions, "b-", label="precision")
plt.plot(np.arange(0, 1, 0.01), test_f1, "g-", label="f1")
plt.legend()
# F1 is the harmonic mean of precision and recall > its peak is where both are reasonably high > a threshold around 0.6 looks good here
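# A small follow-up sketch using the arrays built above: pick the threshold that maximises F1 instead of reading it off the plot (ideally this tuning would be done on a validation set rather than the test set).
thresholds = np.arange(0, 1, 0.01)
best_idx = int(np.argmax(test_f1))
print("best threshold by F1:", thresholds[best_idx], "with F1 =", test_f1[best_idx])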
# # ROC - AUC
# ROC sweeps the threshold and records the TP and FP rates, much like the loop above
# because of that sweep, roc_auc_score needs class probabilities rather than hard class labels
# we pass the probability of class 1, since a probability above the threshold means class 1
# a single ROC-AUC value is hard to judge on its own > it is most useful for comparing models
h_test = clf.predict_proba(X_test)[:, 1]
h_train = clf.predict_proba(X_train)[:, 1]
print("test ROC-AUC:", roc_auc_score(y_test, h_test))
print("train ROC-AUC:", roc_auc_score(y_train, h_train))
# balanced is better
models = [
LogisticRegression(class_weight="balanced"),
LogisticRegression(),
LogisticRegression(C=0.00001),
LogisticRegression(C=100000),
]
for model in models:
model.fit(X_train, y_train)
h_test = model.predict_proba(X_test)[:, 1]
print(roc_auc_score(y_test, h_test))
# # Learning Curve
clf = LogisticRegression(class_weight="balanced")
# learning_curve does not require a prior fit or a manual train/test split; cv sets the number of cross-validation folds
# it splits the data into train and validation subsets internally
# train_sizes controls what proportions of the data are used for training
train_sizes, train_score, test_score = learning_curve(clf, X, y, cv=5)
train_sizes
# the number of training samples used at each point of the curve
train_score
plt.plot(train_sizes, train_score.mean(axis=1), "go-", label="train")
plt.plot(train_sizes, test_score.mean(axis=1), "bo-", label="test")
plt.legend()
# learning curve with normalized data
train_sizes, train_score, test_score = learning_curve(
clf, X_train, y_train, cv=5, train_sizes=np.arange(0.1, 1.1, 0.1)
)
plt.plot(train_sizes, train_score.mean(axis=1), "go-", label="train")
plt.plot(train_sizes, test_score.mean(axis=1), "bo-", label="test")
plt.legend()
# the learning curve is one way to tell whether adding more data would improve performance
# once the curve flattens out > more data will not help > improve the model itself instead
train_sizes, train_score, test_score = learning_curve(
clf, X_train, y_train, cv=5, train_sizes=np.arange(0.1, 1, 0.01)
)
plt.plot(train_sizes, train_score.mean(axis=1), "go-", label="train")
plt.plot(train_sizes, test_score.mean(axis=1), "bo-", label="test")
plt.legend()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
heart = pd.read_csv("/kaggle/input/heartcsv/Heart.csv")
heart.head()
heart.info()
heart.shape
heart.describe()
# to check only those column which have null in them
heart[[i for i in heart.columns if heart[i].isnull().sum() > 0]].isnull().sum()
# # Feature Engineering - Data Cleaning and Creating an ADS for Analysis
# Removing the NaN
heart.Thal.value_counts()
# Since 'normal' is the most frequent Thal value, fill the missing values with 'normal'
heart.Thal = heart.Thal.fillna("normal")
heart.Thal.value_counts()
heart.Ca.value_counts()
# Since 0.0 is the most frequent Ca value, fill the missing values with 0.0
heart.Ca = heart.Ca.fillna(0.0)
heart.Ca.value_counts()
print(heart.ChestPain.unique())
print(heart.Thal.unique())
print(heart.AHD.unique())
# nominal encoding technique
heart_encoding = pd.get_dummies(heart[["ChestPain", "Thal", "AHD"]])
heart_final = pd.concat([heart, heart_encoding], axis=1)
heart_final = heart_final.drop(["ChestPain", "Thal", "AHD"], axis=1)
heart_final.head(2)
# checking no. of males(1) and female (0)
heart_final.Sex.value_counts()
# Checking AHD wrt sex
heart_final.Sex[heart_final.AHD_Yes == 1].value_counts()
pd.crosstab(heart_final.AHD_Yes, heart_final.Sex)
pd.crosstab(heart_final.AHD_Yes, heart_final.Sex).plot(
kind="bar", figsize=(20, 10), color=["blue", "green"]
)
plt.title("Frequency of Heart Disease vs Sex")
plt.xlabel("0= AHD_Yes, 1= AHD_NO")
plt.ylabel("No. of people with heart disease")
plt.legend(["Female", "Male"])
plt.xticks(rotation=0)
heart_final.Sex[heart_final.AHD_Yes == 1].value_counts().plot(
kind="bar", figsize=(10, 6), color=["green", "blue"]
)
plt.title("males vs females with heart disease")
# ##### Conclusion: males are more prone to AHD
# Finding co-relation between all column
heart_final.corr()
cor_mat = heart_final.corr()
fig, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(cor_mat, annot=True, linewidths=0.5, fmt=".3f")
# dropping AHD_No since it is the complement of AHD_Yes, and dropping 'Unnamed: 0' since it adds no value
heart_final = heart_final.drop(["AHD_No", "Unnamed: 0"], axis=1)
heart_final.head(2)
# getting all column name
heart_final.columns
heart_scaled = heart_final
# Scaling all features except the target variable AHD_Yes
from sklearn.preprocessing import MinMaxScaler
MMscal = MinMaxScaler()
features = [
"Age",
"Sex",
"RestBP",
"Chol",
"Fbs",
"RestECG",
"MaxHR",
"ExAng",
"Oldpeak",
"Slope",
"Ca",
"ChestPain_asymptomatic",
"ChestPain_nonanginal",
"ChestPain_nontypical",
"ChestPain_typical",
"Thal_fixed",
"Thal_normal",
"Thal_reversable",
]
heart_scaled[features] = MMscal.fit_transform(heart_final[features])
heart_scaled.head()
# Creating Features and Target variable
X = heart_scaled.drop("AHD_Yes", axis=1)
Y = heart_scaled.AHD_Yes
# splitting the data into training and testing data sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=1)
# # Create a function for evaluating metrics
from sklearn.metrics import (
accuracy_score,
recall_score,
precision_score,
f1_score,
confusion_matrix,
roc_auc_score,
r2_score,
)
def evaluation(Y_test, Y_pred):
acc = accuracy_score(Y_test, Y_pred)
rcl = recall_score(Y_test, Y_pred)
f1 = f1_score(Y_test, Y_pred)
auc_score = roc_auc_score(Y_test, Y_pred)
prec_score = precision_score(Y_test, Y_pred)
metric_dict = {
"accuracy": round(acc, 3),
"recall": round(rcl, 3),
"F1 score": round(f1, 3),
"auc score": round(auc_score, 3),
"precision": round(prec_score, 3),
}
return print(metric_dict)
# # Training the Data with different Models
# ## 1. KNeighborsClassifier
np.random.seed(42)
from sklearn.neighbors import KNeighborsClassifier
KNC_model = KNeighborsClassifier()
KNC_model.fit(X_train, y_train)
KNC_model_y_pred = KNC_model.predict(X_test)
KNC_model_r2_score = r2_score(y_test, KNC_model_y_pred)
print("R2 Score for predicted value: ", KNC_model_r2_score)
print("Accuracy on Traing set: ", KNC_model.score(X_train, y_train))
print("Accuracy on Testing set: ", KNC_model.score(X_test, y_test))
evaluation(y_test, KNC_model_y_pred)
|
import os
import glob
import numpy as np
import torch
from PIL import Image
import pytorch_lightning as pl
from torch.utils.data import Dataset, DataLoader
train_paths_normal = glob.glob(
"../input/chest-xray-pneumonia/chest_xray/train/NORMAL/*.jpeg"
)
len(train_paths_normal)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
dfd = pd.read_csv(
r"/kaggle/input/covid19-global-dataset/worldometer_coronavirus_daily_data.csv"
)
dfd.head(10)
dfs = pd.read_csv(
r"/kaggle/input/covid19-global-dataset/worldometer_coronavirus_summary_data.csv"
)
dfs.head(10)
print(dfd.shape)
print(dfs.shape)
cts = dfs.groupby("continent", as_index=False)[
[
"total_confirmed",
"total_deaths",
"total_recovered",
"active_cases",
"serious_or_critical",
]
].sum()
cts.head()
|
# # Project 4
# I'm going to rewrite the feature engineering code of week 2 in Polars.
data_path = "/kaggle/input/project-4/project_2_data"
import pandas as pd
import numpy as np
import polars as pl
import time
# # Feature engineering with Pandas
# * Here I put the feature engineering code in Pandas that I created in project 2, including loading the data.
# * As I wanted to use pure Pandas I needed to rewrite the code for many of the features created (lag, rolling mean, rolling std dev & seasonal rolling mean)
# Start timer
start_time = time.time()
# Load data
data = pd.read_parquet(f"{data_path}/sales_data.parquet")
# Filter out products that don't have sales using cumsum
data["cumsum_sales"] = data.groupby(["id"]).sales.cumsum()
data = data[data["cumsum_sales"] != 0]
data = data.drop(columns=["cumsum_sales"])
# Add price data
price_data = pd.read_parquet(f"{data_path}/prices.parquet")
data = (
data.reset_index()
.merge(price_data, on=["date", "store_id", "item_id"])
.rename(
columns={
"date": "ds",
"id": "unique_id",
"sales": "y",
}
)
)
# Originally in the project of week 2, lags and date-features were created using MLForecast
# Here I will create them manually with Pandas
data["lag_7"] = data.groupby("unique_id")["y"].shift(7)
data["lag_14"] = data.groupby("unique_id")["y"].shift(14)
data["lag_21"] = data.groupby("unique_id")["y"].shift(21)
data["lag_28"] = data.groupby("unique_id")["y"].shift(28)
data["rolling_mean_7"] = (
data.groupby("unique_id")["y"].rolling(7).mean().reset_index(drop=True)
)
data["rolling_mean_14"] = (
data.groupby("unique_id")["y"].rolling(14).mean().reset_index(drop=True)
)
data["rolling_mean_21"] = (
data.groupby("unique_id")["y"].rolling(21).mean().reset_index(drop=True)
)
data["rolling_mean_28"] = (
data.groupby("unique_id")["y"].rolling(28).mean().reset_index(drop=True)
)
data["rolling_std_7"] = (
data.groupby("unique_id")["y"].rolling(7).std().reset_index(drop=True)
)
data["rolling_std_14"] = (
data.groupby("unique_id")["y"].rolling(14).std().reset_index(drop=True)
)
data["rolling_std_21"] = (
data.groupby("unique_id")["y"].rolling(21).std().reset_index(drop=True)
)
data["rolling_std_28"] = (
data.groupby("unique_id")["y"].rolling(28).std().reset_index(drop=True)
)
data["day_of_week"] = data["ds"].dt.dayofweek
data["seasonal_rolling_mean_7"] = (
data.groupby(["unique_id", "day_of_week"])["y"]
.rolling(7)
.mean()
.reset_index(drop=True)
)
data["seasonal_rolling_mean_14"] = (
data.groupby(["unique_id", "day_of_week"])["y"]
.rolling(14)
.mean()
.reset_index(drop=True)
)
data["seasonal_rolling_mean_21"] = (
data.groupby(["unique_id", "day_of_week"])["y"]
.rolling(21)
.mean()
.reset_index(drop=True)
)
data["seasonal_rolling_mean_28"] = (
data.groupby(["unique_id", "day_of_week"])["y"]
.rolling(28)
.mean()
.reset_index(drop=True)
)
# End timer
end_time = time.time()
duration = end_time - start_time
print(f"Execution of feature engineering in Pandas took {duration} seconds")
# # Feature engineering with Polars
# * Here I create the same features as above, but using Polars
# Start timer
start_time = time.time()
# Load data
data_polars = pl.read_parquet(f"{data_path}/sales_data.parquet")
# Filter out products that don't have sales using cumsum
data_polars = (
data_polars.with_columns(
data_polars.select(
["id", pl.col("sales").cumsum().over("id").alias("cumsum_sales")]
)
)
.filter(pl.col("cumsum_sales") != 0)
.drop("cumsum_sales")
)
# Add price data
price_data_polars = pl.read_parquet(f"{data_path}/prices.parquet")
data_polars = data_polars.join(
price_data_polars, on=["date", "store_id", "item_id"], how="left"
)
# Create features
data_polars = (
data_polars.rename({"date": "ds", "id": "unique_id", "sales": "y"})
.with_columns(
[
pl.col("y").shift(periods=7).over("unique_id").alias("lag_7"),
pl.col("y").shift(periods=14).over("unique_id").alias("lag_14"),
pl.col("y").shift(periods=21).over("unique_id").alias("lag_21"),
pl.col("y").shift(periods=28).over("unique_id").alias("lag_28"),
pl.col("y")
.rolling_mean(window_size=7)
.over("unique_id")
.alias("rolling_mean_7"),
pl.col("y")
.rolling_mean(window_size=14)
.over("unique_id")
.alias("rolling_mean_14"),
pl.col("y")
.rolling_mean(window_size=21)
.over("unique_id")
.alias("rolling_mean_21"),
pl.col("y")
.rolling_mean(window_size=28)
.over("unique_id")
.alias("rolling_mean_28"),
pl.col("y")
.rolling_std(window_size=7)
.over("unique_id")
.alias("rolling_std_7"),
pl.col("y")
.rolling_std(window_size=14)
.over("unique_id")
.alias("rolling_std_14"),
pl.col("y")
.rolling_std(window_size=21)
.over("unique_id")
.alias("rolling_std_21"),
pl.col("y")
.rolling_std(window_size=28)
.over("unique_id")
.alias("rolling_std_28"),
pl.col("ds").dt.weekday().alias("day_of_week"),
]
)
.with_columns(
[
pl.col("y")
.rolling_mean(window_size=7)
.over(["unique_id", "day_of_week"])
.alias("seasonal_rolling_mean_7"),
pl.col("y")
.rolling_mean(window_size=14)
.over(["unique_id", "day_of_week"])
.alias("seasonal_rolling_mean_14"),
pl.col("y")
.rolling_mean(window_size=21)
.over(["unique_id", "day_of_week"])
.alias("seasonal_rolling_mean_21"),
pl.col("y")
.rolling_mean(window_size=28)
.over(["unique_id", "day_of_week"])
.alias("seasonal_rolling_mean_28"),
]
)
)
# End timer
end_time = time.time()
duration = end_time - start_time
print(f"Execution of feature engineering in Polars took {duration} seconds")
from polars.testing import assert_frame_equal
# Cast pandas dataframe to polars dataframe
data_pandas = pl.from_pandas(data)
data_pandas = (
data_pandas.select(sorted(data_pandas.columns))
.with_columns(
[
(pl.col("day_of_week") + 1).alias("day_of_week"),
(pl.col("unique_id").cast(pl.Utf8)),
]
)
.sort(["unique_id", "ds"])
)
data_polars = (
data_polars.select(sorted(data_pandas.columns))
.with_columns([(pl.col("unique_id").cast(pl.Utf8))])
.sort(["unique_id", "ds"])
)
data_pandas.describe()
data_polars.describe()
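# A sketch of how the assert_frame_equal import above could actually be used to confirm both pipelines produce the same features, given the alignment steps just applied; the keyword arguments are an assumption and may vary between polars versions. If the implementations still differ (for example in rolling-window edge handling), this raises an AssertionError.
assert_frame_equal(data_pandas, data_polars, check_dtype=False)
print("Pandas and Polars feature frames match (ignoring dtypes)")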
|
# ## COVID-19 World Vaccination Progress
# * This notebook explores the COVID-19 world vaccination progress across regions, focusing on the column
# 『 total vaccinations per hundred 』. Because missing values are scattered throughout the dataset, no single date works for every region, so for each region I picked the date for which its countries have the least missing data. The dates are therefore not identical across regions, but they are very close, mostly at the end of January or the beginning of February, and still give an interesting picture.
# * About the plots: each region's figure includes six subplots. The top-left shows the map of the region with its countries; the top-middle shows total vaccinations per hundred across the countries; the bottom-left displays the share of each vaccine type used worldwide on the chosen date, and it shares its y-axis with the bottom-middle, which shows the vaccine types used in the individual countries.
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
from matplotlib.collections import PatchCollection
from matplotlib.patches import PathPatch
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from geopy.geocoders import Nominatim
import matplotlib.pyplot as plt
from matplotlib import image
import seaborn as sns
import pandas as pd
import numpy as np
plt.rcParams["font.family"] = "Krungthep"
data = pd.read_csv("../input/covid-world-vaccination-progress/country_vaccinations.csv")
continents = pd.read_csv(
"../input/country-mapping-iso-continent-region/continents2.csv"
)
cases = pd.read_csv(
"../input/covid19-global-dataset/worldometer_coronavirus_summary_data.csv"
)
data.head()
# df1= data[["country","people_vaccinated_per_hundred","vaccines"]]
# df1.head()
# continents.head()
rename_cont = continents.rename(columns={"region": "continent", "name": "country"})
rename_cont.head()
trim_cases_df2 = cases[
[
"country",
"continent",
"total_confirmed",
"total_recovered",
"total_cases_per_1m_population",
"total_tests_per_1m_population",
]
]
trim_cases_df2.head()
merge_subregion_df = pd.merge(
rename_cont, trim_cases_df2, on=["country", "continent"], how="right"
)
merge_subregion_df.head()
trim_data_df = merge_subregion_df[
[
"country",
"continent",
"sub-region",
"total_confirmed",
"total_recovered",
"total_cases_per_1m_population",
"total_tests_per_1m_population",
]
]
trim_data_df
data.head()
data_sum_df = data.groupby(["country", "vaccines"]).sum().reset_index()
data_sum_df
# df1.head(20)
merge_df = pd.merge(trim_data_df, data_sum_df, on="country", how="inner")
merge_df
# # df66
grp = (
merge_df.groupby("sub-region")
.apply(lambda x: x.nlargest(7, "total_cases_per_1m_population"))
.reset_index(drop=True)
)
grp["sub-region"].unique()
# df100=grp[grp['sub-region'].isin(['Eastern Asia','Western Asia'])]
# df100
# plt.scatter(df100['country'],df100['vaccines'])
# # vacc=pd.DataFrame(grp['vaccines'].value_counts(normalize=True))
# vacc
# grp
# df2=df2.groupby("continent").apply(lambda x:x.nlargest(5,"total_cases_per_1m_population")).reset_index()
# df2 = df2.groupby('Genre').apply(lambda x: x.nlargest(5,"Global_Sales")) \
# .reset_index(drop=True)
# df3=pd.merge(df1,df2, on="country")
# df3.head(100)
# data_region=data.merge(continents[["region", "sub-region"]], left_on=data.country, right_on=continents.name)#.#drop(columns=["key_0"]).rename(columns={"sub-region":"subregion"})
# data_region.head()
# data_region.groupby('country')[""]
# geolocator = Nominatim(user_agent="geoapiExercises")
# latitude, longitude, country=[], [], []
# for c in data_region.country.unique().tolist():
# location=geolocator.geocode(c)
# longitude.append(location[1][0])
# latitude.append(location[1][1])
# country.append(c)
# countries=pd.DataFrame({"country":country, "latitude":latitude, "longitude":longitude})
# data_region=data_region.merge(countries, on="country")
# cases.loc[:, "country"]=cases["country"].replace(["UK", "Isle Of Man", "USA"], ["United Kingdom", "Isle of Man", "United States"])
# cases_df=pd.DataFrame()
# for i in data_region.country.unique():
# df=cases[cases["country"]=="{}".format(i)]
# cases_df=cases_df.append(df)
# cases_df.reset_index(drop=True, inplace=True)
# cases_df.loc[:, "total_cases_per_hundred"]=round(cases_df["total_cases_per_1m_population"]/10000)
# cases_df.loc[:, "total_recovered_rate"]=cases_df["total_recovered"]/cases_df["total_confirmed"]
# plt.figure(figsize=(15,10))
# plt.style.use("seaborn-dark")
# ### Southern Asia
# ### South-Eastern & Eastern Asia
# ### Western Asia
# ### Northern Europe
|
import numpy as np
import pandas as pd
import os
import json
import math
os.getcwd()
with open(
"/kaggle/input/solesensei_bdd100k/bdd100k_labels_release/bdd100k/labels/bdd100k_labels_images_train.json",
"r",
) as myFile:
labelData = myFile.read()
labelObj = json.loads(labelData)
labelObj[0]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from pathlib import Path
input_path = Path("/kaggle/input/amex-default-prediction/")
# Load Data
train_data = (
pd.read_feather("/kaggle/input/amexfeather/train_data.ftr")
.groupby("customer_ID")
.tail(1)
.set_index("customer_ID", drop=True)
.sort_index()
)
categorical_features = [
"B_30",
"B_38",
"D_114",
"D_116",
"D_117",
"D_120",
"D_126",
"D_63",
"D_64",
"D_68",
]
label_encoder = LabelEncoder()
for feature in categorical_features:
train_data[feature] = label_encoder.fit_transform(train_data[feature])
# Data processing
# remove the columns which have a lot of missing data
null_percentages = (train_data.isnull().sum() / len(train_data)) * 100
high_null_cols = [col for col in null_percentages.index if null_percentages[col] > 80]
train_data = train_data.drop(high_null_cols, axis=1)
# delete s2 feature
train_data = train_data.drop(["S_2"], axis=1)
categories = [col for col in train_data.columns if train_data[col].dtype == "category"]
# fill null data
for i in categories:
train_data[i] = train_data[i].fillna(train_data[i].mode()[0])
null_columns = train_data.columns[train_data.isna().any()].tolist()
for j in null_columns:
train_data[j] = train_data[j].fillna(train_data[j].median())
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
train_data[categories] = enc.fit_transform(train_data[categories])
X = train_data.drop("target", axis=1)
y = train_data["target"]
# Random Forest (a RandomForestRegressor is set up here; the grid search below is left commented out)
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
parameter_space = {
"n_estimators": [20],
"min_samples_leaf": [31],
"min_samples_split": [2],
"max_depth": [10],
"max_features": [40],
}
clf = RandomForestRegressor(criterion="squared_error", n_jobs=-1, random_state=22)
# grid = GridSearchCV(clf, parameter_space, cv=2, scoring="neg_mean_squared_error")
# grid.fit(x_train, y_train)
# XG boost model
from xgboost import XGBClassifier
model = XGBClassifier(n_estimators=300, max_depth=6, learning_rate=0.1).fit(X, y)
predict = model.predict(X)
accuracy = accuracy_score(y, predict)
print("Accuracy:", accuracy)
# Evaluate performance (on the training data, since no hold-out split was made)
from sklearn.metrics import classification_report
print(classification_report(y, predict))
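# The scores above are computed on the same rows the model was trained on, so they are optimistic. A hedged sketch of a hold-out evaluation (the split below is illustrative and not part of the original notebook):
from sklearn.model_selection import train_test_split
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
holdout_model = XGBClassifier(n_estimators=300, max_depth=6, learning_rate=0.1).fit(X_tr, y_tr)
val_pred = holdout_model.predict(X_val)
print("Hold-out accuracy:", accuracy_score(y_val, val_pred))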
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from wordcloud import WordCloud
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import accuracy_score, plot_confusion_matrix
from datetime import datetime
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/engineering-placements-prediction/collegePlace.csv")
# **Description of the Dataset**
data.head()
print(f"Shape of Dataframe is: {data.shape}")
pd.DataFrame(data.dtypes, columns=["Datatype"]).rename_axis("Column Name")
# **Statistical information of Dataframe**
data.describe().T.style.bar(subset=["mean"], color="#205ff2").background_gradient(
subset=["std"], cmap="Reds"
).background_gradient(subset=["50%"], cmap="coolwarm")
pd.DataFrame(data.isnull().sum(), columns=["Null Values"]).rename_axis("Column Name")
fig = px.histogram(data, "Age", title="<b>Average Age of Student</b>")
fig.add_vline(x=data["Age"].mean(), line_width=2, line_dash="dash", line_color="red")
fig.show()
pd.DataFrame(data["Gender"].value_counts()).rename(
{"Gender": "Counts"}, axis=1
).rename_axis("Gender")
px.histogram(data, x="Gender", title="<b>Total Male and Female</b>", color="Gender")
male = data[data["Gender"] == "Male"]
female = data[data["Gender"] == "Female"]
total_male = male.shape[0]
total_female = female.shape[0]
total_male_pass = male[male["PlacedOrNot"] == 1].shape[0]
total_female_pass = female[female["PlacedOrNot"] == 1].shape[0]
pass_male_percentage = np.round((total_male_pass * 100) / total_male, 2)
pass_female_percentage = np.round((total_female_pass * 100) / total_female, 2)
details = {
"Total Male": [total_male],
"Total Female": [total_female],
"Total male pass": [total_male_pass],
"Total female pass": [total_female_pass],
"% of Passed Male": [pass_male_percentage],
"% of Passed Female": [pass_female_percentage],
}
details
fig = px.histogram(
data_frame=data,
x="Stream",
color="PlacedOrNot",
title="<b>Counts of Stream</b>",
pattern_shape_sequence=["x"],
)
fig.update_layout(title_x=0.5, title_font=dict(size=20), uniformtext_minsize=15)
fig.show()
cgpa_above_avg = data[data["CGPA"] > data["CGPA"].mean()]
cgpa_above_avg
fig = px.histogram(
data_frame=cgpa_above_avg,
x="CGPA",
color="PlacedOrNot",
title="<b>Above Average CGPA Vs Placement</b>",
template="plotly",
)
fig.update_layout(bargap=0.2)
fig.show()
cgpa_below_avg = data[data["CGPA"] < data["CGPA"].mean()]
cgpa_below_avg
fig = px.histogram(
data_frame=cgpa_below_avg,
x="CGPA",
color="PlacedOrNot",
title="<b>Below Average CGPA Vs Placement</b>",
template="plotly",
)
fig.update_layout(bargap=0.2)
fig.show()
no_internship = data[data["Internships"] == 0]
no_internship
fig = px.histogram(
data_frame=no_internship,
x="PlacedOrNot",
color="PlacedOrNot",
title="<b>No Internship Experience Vs Placement</b>",
)
fig.update_layout(bargap=0.2)
fig.show()
corrmat = data.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20, 15))
# plot heat map
g = sns.heatmap(data[top_corr_features].corr(), annot=True)
|
# KNN Classification
# **Importing Important Libraries**
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
# **Importing Dataset from sklearn Library**
from sklearn.datasets import load_iris
iris = load_iris()
# **Column's in Iris Dataset**
dir(iris)
# **Feature's in Iris Dataset**
iris.feature_names
# **Creating a DataFrame from Data**
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df.head()
# **Adding Target Column to the DataFrame**
df["target"] = iris.target
df.head()
# **Exploring the Target Set**
iris.target_names
df[df.target == 0].head()
df[df.target == 1].head()
df[df.target == 2].head()
# **Adding Target Name to the Data Frame**
df["flower_name"] = df.target.apply(lambda x: iris.target_names[x])
df.head()
# **Visualization of Data**
# **Creating 3 Different DataFrames from the Original DataFrame**
df0 = df[df.target == 0]
df1 = df[df.target == 1]
df2 = df[df.target == 2]
# **Creating Scatter Plot with "sepal length (cm)", "sepal width (cm)" as X and Y Axis**
plt.scatter(df0["sepal length (cm)"], df0["sepal width (cm)"], color="red", marker="+")
plt.scatter(
df1["sepal length (cm)"], df1["sepal width (cm)"], color="green", marker="*"
)
plt.xlabel("sepal length (cm)")
plt.ylabel("sepal width (cm)")
# **Creating Scatter Plot with "petal length (cm)", "petal width (cm)" as X and Y Axis**
plt.scatter(df0["petal length (cm)"], df0["petal width (cm)"], color="red", marker="+")
plt.scatter(
df1["petal length (cm)"], df1["petal width (cm)"], color="green", marker="*"
)
plt.xlabel("petal length (cm)")
plt.ylabel("petal width (cm)")
# **Creating Dependent and Independent Variable**
X = df.drop(["target", "flower_name"], axis="columns")
X.head()
y = df.target
y.head()
# **Train Test Split**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
len(X_train), len(X_test)
# **Importing KNN Classifier**
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=10)
# **Fitting the Model**
knn.fit(X_train, y_train)
# **Predicting Result from Model**
knn.predict([[5.1, 3.5, 1.4, 0.2]]) # Values from iris DataFrame with index=0
knn.predict([[7.0, 3.2, 4.7, 1.4]]) # Values from iris DataFrame with index=50
knn.predict([[6.3, 3.3, 6.0, 2.5]]) # Values from iris DataFrame with index=100
# **Score of Model**
knn.score(X_test, y_test)
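# **A quick sketch, added as a suggestion rather than part of the original flow: since n_neighbors=10 was chosen arbitrarily, a few values of k can be compared with cross-validation on the training split.**
from sklearn.model_selection import cross_val_score
for k in [3, 5, 7, 10, 15]:
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k), X_train, y_train, cv=5)
    print(f"k={k}: mean CV accuracy = {scores.mean():.3f}")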
# **Importing Confusion Matrix**
from sklearn.metrics import confusion_matrix
y_pred = knn.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
# **Plotting Confusion Matrix**
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True)
plt.xlabel("Predicted")
plt.ylabel("Truth")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# VADER sentiment analysis - Valence Aware Dictionary and sEntiment Reasoner
df = pd.read_csv("/kaggle/input/imdb-dataset-of-50k-movie-reviews/IMDB Dataset.csv")
df.head()
df["review"][0]
# alternative way to access the same review
df.iloc[0]["review"]
# checking missing values
df.isnull().sum()
# check for blank (whitespace-only) reviews so they can be removed
blanks = []
for i, rv, lb in df.itertuples():  # itertuples yields (index, review, sentiment)
    if rv.isspace():
        blanks.append(i)
blanks
df.dropna(inplace=True)
import nltk
nltk.download("vader_lexicon")
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
sia.polarity_scores(df.iloc[0]["review"])
# add a column
df["score"] = df["review"].apply(lambda review: sia.polarity_scores(review))
df
df["compound"] = df["score"].apply(lambda d: d["compound"])
df
df["compound_score"] = df["compound"].apply(
lambda score: "positive" if score >= 0 else "negative"
)
df
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
accuracy_score(df["sentiment"], df["compound_score"])
print(classification_report(df["sentiment"], df["compound_score"]))
print(confusion_matrix(df["sentiment"], df["compound_score"]))
|
import sys
assert sys.version_info >= (3, 5)
import numpy as np
import os
import tarfile
import urllib
import urllib.request
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
# 1- Load your dataset from the “internet_connection_data.csv” csv file.
path = "/kaggle/input/internet-connection-data/internet_connection_data.csv"
df = pd.read_csv(path)
df.head()
df.shape
# 2- Explore your dataset and list the name of the columns.
column_names = df.columns.tolist()
print("Column names: ", column_names)
# 3- Explore your dataset and check if there is any column with missing values.
missing_values = df.isnull().sum()
columns_with_missing_values = missing_values[missing_values > 0].index.tolist()
if len(columns_with_missing_values) > 0:
print("Columns with missing values:")
print(columns_with_missing_values)
else:
print("No columns with missing values found.")
# 4- Select your input variables and output variable (hint: the output column is the one you want to
# predict. In this case it is the last column, which shows the type of network attack).
print(df.Category.unique())
X = df.loc[:, "Memory_PssTotal":"Network_TotalTransmittedBytes"]
y = df.Category
# 5- Split your dataset as %80 training and %20 testing.
from sklearn.model_selection import train_test_split
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
print("Training set shape: ", X_train.shape, y_train.shape)
print("Testing set shape: ", X_test.shape, y_test.shape)
# 6- Implement four classification model based on Logistic Regression, Support Vector Machine,
# Multinomial Naive Bayes and Random Forest classifiers.
# 7- Train (fit) your network.
sc = MinMaxScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
models_accuracy = {}
# **Logistic Regression**
model = LogisticRegression(solver="lbfgs", max_iter=1000)
model.fit(X_train, y_train)
y_pread = model.predict(X_test)
models_accuracy[f"{model.__class__.__name__}"] = accuracy_score(y_test, y_pread) * 100
# **Support Vector Machine**
model = SVC()
model.fit(X_train, y_train)
y_pread = model.predict(X_test)
models_accuracy[f"{model.__class__.__name__}"] = accuracy_score(y_test, y_pread) * 100
# **Multinomial Naive Bayes**
model = MultinomialNB()
model.fit(X_train, y_train)
y_pread = model.predict(X_test)
models_accuracy[f"{model.__class__.__name__}"] = accuracy_score(y_test, y_pread) * 100
# **Random Forest Classifier**
model = RandomForestClassifier(n_estimators=1000, bootstrap=False)
model.fit(X_train, y_train)
y_pread = model.predict(X_test)
models_accuracy[f"{model.__class__.__name__}"] = accuracy_score(y_test, y_pread) * 100
# 8- Report the accuracies (by percentage) of the models for the test datasets.
for model_accuracy in models_accuracy:
print(
f"The accuracy of {model_accuracy} model = {models_accuracy[model_accuracy]:.2f}%"
)
|
# Look for high positive and negative correlations between different marketing variables using a correlation matrix.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import packages
import seaborn as sn
import matplotlib.pyplot as plt
# create dataframe from data
path_str = "../input/marketing-data/marketing_data.csv"
df = pd.read_csv(path_str)
df.head()
# convert income column from dollars to numeric
df[" Income "] = df[" Income "].str.replace("$", "")
df[" Income "] = df[" Income "].str.replace(",", "")
df[" Income "] = pd.to_numeric(df[" Income "])
# delete columns
df.pop("ID")
# df.pop('Year_Birth')
df.pop("Complain")
df.pop("Country")
df.pop("Education")
df.pop("Marital_Status")
# give columns user-friendly names
df.rename(
columns={
"Dt_Customer": "date_became_customer",
"Recency": "days_since_last_purch",
"MntWines": "spent_on_wines",
},
inplace=True,
)
df.rename(
columns={"MntFruits": "spent_on_fruits", "MntMeatProducts": "spent_on_meats"},
inplace=True,
)
df.rename(
columns={
"MntFishProducts": "spent_on_fish",
"MntSweetProducts": "spent_on_sweets",
"MntGoldProds": "spent_on_gold",
},
inplace=True,
)
df.rename(
columns={
"NumDealsPurchases": "discount_purchases",
"NumWebPurchases": "web_purchases",
"NumCatalogPurchases": "catalog_purchases",
},
inplace=True,
)
df.rename(
columns={
"NumStorePurchases": "store_purchases",
"NumWebVisitsMonth": "web_visits",
"AcceptedCmp3": "accepted_3rd_offer",
},
inplace=True,
)
df.rename(
columns={
"AcceptedCmp4": "accepted_4th_offer",
"AcceptedCmp5": "accepted_5th_offer",
"AcceptedCmp1": "accepted_1st_offer",
},
inplace=True,
)
df.rename(
columns={
"AcceptedCmp2": "accepted_2nd_offer",
"Response": "accept_last_campaign",
"NumCatalogPurchases": "catalog_purchases",
},
inplace=True,
)
df.head()
# set width and height
f = plt.figure()
f.set_figwidth(20)
f.set_figheight(15)
# create matrix
sn.heatmap(
df.corr(),
annot=True,
vmin=-1,
vmax=1,
center=0,
cmap="Blues",
linewidths=1,
linecolor="black",
)
# Make x and y descriptions larger so they are easier to read
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
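# With this many variables the heatmap is hard to scan; a small sketch (using the renamed df built above) that lists the strongest pairwise correlations directly:
corr_pairs = df.corr().unstack()
# keep each pair once and drop the self-correlations on the diagonal
corr_pairs = corr_pairs[corr_pairs.index.get_level_values(0) < corr_pairs.index.get_level_values(1)]
print("Most positive correlations:")
print(corr_pairs.sort_values(ascending=False).head(10))
print("Most negative correlations:")
print(corr_pairs.sort_values().head(10))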
|
# ***BUSINESS GOAL :***
# In this notebook, we will explore a dataset of credit card customers and use clustering techniques to group customers based on their characteristics and behavior, with a particular focus on those who have churned (or have a high likelihood of doing so). By leveraging the power of clustering algorithms, we aim to uncover insights that can help credit card companies better understand their customers and develop effective strategies to retain them. So let's dive in!
# **STEP 1: READING AND UNDERSTANDING DATA**
# import all libraries and dependencies for dataframe and visualization
import pandas as pd
import numpy as np
from numpy import unique
from numpy import where
from numpy import mean
import matplotlib.pyplot as plt
import seaborn as sns
from yellowbrick.cluster import SilhouetteVisualizer
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime, timedelta
# import all libraries and dependencies for machine learning
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import mutual_info_classif
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from imblearn.ensemble import BalancedBaggingClassifier
from sklearn.cluster import Birch
from sklearn.cluster import AgglomerativeClustering
df = pd.read_csv(
"/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv"
)
df.shape
df.head()
df.info()
# **STEP 2 :DATA CLEANING**
# dropping customer id because it's insignificant
df = df.drop("CLIENTNUM", axis=1)
print(df.isnull().sum())
# no missing data
# checking for duplicated rows
print(df.loc[df.duplicated()])
# no duplicated rows
# CHECKING OUTLIERS :
data_1 = df["Dependent_count"] # small
data_2 = df["Months_on_book"] # small
data_3 = df["Total_Relationship_Count"] # small
data_4 = df["Months_Inactive_12_mon"] # smallx
data_5 = df["Credit_Limit"] # large
data_6 = df["Total_Revolving_Bal"] # large
data_7 = df["Avg_Open_To_Buy"] # large
data_8 = df["Total_Amt_Chng_Q4_Q1"] # smallx
data_9 = df["Total_Trans_Ct"] # small
data_10 = df["Total_Ct_Chng_Q4_Q1"] # smallx
data_11 = df["Avg_Utilization_Ratio"] # smallx
data_12 = df["Total_Trans_Amt"] # large
data_s = [data_1, data_2, data_3, data_9]
data_l = [data_5, data_6, data_7, data_12]
data_sx = [data_4, data_8, data_10, data_11]
general = [data_s, data_l, data_sx]
for gen in general:
fig = plt.figure(figsize=(10, 7))
# Creating axes instance
ax = fig.add_axes([0, 0, 1, 1])
# Creating plot
bp = ax.boxplot(gen)
# show plot
plt.show()
# as we can see, we can't plot all the features in the same figure due to the large difference in value ranges, so I divided them into 3 categories: large, small and smallx
# as expected, there's a large difference between the variables' distributions: Total_Amt_Chng_Q4_Q1, Total_Ct_Chng_Q4_Q1,
# Months_on_book (+/-), Credit_Limit, Avg_Open_To_Buy, Total_Trans_Amt (for the last 3, all outliers lie above the 3rd quartile)
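# As a numeric complement to the boxplots above (a quick sketch, not in the original notebook),
# we can count how many points fall outside the usual 1.5*IQR whiskers for each of these features.
for series in data_s + data_l + data_sx:
    q1, q3 = series.quantile([0.25, 0.75])
    iqr = q3 - q1
    n_out = ((series < q1 - 1.5 * iqr) | (series > q3 + 1.5 * iqr)).sum()
    print(f"{series.name}: {n_out} outliers by the 1.5*IQR rule")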
# **STEP 3 : DATA VISUALIZATION**
plt.figure(figsize=(5, 5))
sns.countplot(x="Attrition_Flag", data=df)
plt.show()
sns.countplot(x="Gender", data=df)
plt.show()
plt.figure(figsize=(8, 10))
sns.countplot(x="Income_Category", data=df)
plt.show()
plt.figure(figsize=(8, 10))
sns.countplot(x="Income_Category", data=df, hue="Attrition_Flag")
plt.show()
plt.figure(figsize=(5, 5))
sns.histplot(x="Marital_Status", hue="Attrition_Flag", data=df)
plt.show()
plt.figure(figsize=(5, 5))
sns.histplot(x="Customer_Age", data=df, kde=True, hue="Attrition_Flag")
plt.show()
# - Customer_Age is normally distributed
plt.figure(figsize=(5, 5))
sns.histplot(x="Customer_Age", data=df, kde=True, hue="Income_Category")
plt.show()
plt.figure(figsize=(5, 5))
sns.histplot(x="Credit_Limit", data=df, kde=True, hue="Attrition_Flag")
plt.show()
# - Attrited customers tend to have a Credit_Limit below 7000
plt.figure(figsize=(5, 5))
sns.barplot(
x="Credit_Limit", y="Income_Category", hue="Attrition_Flag", data=df, palette="Set1"
)
plt.show()
plt.figure(figsize=(5, 5))
sns.set_theme(style="ticks")
# Plot the orbital period with horizontal boxes
sns.boxplot(
x="Credit_Limit",
y="Income_Category",
data=df,
whis=[0, 100],
width=0.6,
palette="Set1",
)
# Add in points to show each observation
sns.stripplot(
x="Credit_Limit",
y="Income_Category",
data=df,
size=4,
linewidth=0,
hue="Attrition_Flag",
)
# Tweak the visual presentation
sns.despine(trim=True, left=True)
plt.show()
plt.figure(figsize=(5, 5))
sns.lineplot(x="Avg_Utilization_Ratio", y="Credit_Limit", data=df)
plt.show()
plt.figure(figsize=(5, 5))
sns.histplot(x="Total_Revolving_Bal", data=df, kde=True, hue="Attrition_Flag")
plt.show()
plt.figure(figsize=(5, 5))
sns.set_theme(style="ticks")
# Plot the orbital period with horizontal boxes
sns.boxplot(
x="Avg_Utilization_Ratio",
y="Income_Category",
data=df,
whis=[0, 100],
width=0.6,
palette="Set1",
)
# Add in points to show each observation
sns.stripplot(
x="Avg_Utilization_Ratio",
y="Income_Category",
data=df,
size=4,
linewidth=0,
hue="Attrition_Flag",
)
# Tweak the visual presentation
plt.gca().set(ylabel="")
sns.despine(trim=True, left=True)
plt.show()
plt.figure(figsize=(5, 5))
sns.histplot(df["Total_Revolving_Bal"], kde=True)
plt.show()
plt.figure(figsize=(5, 5))
sns.boxplot(x="Income_Category", y="Total_Revolving_Bal", hue="Attrition_Flag", data=df)
plt.show()
plt.figure(figsize=(5, 5))
sns.boxplot(x="Attrition_Flag", y="Total_Revolving_Bal", data=df)
plt.show()
plt.figure(figsize=(5, 5))
sns.violinplot(data=df, x="Income_Category", y="Months_on_book", palette="Set1")
plt.show()
# - Months_On_Book does not really provide useful information
# - all categories almost look the same
plt.figure(figsize=(5, 5))
sns.countplot(data=df, x="Months_Inactive_12_mon", hue="Attrition_Flag")
plt.show()
plt.figure(figsize=(5, 5))
sns.histplot(x="Avg_Open_To_Buy", data=df, kde=True, hue="Attrition_Flag")
plt.show()
plt.figure(figsize=(5, 5))
sns.boxplot(x="Income_Category", y="Avg_Open_To_Buy", hue="Attrition_Flag", data=df)
plt.show()
# - Avg_Open_To_Buy is the amount of credit that is still available for the cardholder to use; it is an important metric for credit card issuers when assessing the creditworthiness of the cardholder
plt.figure(figsize=(5, 5))
sns.countplot(x="Card_Category", data=df, hue="Attrition_Flag")
plt.show()
# - almost all attrited customers use the blue card
# - the blue card is the most used card
plt.figure(figsize=(5, 5))
sns.scatterplot(x="Avg_Open_To_Buy", y="Credit_Limit", data=df)
plt.show()
# - Avg_Open_To_Buy and Credit_Limit vary almost linearly with each other, so we may drop one of them later
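# A quick check of how strong that relationship actually is (a one-line sketch):
print(df["Avg_Open_To_Buy"].corr(df["Credit_Limit"]))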
plt.figure(figsize=(5, 5))
sns.histplot(x="Avg_Open_To_Buy", kde=True, data=df)
sns.histplot(x="Credit_Limit", kde=True, data=df)
plt.show()
plt.figure(figsize=(5, 5))
sns.boxplot(
y="Avg_Utilization_Ratio",
x="Income_Category",
hue="Attrition_Flag",
data=df[df.Avg_Utilization_Ratio > 0.3],
)
plt.show()
# **STEP 4: DATA PREPARATION**
df = df.drop("Card_Category", axis=1)
cat_col = [
"Gender",
"Attrition_Flag",
"Education_Level",
"Income_Category",
"Marital_Status",
]
# Get the dummy variables for the categorical features and store them in a new variable - 'dummies'
dummies = pd.get_dummies(df[cat_col], drop_first=True)
# Add the results to the original dataframe
df = pd.concat([df, dummies], axis=1)
df.drop(cat_col, axis=1, inplace=True)
df.shape
y_df = df["Attrition_Flag_Existing Customer"]
x_df = df.drop("Attrition_Flag_Existing Customer", axis=1)
# Feature Scaling for input features.
scaler = preprocessing.MinMaxScaler()
x_scaled = scaler.fit_transform(x_df)
x_df = pd.DataFrame(
x_scaled,
columns=[
"Customer_Age",
"Dependent_count",
"Months_on_book",
"Total_Relationship_Count",
"Months_Inactive_12_mon",
"Contacts_Count_12_mon",
"Credit_Limit",
"Total_Revolving_Bal",
"Avg_Open_To_Buy",
"Total_Amt_Chng_Q4_Q1",
"Total_Trans_Amt",
"Total_Trans_Ct",
"Total_Ct_Chng_Q4_Q1",
"Avg_Utilization_Ratio",
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1",
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2",
"Gender_M",
"Education_Level_Doctorate",
"Education_Level_Graduate",
"Education_Level_High School",
"Education_Level_Post-Graduate",
"Education_Level_Uneducated",
"Education_Level_Unknown",
"Income_Category_$40K - $60K",
"Income_Category_$60K - $80K",
"Income_Category_$80K - $120K",
"Income_Category_Less than $40K",
"Income_Category_Unknown",
"Marital_Status_Married",
"Marital_Status_Single",
"Marital_Status_Unknown",
],
)
# **STEP 5: FEATURE SELECTION**
# ***The information gain feature selection algorithm*** is a method used in machine learning to select the most important features from a dataset. It works by measuring the reduction in entropy (uncertainty) of the target variable (class label) when a particular feature is added to the model. The information gain is calculated as the difference between the entropy of the target variable before and after the addition of the feature. A higher information gain indicates that the feature is more important in predicting the target variable. The algorithm ranks the features based on their information gain, and the top-k features are selected for use in the model. The information gain feature selection algorithm is commonly used in machine learning applications for tasks such as text classification, sentiment analysis, and spam filtering.
# INFORMATION GAIN
importances = mutual_info_classif(x_df, y_df)
feature_importance = pd.Series(importances, index=x_df.columns)
feature_importance.plot(kind="barh", color="teal")
plt.show()
selected_features = [
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1",
"Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2",
"Total_Revolving_Bal",
"Total_Trans_Amt",
"Total_Trans_Ct",
"Total_Ct_Chng_Q4_Q1",
]
x_df_reduced = df.loc[:, list(selected_features)]
# **STEP 6: MODEL BUILDING**
x_train, x_test, y_train, y_test = train_test_split(
x_df_reduced, y_df, test_size=0.25, random_state=1
)
# ***The BalancedBaggingClassifier algorithm*** is a classification algorithm that uses an ensemble of decision tree classifiers to improve the accuracy of classification tasks, particularly for imbalanced datasets. It works by randomly sampling subsets of the training data, with replacement, and training a decision tree classifier on each subset. The algorithm uses a random undersampling technique to balance the class distribution in each subset, which helps to address the problem of imbalanced datasets. The final classification is based on the majority vote of the decision tree classifiers in the ensemble. The BalancedBaggingClassifier algorithm is commonly used in machine learning applications for tasks such as fraud detection, anomaly detection, and medical diagnosis.
# bagged decision trees with random undersampling for imbalanced classification
# define model
model = BalancedBaggingClassifier()
# define evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
model.fit(x_train, y_train)
scores = cross_val_score(model, x_train, y_train, scoring="roc_auc", cv=cv, n_jobs=-1)
res = model.predict(x_train)
# confusion matrix
cf_matrix1 = confusion_matrix(y_train, res)
print(cf_matrix1)
sns.heatmap(pd.DataFrame(cf_matrix1), annot=True, cmap="YlGnBu", fmt="g")
plt.title("Confusion matrix", y=1.1)
plt.ylabel("Actual label")
plt.xlabel("Predicted label")
# model accuracy
print("Accuracy of model")
print(accuracy_score(y_train, res) * 100, "%")
x_clustering = df.loc[:, list(selected_features)]
# plotting correlation heatmap
plt.figure(figsize=(8, 10))
sns.heatmap(x_clustering.corr(), cmap="YlGnBu", annot=True)
plt.show()
# checking multicollinearity using VIF
X = add_constant(x_clustering)
ds = pd.Series(
[variance_inflation_factor(X.values, i) for i in range(X.shape[1])], index=X.columns
)
print(ds)
# - there is no multicollinearity; all VIF values are between 1 and 4
# **K-means implementation and searching for the optimal k:**
# ***The k-means algorithm*** is a clustering algorithm that groups a set of data points into a predefined number of clusters (k). It works by iteratively assigning each data point to the nearest centroid (cluster center) and then updating the centroid based on the mean of the assigned points. The algorithm continues to iterate until the centroids no longer change significantly or a maximum number of iterations is reached. The result is a set of k clusters, each with its own centroid representing the center of the cluster. The algorithm is commonly used in unsupervised machine learning applications for tasks such as image segmentation, customer segmentation, and anomaly detection.
wcss = []
for i in range(1, 16):
kmeans = KMeans(
n_clusters=i, init="k-means++", n_init=10, max_iter=100, random_state=42
)
kmeans.fit(x_clustering)
wcss.append(kmeans.inertia_)
plt.figure(figsize=(10, 8))
plt.plot(range(1, 16), wcss, marker="o", linestyle="--")
plt.xlabel("Number of clusters")
plt.ylabel("k-meanss clustering ")
plt.show()
fig, ax = plt.subplots(4, 2, figsize=(15, 8))
for i in [2, 3, 4, 5, 6, 7, 8, 9]:
"""
Create KMeans instance for different number of clusters
"""
km = KMeans(
n_clusters=i, init="k-means++", n_init=10, max_iter=100, random_state=42
)
q, mod = divmod(i, 2)
"""
Create SilhouetteVisualizer instance with KMeans instance
Fit the visualizer
"""
visualizer = SilhouetteVisualizer(km, colors="yellowbrick", ax=ax[q - 1][mod])
visualizer.fit(x_clustering)
# ***The silhouette score*** is a measure of the quality of clustering that quantifies how well each data point is classified into its own cluster compared to other clusters. It takes into account both the distance between the data point and the other points in its own cluster (cohesion) and the distance between the data point and the points in the nearest other cluster (separation). The silhouette score ranges from -1 to 1, with a higher score indicating better clustering quality. A score of 1 indicates that the data point is well-matched to its own cluster and poorly matched to other clusters, while a score of -1 indicates the opposite, and a score of 0 indicates that the data point is on the boundary between two clusters. The average silhouette score of all data points in a cluster is often used as a measure of the quality of the clustering
# sometimes the elbow method does not give us an obvious answer, so we can also use the silhouette score method
# to help us decide.
# how to find the optimal number of clusters based on the silhouette plots:
# - presence of clusters with below-average silhouette scores.
# - wide fluctuations in the size of the silhouette plots.
# - the thickness of the silhouette plot representing each cluster is also a deciding point.
# **6 looks optimal!**
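# As a numeric cross-check of that choice (a quick sketch using sklearn's silhouette_samples),
# we can print the average silhouette value and size of each cluster for k = 6.
from sklearn.metrics import silhouette_samples
km_check = KMeans(n_clusters=6, init="k-means++", n_init=10, max_iter=100, random_state=42)
labels_check = km_check.fit_predict(x_clustering)
sil_values = silhouette_samples(x_clustering, labels_check)
for c in sorted(set(labels_check)):
    members = labels_check == c
    print(f"cluster {c}: mean silhouette = {sil_values[members].mean():.3f}, size = {members.sum()}")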
# KMEANS IMPLEMENTATION
kmeans = KMeans(
n_clusters=6, init="k-means++", n_init=10, max_iter=100, random_state=42
)
kmeans.fit(x_clustering)
kmeans_result = kmeans.predict(x_clustering)
print(silhouette_score(x_clustering, kmeans_result))
# **BIRCH implementation and searching for optimal number of clusters :**
# ***The BIRCH (Balanced Iterative Reducing and Clustering using Hierarchies) algorithm*** is a hierarchical clustering algorithm designed to handle large datasets efficiently. It works by first building a tree-like data structure, called a CF tree (Clustering Feature tree), which summarizes the data in a compact form while preserving the clustering information. The CF tree is then recursively partitioned to produce a hierarchical clustering of the data. The algorithm is designed to be memory-efficient, as it only requires a small amount of memory to maintain the CF tree, and it can handle datasets that do not fit in memory. It is commonly used in data mining applications for tasks such as customer segmentation, outlier detection, and image clustering.
fig, ax = plt.subplots(4, 2, figsize=(15, 8))
for i in [2, 3, 4, 5, 6, 7, 8, 9]:
"""
Create KMeans instance for different number of clusters
"""
birch_model = Birch(threshold=0.03, branching_factor=50, n_clusters=i)
q, mod = divmod(i, 2)
"""
Create SilhouetteVisualizer instance with KMeans instance
Fit the visualizer
"""
visualizer = SilhouetteVisualizer(
birch_model, colors="yellowbrick", ax=ax[q - 1][mod]
)
visualizer.fit(x_clustering)
# BIRCH CLUSTRING
# define the model
birch_model = Birch(threshold=0.03, branching_factor=50, n_clusters=6)
# train the model
birch_model.fit(x_clustering)
# assign each data point to a cluster
birch_result = birch_model.predict(x_clustering)
# get all of the unique clusters
birch_clusters = unique(birch_result)
print(silhouette_score(x_clustering, birch_result))
# **Agglomerative Clustering implementation and choosing the linkage parameter**
# ***Agglomerative clustering*** is a hierarchical clustering algorithm that works by iteratively merging the closest pairs of data points or clusters into larger clusters. It starts by considering each data point as a separate cluster and then combines the two closest clusters into a single cluster. This process is repeated until all data points belong to a single cluster, or until a stopping criterion is met, such as reaching a desired number of clusters or a threshold distance. The algorithm produces a dendrogram, which is a tree-like diagram that shows the sequence of merges and the resulting hierarchy of clusters. Agglomerative clustering is commonly used in data mining and machine learning applications for tasks such as customer segmentation, image clustering, and gene expression analysis.
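# A dendrogram gives a visual sense of the merge hierarchy described above (a sketch using scipy,
# not part of the original notebook); we subsample the rows to keep the plot readable.
from scipy.cluster.hierarchy import dendrogram, linkage as scipy_linkage
sample = x_clustering.sample(500, random_state=42)
Z = scipy_linkage(sample, method="ward")
plt.figure(figsize=(12, 5))
dendrogram(Z, truncate_mode="lastp", p=30)
plt.title("Ward-linkage dendrogram (500-row sample)")
plt.show()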
linkage = ["ward", "complete", "average", "single"]
for link in linkage:
# define the model
agglomerative_model = AgglomerativeClustering(
n_clusters=6, linkage=link, compute_full_tree=True
)
# assign each data point to a cluster
agglomerative_result = agglomerative_model.fit_predict(x_clustering)
# get all of the unique clusters
agglomerative_clusters = unique(agglomerative_result)
print(silhouette_score(x_clustering, agglomerative_result), " : linkage=", link)
# 'single' gives us the best silhouette score; we can't use the yellowbrick library to visualize the cluster count
# distribution like in the previous implementations, but we can use a seaborn visualization
agglomerative_model = AgglomerativeClustering(
n_clusters=6, linkage="single", compute_full_tree=True
)
agglomerative_result = agglomerative_model.fit_predict(x_clustering)
res_agg = pd.DataFrame(agglomerative_result, columns=["cluster"])
sns.countplot(x="cluster", data=res_agg)
# - parameters and evaluation scores can be very tricky, so we should always check
# - as a rule of thumb, we are going to take 'ward' as the linkage, which is the default parameter
agglomerative_model = AgglomerativeClustering(
n_clusters=6, linkage="ward", compute_full_tree=True
)
agglomerative_result = agglomerative_model.fit_predict(x_clustering)
# **AgglomerativeClustering VS K-means VS BIRCH**
# visualization for agglomerative clustering
res_agg = pd.DataFrame(agglomerative_result, columns=["cluster"])
# visualization for birch clustering
res_birch = pd.DataFrame(birch_result, columns=["cluster"])
# visualization for kmeans clustering
res_km = pd.DataFrame(kmeans_result, columns=["cluster"])
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(2, 2)
ax[0, 0].hist(
res_agg["cluster"],
)
ax[0, 0].set_title("visualization for agglomerative clustering")
ax[0, 1].hist(
res_birch["cluster"],
)
ax[0, 1].set_title("visualization for birch clustering")
ax[1, 0].hist(
res_km["cluster"],
)
ax[1, 0].set_title("visualization for kmeans clustering")
fig.tight_layout()
plt.show()
# - BIRCH and agglomerative clustering are both hierarchical clustering algorithms; we observe that in this case they converged to the same result
# - the kmeans algorithm tends to cluster data into balanced groups, but here that's not really the case; we are not sure yet whether that's a good sign or a bad one
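# How similar are the three labelings? A quick sketch (not in the original notebook) using the
# adjusted Rand index, where 1.0 means identical partitions:
from sklearn.metrics import adjusted_rand_score
print("kmeans vs birch        :", adjusted_rand_score(kmeans_result, birch_result))
print("kmeans vs agglomerative:", adjusted_rand_score(kmeans_result, agglomerative_result))
print("birch vs agglomerative :", adjusted_rand_score(birch_result, agglomerative_result))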
# **STEP 7: POST CLUSTERING VISUALIZATION**
df_f = pd.read_csv(
"/kaggle/input/predicting-credit-card-customer-attrition-with-m/BankChurners.csv"
)
df_final_agg = pd.concat([df_f.reset_index(drop=True), res_agg], axis=1)
df_final_km = pd.concat([df_f.reset_index(drop=True), res_km], axis=1)
# **K-means:**
data_0_km = df_final_km[df_final_km.cluster == 0]
data_1_km = df_final_km[df_final_km.cluster == 1]
data_2_km = df_final_km[df_final_km.cluster == 2]
data_3_km = df_final_km[df_final_km.cluster == 3]
data_4_km = df_final_km[df_final_km.cluster == 4]
data_5_km = df_final_km[df_final_km.cluster == 5]
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].hist(
data_0_km["Income_Category"],
)
ax[0, 0].set_title("cluster 0")
ax[0, 1].hist(data_1_km["Income_Category"])
ax[0, 1].set_title("cluster 1")
ax[1, 0].hist(data_2_km["Income_Category"])
ax[1, 0].set_title("cluster 2")
ax[1, 1].hist(data_3_km["Income_Category"])
ax[1, 1].set_title("cluster 3")
ax[2, 0].hist(data_4_km["Income_Category"])
ax[2, 0].set_title("cluster 4")
ax[2, 1].hist(data_5_km["Income_Category"])
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
ic_val = list(unique(df_final_km["Income_Category"]))  # use the full dataframe so every category is represented
data_km = [data_0_km, data_1_km, data_2_km, data_3_km, data_4_km, data_5_km]
results = []
for dat in data_km:
r = [0] * 6
for i in dat["Income_Category"]:
r[ic_val.index(i)] = r[ic_val.index(i)] + 1
results.append(r)
# Income category visualization
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].pie(results[0], labels=ic_val, autopct="%1.1f%%")
ax[0, 0].set_title("cluster 0")
ax[0, 1].pie(results[1], labels=ic_val, autopct="%1.1f%%")
ax[0, 1].set_title("cluster 1")
ax[1, 0].pie(results[2], labels=ic_val, autopct="%1.1f%%")
ax[1, 0].set_title("cluster 2")
ax[1, 1].pie(results[3], labels=ic_val, autopct="%1.1f%%")
ax[1, 1].set_title("cluster 3")
ax[2, 0].pie(results[4], labels=ic_val, autopct="%1.1f%%")
ax[2, 0].set_title("cluster 4")
ax[2, 1].pie(results[5], labels=ic_val, autopct="%1.1f%%")
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
att_val = list(unique(df_final_km["Attrition_Flag"]))
results_att = []
for dat in data_km:
r_att = [0] * 2
for i in dat["Attrition_Flag"]:
r_att[att_val.index(i)] = r_att[att_val.index(i)] + 1
results_att.append(r_att)
# Attrition flag visualization
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].pie(results_att[0], labels=att_val, autopct="%1.1f%%")
ax[0, 0].set_title("cluster 0")
ax[0, 1].pie(results_att[1], labels=att_val, autopct="%1.1f%%")
ax[0, 1].set_title("cluster 1")
ax[1, 0].pie(results_att[2], labels=att_val, autopct="%1.1f%%")
ax[1, 0].set_title("cluster 2")
ax[1, 1].pie(results_att[3], labels=att_val, autopct="%1.1f%%")
ax[1, 1].set_title("cluster 3")
ax[2, 0].pie(results_att[4], labels=att_val, autopct="%1.1f%%")
ax[2, 0].set_title("cluster 4")
ax[2, 1].pie(results_att[5], labels=att_val, autopct="%1.1f%%")
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
# customer age visualization
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].hist(data_0_km["Customer_Age"])
ax[0, 0].set_title("cluster 0")
ax[0, 1].hist(data_1_km["Customer_Age"])
ax[0, 1].set_title("cluster 1")
ax[1, 0].hist(data_2_km["Customer_Age"])
ax[1, 0].set_title("cluster 2")
ax[1, 1].hist(data_3_km["Customer_Age"])
ax[1, 1].set_title("cluster 3")
ax[2, 0].hist(data_4_km["Customer_Age"])
ax[2, 0].set_title("cluster 4")
ax[2, 1].hist(data_5_km["Customer_Age"])
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
plt.figure(figsize=(15, 7))
sns.boxplot(y="Total_Revolving_Bal", x="cluster", data=df_final_km)
plt.figure(figsize=(15, 7))
sns.boxplot(y="Total_Trans_Amt", x="cluster", data=df_final_km)
plt.figure(figsize=(15, 7))
sns.boxplot(y="Total_Ct_Chng_Q4_Q1", x="cluster", data=df_final_km)
# **Agglomerative clustering:**
data_0_agg = df_final_agg[df_final_agg.cluster == 0]
data_1_agg = df_final_agg[df_final_agg.cluster == 1]
data_2_agg = df_final_agg[df_final_agg.cluster == 2]
data_3_agg = df_final_agg[df_final_agg.cluster == 3]
data_4_agg = df_final_agg[df_final_agg.cluster == 4]
data_5_agg = df_final_agg[df_final_agg.cluster == 5]
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].hist(
data_0_agg["Income_Category"],
)
ax[0, 0].set_title("cluster 0")
ax[0, 1].hist(data_1_agg["Income_Category"])
ax[0, 1].set_title("cluster 1")
ax[1, 0].hist(data_2_agg["Income_Category"])
ax[1, 0].set_title("cluster 2")
ax[1, 1].hist(data_3_agg["Income_Category"])
ax[1, 1].set_title("cluster 3")
ax[2, 0].hist(data_4_agg["Income_Category"])
ax[2, 0].set_title("cluster 4")
ax[2, 1].hist(data_5_agg["Income_Category"])
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
ic_val = list(unique(df_final_agg["Income_Category"]))
data_agg = [data_0_agg, data_1_agg, data_2_agg, data_3_agg, data_4_agg, data_5_agg]
results_agg = []
for dat in data_agg:
r = [0] * 6
for i in dat["Income_Category"]:
r[ic_val.index(i)] = r[ic_val.index(i)] + 1
results_agg.append(r)
# Income category visualization
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].pie(results_agg[0], labels=ic_val, autopct="%1.1f%%")
ax[0, 0].set_title("cluster 0")
ax[0, 1].pie(results_agg[1], labels=ic_val, autopct="%1.1f%%")
ax[0, 1].set_title("cluster 1")
ax[1, 0].pie(results_agg[2], labels=ic_val, autopct="%1.1f%%")
ax[1, 0].set_title("cluster 2")
ax[1, 1].pie(results_agg[3], labels=ic_val, autopct="%1.1f%%")
ax[1, 1].set_title("cluster 3")
ax[2, 0].pie(results_agg[4], labels=ic_val, autopct="%1.1f%%")
ax[2, 0].set_title("cluster 4")
ax[2, 1].pie(results_agg[5], labels=ic_val, autopct="%1.1f%%")
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
att_val = list(unique(df_final_agg["Attrition_Flag"]))
results_att = []
for dat in data_agg:
r_att = [0] * 2
for i in dat["Attrition_Flag"]:
r_att[att_val.index(i)] = r_att[att_val.index(i)] + 1
results_att.append(r_att)
# Attrition flag visualization
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].pie(results_att[0], labels=att_val, autopct="%1.1f%%")
ax[0, 0].set_title("cluster 0")
ax[0, 1].pie(results_att[1], labels=att_val, autopct="%1.1f%%")
ax[0, 1].set_title("cluster 1")
ax[1, 0].pie(results_att[2], labels=att_val, autopct="%1.1f%%")
ax[1, 0].set_title("cluster 2")
ax[1, 1].pie(results_att[3], labels=att_val, autopct="%1.1f%%")
ax[1, 1].set_title("cluster 3")
ax[2, 0].pie(results_att[4], labels=att_val, autopct="%1.1f%%")
ax[2, 0].set_title("cluster 4")
ax[2, 1].pie(results_att[5], labels=att_val, autopct="%1.1f%%")
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
# customer age visualization
plt.figure(figsize=(19, 12))
fig, ax = plt.subplots(3, 2)
ax[0, 0].hist(data_0_agg["Customer_Age"])
ax[0, 0].set_title("cluster 0")
ax[0, 1].hist(data_1_agg["Customer_Age"])
ax[0, 1].set_title("cluster 1")
ax[1, 0].hist(data_2_agg["Customer_Age"])
ax[1, 0].set_title("cluster 2")
ax[1, 1].hist(data_3_agg["Customer_Age"])
ax[1, 1].set_title("cluster 3")
ax[2, 0].hist(data_4_agg["Customer_Age"])
ax[2, 0].set_title("cluster 4")
ax[2, 1].hist(data_5_agg["Customer_Age"])
ax[2, 1].set_title("cluster 5")
fig.tight_layout()
plt.show()
plt.figure(figsize=(15, 7))
sns.boxplot(y="Total_Revolving_Bal", x="cluster", data=df_final_agg)
plt.figure(figsize=(15, 7))
sns.boxplot(y="Total_Trans_Amt", x="cluster", data=df_final_agg)
plt.figure(figsize=(15, 7))
sns.boxplot(y="Total_Ct_Chng_Q4_Q1", x="cluster", data=df_final_agg)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
dataset_raw = pd.read_csv(
"/kaggle/input/higher-education-predictors-of-student-retention/dataset.csv"
)
print(dataset_raw.shape)
dataset_raw.head().T
dataset_raw.info()
dataset_raw.describe().T
dataset = dataset_raw.copy()
dataset.shape
dataset.Target.value_counts()
dataset.loc[dataset["Target"] == "Dropout", "Target"] = float(0.0)
dataset.loc[dataset["Target"] == "Enrolled", "Target"] = float(1.0)
dataset.loc[dataset["Target"] == "Graduate", "Target"] = float(2.0)
dataset.Target.value_counts()
corrmat = dataset.corr()
corrmat
dataset.info()
plt.figure(figsize=(12, 10))
k = 35
cols = corrmat.nlargest(k, "Target")["Target"].index
cm = np.corrcoef(dataset[cols].values.T)
sns.set(font_scale=1.4)
hm = sns.heatmap(
cm,
cbar=True,
annot=True,
square=True,
fmt=".2f",
annot_kws={"size": 10},
yticklabels=cols.values,
xticklabels=cols.values,
cmap="rainbow",
)
plt.show()
dataset_raw.Target.value_counts()
target = dataset_raw["Target"]
features = dataset_raw.drop(["Target"], axis=1)
target.shape, features.shape
target.value_counts()
|
# ## Abstract
# Pneumonia is a lung disease affecting the small air sacs known as alveoli and is the cause of death for 40,000 Americans per year. The alveoli become inflamed and can fill with fluid, causing difficulty breathing. Pneumonia can be diagnosed with chest x-rays, which also show the location of the infection. Early detection of pneumonia can help prevent complications or death from the disease. In this project, I have developed a deep learning model to classify chest x-rays with pneumonia. PyTorch and torchvision were used to develop the layers in the model, such as ReLU for nonlinearity and batch normalization to help with overfitting. The model reached a max accuracy of 96%. When inputting test images, the model was able to correctly classify most chest x-rays, indicating that there is room for improvement and potential to apply this method to other lung diseases.
# Installing the `opendatasets` package for the purpose of loading in the data
# Importing as the alias `od`
# The dataset is sourced from kaggle: [Kaggle Xray Dataset](https://www.kaggle.com/datasets/tolgadincer/labeled-chest-xray-images)
import opendatasets as od
# dataset_url = 'https://www.kaggle.com/datasets/tolgadincer/labeled-chest-xray-images'
# Downloading the dataset of x-ray images using `od.download` and the url from `dataset_url`
# od.download(dataset_url)
# # Methods: Import X-ray Data into PyTorch
data_dir = "/kaggle/input/labeled-chest-xray-images/chest_xray/train"
import os
# Confirming the folders in our directory
os.listdir(data_dir)
# Importing `ImageFolder` from torchvision
from torchvision.datasets import ImageFolder
# Picks out all the x-ray images from both categories
# **Data Augmentation**
# `tt.Compose()` applies a sequence of transformations to the images in order
# `tt.RandomInvert()` randomly inverts the colors
# `tt.Resize()` makes all the images the same size; `tt.ToTensor()` converts the images into tensors
import torchvision.transforms as tt
from torch.utils.data import random_split
train_ds = ImageFolder(
data_dir, tt.Compose([tt.Resize([64, 64]), tt.RandomInvert(0.5), tt.ToTensor()])
)
val_pct = 0.1
val_size = int(val_pct * len(train_ds))
train_size = len(train_ds) - val_size
train_ds, valid_ds = random_split(train_ds, [train_size, val_size])
len(train_ds), len(valid_ds)
# Checking the number of images in our dataset
len(train_ds), len(valid_ds)
img, label = train_ds[0]
print(img.shape, label)
# Creating DataLoaders so that we can load the data in batches
# `shuffle` will shuffle the x-ray images; `num_workers` sets how many worker processes load batches in parallel
from torch.utils.data.dataloader import DataLoader
batch_size = 128
train_dl = DataLoader(
train_ds, batch_size, shuffle=True, num_workers=2, pin_memory=True
)
valid_dl = DataLoader(valid_ds, batch_size, num_workers=2, pin_memory=True)
for cls in os.listdir(data_dir):
print(cls, ":", len(os.listdir(data_dir + "/" + cls)))
import torch.nn as nn
import torch.nn.functional as F
import torch
# Helper functions to move model and data to the GPU
def get_default_device():
if torch.cuda.is_available(): # Pick GPU if available, else CPU
return torch.device("cuda")
else:
return torch.device("cpu")
def to_device(data, device):
# Move tensor(s) to chosen device
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader:
# Wrap a dataloader to move data to a device
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
# Yield a batch of data after moving it to device
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
# Number of batches
return len(self.dl)
device = get_default_device()
device
# moves data to GPU
train_dl = DeviceDataLoader(train_dl, device)
valid_dl = DeviceDataLoader(valid_dl, device)
# `ImageClassificationBase` contains helper methods for training and validation
# extending the nn.Module class
class ImageClassificationBase(nn.Module):
def training_step(self, batch): # calculating loss for a batch of training data
images, labels = batch
out = self(images) # Pass images into the model and generates predictions
loss = F.cross_entropy(out, labels) # Calculate loss using cross entropy
return loss
def validation_step(
self, batch
): # calculating loss and accuracy for a batch of validation data
images, labels = batch
out = self(images)
loss = F.cross_entropy(out, labels) # Calculate loss using cross entropy
acc = accuracy(out, labels) # Calculate accuracy
return {"val_loss": loss.detach(), "val_acc": acc}
def validation_epoch_end(self, outputs):
batch_losses = [x["val_loss"] for x in outputs]
epoch_loss = torch.stack(
batch_losses
).mean() # Combine losses and take the mean
batch_accs = [x["val_acc"] for x in outputs]
epoch_acc = torch.stack(
batch_accs
).mean() # Combine accuracies and take the mean
return {
"val_loss": epoch_loss.item(),
"val_acc": epoch_acc.item(),
} # Combine all results for the validation set
def epoch_end(
self, epoch, result
): # will print all the information after each epoch
print(
"Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result["train_loss"], result["val_loss"], result["val_acc"]
)
)
def accuracy(outputs, labels): # calculates accuracy of the predictions
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
# `evaluate` will evaluate the model's performance on the validation set
# `fit` is performing gradient descent
@torch.no_grad() # tells pytorch to not compute gradients while in evaluation mode
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
# Training Phase
model.train()
train_losses = []
for batch in train_loader:
loss = model.training_step(
batch
) # takes a batch of data from the training set and returns the loss for that batch
train_losses.append(loss)
loss.backward() # calculates gradients
optimizer.step() # performs gradient descent and update the weights of the model
optimizer.zero_grad() # clears out gradients
# Validation phase
result = evaluate(model, val_loader) # go into evaluation mode
result["train_loss"] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
# # Methods: Model Development
# Developing the model and its neural network layers. We will use residual blocks that add the original input back to the output feature map in the feed-forward layers
# Each kernel learns a specific feature about the input image
from torch.nn.modules.pooling import MaxPool2d
def conv_block(in_channels, out_channels, pool=False):
# BatchNorm2d for regularization and reduce overfitting
# applying activation function (ReLU) and introducing non-linearity
# MaxPool2d takes each set of 4 pixels and replaces them with the max value
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
if pool:
layers.append(nn.MaxPool2d(2))
return nn.Sequential(*layers)
class ChestXrayModel_1(ImageClassificationBase): # extending ImageClassificationBase
def __init__(self, in_channels, num_classes):
super().__init__()
# Input: 3 x 64 x 64
self.conv1 = conv_block(in_channels, 64)
self.conv2 = conv_block(64, 128, pool=True) # 128 x 32 x 32
self.res1 = nn.Sequential(
conv_block(128, 128), conv_block(128, 128) # 128 x 32 x 32
)
self.conv3 = conv_block(128, 256, pool=True) # 256 x 16 x 16
self.conv4 = conv_block(256, 512, pool=True) # 512 x 8 x 8
self.res2 = nn.Sequential(
conv_block(512, 512), conv_block(512, 512) # 512 x 8 x 8
)
self.classifier = nn.Sequential(
nn.AdaptiveMaxPool2d(1), # 512 x 1 x 1
nn.Flatten(), # 512 x 1; flattens the output feature map into a vector
nn.Dropout(0.2),
nn.Linear(512, num_classes),
)
def forward(
self, xb
): # taking the input and adding it back to the output throughout the layers
out = self.conv1(xb)
        out = self.conv2(out)  # output becomes an input.
out = self.res1(out) + out
out = self.conv3(out)
out = self.conv4(out)
out = self.res2(out) + out
out = self.classifier(out)
return out
# Moving model to the GPU
model = to_device(ChestXrayModel_1(3, 2), device)
model
for batch in train_dl:
images, labels = batch
print("images.shape", images.shape)
print("images.device", images.device)
preds = model(images)
print("preds.shape", preds.shape)
print("preds[0]:", preds[0])
break
# # Methods: Training
# Getting a performance benchmark of where the model is starting from.
# First accuracy value I saw: ~50%
# After implementing revised model and other strategies, initial accuracy was ~28%
history = [evaluate(model, valid_dl)]
history
# I had to reduce the image size to 64x64 because I was running out of memory. Initial learning rate: 0.001
torch.cuda.empty_cache()
history += fit(5, 0.001, model, train_dl, valid_dl, torch.optim.Adam)
# **Attempt 1**:
# After all 7 epochs, the accuracy is now ~62%
# Maybe implement augmentation to improve performance, increase DropOut percentage, change number of epochs
# **Attempt 2**:
# Decreased to 5 epochs and changed DropOut percentage from 20% to 35%. Accuracy jumped down a little and then ended with ~81%
# **Attempt 3**:
# 6 epochs, adding batch normalization after each convolutional layer, and changing kernel size from 3 to 6 to increase the number of features extracted. Result: the accuracy started at 50%, then went to 68%, then back to 50%.
# **Attempt 4**:
# Decided to switch to a different dataset and implement residual conv blocks so that the features extracted continue to build up. Added in the random invert transform and put the kernel size back to 3. 5 epochs. Result: reached an accuracy of ~97%
# Defining a plot function to plot the accuracies from each epoch. A function also helps reduce redundant code.
import matplotlib.pyplot as plt
def plot_accuracies(history):
accuracies = [x["val_acc"] for x in history]
plt.plot(accuracies, "-x")
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.title("Accuracy vs. No. of epochs")
# ## Results:
# I have created a plot that visualizes the accuracy increasing with each epoch
plot_accuracies(history)
# Function for plotting the losses from each epoch
def plot_losses(history):
train_losses = [x.get("train_loss") for x in history]
val_losses = [x["val_loss"] for x in history]
plt.plot(train_losses, "-bx")
plt.plot(val_losses, "-rx")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend(["Training", "Validation"])
plt.title("Loss vs. No. of epochs")
# As shown in the plot, the loss was very high in the first few epochs and then decreased significantly in the later epochs. A plot like this can also help us see whether the model is overfitting
plot_losses(history)
# Saving best model before running out of gpu memory from testing:
# optional cell
# torch.save(model.state_dict(), 'chest_xrayDL.pth')
# # Results: Test Model with Individual Images
# We can use the test set provided
# use the same Kaggle input mount as the training data
test_dataset = ImageFolder(
    "/kaggle/input/labeled-chest-xray-images/chest_xray/test", tt.ToTensor()
)
# `predict_image()` will move the image to the GPU and converts the image into a batch of 1 image
def predict_image(img, model, classes): # number of classes is 2: NORMAL and PNEUMONIA
# Convert to a batch of 1
xb = to_device(img.unsqueeze(0), device)
# Get predictions from model
yb = model(xb)
# Pick index with highest probability
_, preds = torch.max(yb, dim=1)
# Retrieve the class label
return classes[preds[0].item()]
train_ds.dataset.classes
torch.cuda.empty_cache()
img, label = test_dataset[32]
plt.imshow(img.permute(1, 2, 0))
pred = predict_image(img, model, train_ds.dataset.classes)
print("Target:", train_ds.dataset.classes[label], ", Predicted:", pred)
img, label = test_dataset[99]
plt.imshow(img.permute(1, 2, 0))
pred = predict_image(img, model, train_ds.dataset.classes)
print("Target:", train_ds.dataset.classes[label], ", Predicted:", pred)
img, label = test_dataset[412]
plt.imshow(img.permute(1, 2, 0))
pred = predict_image(img, model, train_ds.dataset.classes)
print("Target:", train_ds.dataset.classes[label], ", Predicted:", pred)
img, label = test_dataset[342]
plt.imshow(img.permute(1, 2, 0))
pred = predict_image(img, model, train_ds.dataset.classes)
print("Target:", train_ds.dataset.classes[label], ", Predicted:", pred)
|
"""
Reference:
https://www.kaggle.com/code/sharanharsoor/resnet50-transfer-learning-90-accuracy/notebook
Express my gratitude to Sharan Harsoor@sharanharsoor
"""
"""import lib"""
import os
import shutil
import numpy as np
import glob
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers
from tensorflow.keras.layers import (
Input,
Add,
Dropout,
Dense,
Activation,
ZeroPadding2D,
BatchNormalization,
Flatten,
Conv2D,
AveragePooling2D,
MaxPooling2D,
GlobalAveragePooling2D,
)
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.utils import plot_model
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.preprocessing.image import (
ImageDataGenerator,
load_img,
img_to_array,
)
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from datetime import datetime
import matplotlib.pyplot as plt
from IPython.display import SVG
import scipy.misc
from matplotlib.pyplot import imshow
import cv2
import matplotlib.image as im
"""reading data and splitting train and test"""
# the path of all data
data_dir = "/kaggle/input/flowers-recognition/flowers"
# the working space of training and test
train_dir = "/kaggle/working/Train"
test_dir = "/kaggle/working/Test"
val_dir = "/kaggle/working/Val"
# the ratio of the train dataset to the test dataset
train_test_ratio = 0.8
# define a function to split the dataset into train and test
def spilt_alldata2train_test(
all_data_dir=data_dir,
val_data_dir=val_dir,
train_data_dir=train_dir,
test_data_dir=test_dir,
ttr=train_test_ratio,
):
if not os.path.exists(train_data_dir):
os.mkdir(train_data_dir)
if not os.path.exists(test_data_dir):
os.mkdir(test_data_dir)
if not os.path.exists(val_data_dir):
os.mkdir(val_data_dir)
num_train_files = 0
num_test_files = 0
num_val_files = 0
for subdir, dirs, files in os.walk(all_data_dir):
category_name = os.path.basename(subdir)
if category_name == os.path.basename(all_data_dir):
continue
train_data_category_dir = train_data_dir + "/" + category_name
test_data_category_dir = test_data_dir + "/" + category_name
val_data_category_dir = val_data_dir + "/" + category_name
if not os.path.exists(train_data_category_dir):
os.mkdir(train_data_category_dir)
if not os.path.exists(test_data_category_dir):
os.mkdir(test_data_category_dir)
if not os.path.exists(val_data_category_dir):
os.mkdir(val_data_category_dir)
file_list = glob.glob(os.path.join(subdir, "*.jpg"))
print(str(category_name) + " has " + str(len(files)) + " images!")
random_set = np.random.permutation((file_list))
train_list = random_set[: round(len(random_set) * (train_test_ratio))]
val0_list = random_set[-round(len(random_set) * (1 - train_test_ratio)) :]
lv = len(val0_list)
random_set2 = np.random.permutation((val0_list))
test_list = random_set2[: round(lv * 0.5)]
val_list = random_set2[-round(lv * 0.5) :]
for lists in train_list:
shutil.copy(lists, train_data_dir + "/" + category_name + "/")
num_train_files += 1
for lists in test_list:
shutil.copy(lists, test_data_dir + "/" + category_name + "/")
num_test_files += 1
for lists in val_list:
shutil.copy(lists, val_data_dir + "/" + category_name + "/")
num_val_files += 1
print("Processed " + str(num_train_files) + " train files.")
print("Processed " + str(num_test_files) + " test files.")
print("Processed " + str(num_val_files) + " val files.")
"""
print("subdir:{}".format(subdir))
print("catagory_path:{}".format(category_name))
print("dirs:{}".format(dirs))
print("---------------------------")flowers-recognition/flowers/dandelion
遍历结果如下
subdir:/kaggle/input/catagory_path:dandelion
dirs:[]
---------------------------
subdir:/kaggle/input/flowers-recognition/flowers/daisy
catagory_path:daisy
dirs:[]
---------------------------
subdir:/kaggle/input/flowers-recognition/flowers/sunflower
catagory_path:sunflower
dirs:[]
---------------------------
subdir:/kaggle/input/flowers-recognition/flowers/tulip
catagory_path:tulip
dirs:[]
---------------------------
subdir:/kaggle/input/flowers-recognition/flowers/rose
catagory_path:rose
dirs:[]
---------------------------
"""
spilt_alldata2train_test()
# build ResNet50
# Number of classes in dataset
num_classes = 5
def get_ResNet50model():
# Get base model
# Here we are using ResNet50 as base model
base_model = ResNet50(weights="imagenet", include_top=False)
# As we are using ResNet model only for feature extraction and not adjusting the weights
# we freeze the layers in base model
for layer in base_model.layers:
layer.trainable = False
# Get base model output
base_model_ouput = base_model.output
# Adding our own layer
x = GlobalAveragePooling2D()(base_model_ouput)
# Adding fully connected layer
x = Dense(512, activation="relu")(x)
x = Dense(num_classes, activation="softmax", name="fcnew")(x)
model = Model(inputs=base_model.input, outputs=x)
return model
# Get the model
model = get_ResNet50model()
# Compile it
model.compile(loss="categorical_crossentropy", optimizer="sgd", metrics=["accuracy"])
# Summary of the model
model.summary()
# Defining the ImageDataGenerator for the train and test images for pre-processing
# We don't apply horizontal_flip or other augmentation to the validation data generator
image_size = 224
batch_size = 64
train_data_gen = ImageDataGenerator(
preprocessing_function=preprocess_input,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
)
valid_data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_augset_path = "/kaggle/working/train_plus"
valid_augset_path = "/kaggle/working/valid_plus"
if not os.path.exists(train_augset_path):
os.mkdir(train_augset_path)
if not os.path.exists(valid_augset_path):
os.mkdir(valid_augset_path)
train_generator = train_data_gen.flow_from_directory(
train_dir,
(image_size, image_size),
batch_size=batch_size,
class_mode="categorical",
save_to_dir=train_augset_path,
)
valid_generator = valid_data_gen.flow_from_directory(
val_dir,
(image_size, image_size),
batch_size=batch_size,
class_mode="categorical",
save_to_dir=valid_augset_path,
)
test_generator = valid_data_gen.flow_from_directory(
test_dir, (image_size, image_size), batch_size=batch_size, class_mode="categorical"
)
# Training the fully connected layer for the initial epochs
epochs = 10
# Training the model
history = model.fit(
train_generator,
steps_per_epoch=train_generator.n // batch_size,
validation_data=valid_generator,
validation_steps=valid_generator.n // batch_size,
epochs=epochs,
verbose=1,
)
pred = model.predict(test_generator, verbose=1)
pred = np.argmax(pred, axis=1)
score = model.evaluate(test_generator, verbose=1)
print("loss:{0} acc={1}".format(score[0], score[1]))
# Plot training & validation accuracy values
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Train", "Test"], loc="upper left")
plt.show()
# Plot training & validation loss values
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.legend(["Train", "Test"], loc="upper left")
plt.show()
print("loss:{0} acc={1}".format(score[0], score[1]))
|
from numpy import array, zeros
a = array([[3, -2, 5, 0], [4, 5, 8, 1], [1, 1, 2, 1], [2, 7, 6, 5]], float)
b = array([2, 4, 5, 7], float)
n = len(b)
x = zeros(n, float)
# Elimination: replace row i with (row_k - factor * row_i), where factor = a[k, k] / a[i, k],
# so that the entry below the pivot a[k, k] becomes zero
for k in range(n - 1):
for i in range(k + 1, n):
if a[i, k] == 0:
continue
factor = a[k, k] / a[i, k]
for j in range(k, n):
a[i, j] = a[k, j] - a[i, j] * factor
b[i] = b[k] - b[i] * factor
print(a)
print(b)
# Back-Substitution
x[n - 1] = b[n - 1] / a[n - 1, n - 1]
for i in range(n - 2, -1, -1):
sum_ax = 0
for j in range(i + 1, n):
sum_ax += a[i, j] * x[j]
x[i] = (b[i] - sum_ax) / a[i, i]
print("The solution of the system is : ")
print(x)
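# Quick sanity check (a sketch): rebuild the original system (a and b were overwritten in place
# during elimination) and compare with numpy's built-in solver.
from numpy.linalg import solve
a_orig = array([[3, -2, 5, 0], [4, 5, 8, 1], [1, 1, 2, 1], [2, 7, 6, 5]], float)
b_orig = array([2, 4, 5, 7], float)
print(solve(a_orig, b_orig))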
|
# # Intro
# This course covers the key Python skills you’ll need so you can start using Python for data science. The course is ideal for someone with some previous coding experience who wants to add Python to their repertoire or level up their basic Python skills. (If you're a first-time coder, you may want to check out [these "Python for Non-Programmers" learning resources](https://wiki.python.org/moin/BeginnersGuide/NonProgrammers).)
# We'll start with a brief overview of Python syntax, variable assignment, and arithmetic operators. If you have previous Python experience, you can [skip straight to the hands-on exercise](https://www.kaggle.com/kernels/fork/1275163).
# # Hello, Python!
# Python was named for the British comedy troupe [Monty Python](https://en.wikipedia.org/wiki/Monty_Python), so we'll make our first Python program an homage to their skit about Spam!
# Just for fun, try reading over the code below and predicting what it's going to do when run. (If you have no idea, that's fine!)
# Then click the "output" button to see the results of our program.
spam_amount = 0
print(spam_amount)
# Ordering Spam, egg, Spam, Spam, bacon and Spam (4 more servings of Spam)
spam_amount = spam_amount + 4
if spam_amount > 0:
print("But I don't want ANY spam!")
viking_song = "Spam " * spam_amount
print(viking_song)
# There's a lot to unpack here! This silly program demonstrates many important aspects of what Python code looks like and how it works. Let's review the code from top to bottom.
spam_amount = 0
# **Variable assignment:** Here we create a variable called `spam_amount` and assign it the value of 0 using `=`, which is called the assignment operator.
# > **Aside**: If you've programmed in certain other languages (like Java or C++), you might be noticing some things Python *doesn't* require us to do here:
# - we don't need to "declare" `spam_amount` before assigning to it
# - we don't need to tell Python what type of value `spam_amount` is going to refer to. In fact, we can even go on to reassign `spam_amount` to refer to a different sort of thing like a string or a boolean.
print(spam_amount)
# **Function calls:** `print` is a Python function that displays the value passed to it on the screen. We call functions by putting parentheses after their name, and putting the inputs (or *arguments*) to the function in those parentheses.
# Ordering Spam, egg, Spam, Spam, bacon and Spam (4 more servings of Spam)
spam_amount = spam_amount + 4
# The first line above is a **comment**. In Python, comments begin with the `#` symbol.
# Next we see an example of reassignment. Reassigning the value of an existing variable looks just the same as creating a variable - it still uses the `=` assignment operator.
# In this case, the value we're assigning to `spam_amount` involves some simple arithmetic on its previous value. When it encounters this line, Python evaluates the expression on the right-hand-side of the `=` (0 + 4 = 4), and then assigns that value to the variable on the left-hand-side.
if spam_amount > 0:
print("But I don't want ANY spam!")
viking_song = "Spam Spam Spam"
print(viking_song)
# We won't talk much about "conditionals" until later, but, even if you've never coded before, you can probably guess what this does. Python is prized for its readability and simplicity.
# Note how we indicated which code belongs to the `if`. `"But I don't want ANY spam!"` is only supposed to be printed if `spam_amount` is positive. But the later code (like `print(viking_song)`) should be executed no matter what. How do we (and Python) know that?
# The colon (`:`) at the end of the `if` line indicates that a new "code block" is starting. Subsequent lines which are **indented** are part of that code block. Some other languages use `{`curly braces`}` to mark the beginning and end of code blocks. Python's use of meaningful whitespace can be surprising to programmers who are accustomed to other languages, but in practice it can lead to more consistent and readable code than languages that do not enforce indentation of code blocks.
# The later lines dealing with `viking_song` are not indented with an extra 4 spaces, so they're not a part of the `if`'s code block. We'll see more examples of indented code blocks later when we define functions and use loops.
# This code snippet is also our first sighting of a **string** in Python:
# ```python
# "But I don't want ANY spam!"
# ```
# Strings can be marked either by double or single quotation marks. (But because this particular string *contains* a single-quote character, we might confuse Python by trying to surround it with single-quotes, unless we're careful.)
viking_song = "Spam " * spam_amount
print(viking_song)
# The `*` operator can be used to multiply two numbers (`3 * 3` evaluates to 9), but amusingly enough, we can also multiply a string by a number, to get a version that's been repeated that many times. Python offers a number of cheeky little time-saving tricks like this where operators like `*` and `+` have a different meaning depending on what kind of thing they're applied to. (The technical term for this is [operator overloading](https://en.wikipedia.org/wiki/Operator_overloading))
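# Another quick illustration of this (a small aside, not part of the original lesson): `+` is also overloaded, so it adds numbers but *concatenates* strings and lists.
print(1 + 2)
print("Spam" + " and eggs")
print([1, 2] + [3, 4])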
# ## Numbers and arithmetic in Python
# We've already seen an example of a variable containing a number above:
spam_amount = 0
# "Number" is a fine informal name for the kind of thing, but if we wanted to be more technical, we could ask Python how it would describe the type of thing that `spam_amount` is:
type(spam_amount)
# It's an `int` - short for integer. There's another sort of number we commonly encounter in Python:
type(19.95)
# A `float` is a number with a decimal place - very useful for representing things like weights or proportions.
# `type()` is the second built-in function we've seen (after `print()`), and it's another good one to remember. It's very useful to be able to ask Python "what kind of thing is this?".
# A natural thing to want to do with numbers is perform arithmetic. We've seen the `+` operator for addition, and the `*` operator for multiplication (of a sort). Python also has us covered for the rest of the basic buttons on your calculator:
# | Operator | Name | Description |
# |--------------|----------------|--------------------------------------------------------|
# | ``a + b`` | Addition | Sum of ``a`` and ``b`` |
# | ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |
# | ``a * b`` | Multiplication | Product of ``a`` and ``b`` |
# | ``a / b`` | True division | Quotient of ``a`` and ``b`` |
# | ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |
# | ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |
# | ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |
# | ``-a`` | Negation | The negative of ``a`` |
# One interesting observation here is that, whereas your calculator probably just has one button for division, Python can do two kinds. "True division" is basically what your calculator does:
print(5 / 2)
print(6 / 2)
# It always gives us a `float`.
# The `//` operator gives us a result that's rounded down to the next integer.
print(5 // 2)
print(6 // 2)
# Can you think of where this would be useful? You'll see an example soon in the coding challenges.
# ### Order of operations
# The arithmetic we learned in primary school has conventions about the order in which operations are evaluated. Some remember these by a mnemonic such as **PEMDAS** - **P**arentheses, **E**xponents, **M**ultiplication/**D**ivision, **A**ddition/**S**ubtraction.
# Python follows similar rules about which calculations to perform first. They're mostly pretty intuitive.
8 - 3 + 2
-3 + 4 * 2
# Sometimes the default order of operations isn't what we want:
hat_height_cm = 25
my_height_cm = 190
# How tall am I, in meters, when wearing my hat?
total_height_meters = hat_height_cm + my_height_cm / 100
print("Height in meters =", total_height_meters, "?")
# Parentheses are useful here. You can add them to force Python to evaluate sub-expressions in whatever order you want.
total_height_meters = (hat_height_cm + my_height_cm) / 100
print("Height in meters =", total_height_meters)
# # Types of Operators
# Retrieved from [Hello World of Everything/Python/Operators](http://github.com/junwheih/Hello-World-Of-Everything/blob/main/Python/Operators.py)
# ```python
# # Last updated: Thu, February 04, 2021 - 11:14
# ############################################################
# # Reference: Coursera - Google Python Crash Course Week 1 Cheat Sheet
# # Arithmetic Operators
# """
# a + b : Adds a and b
# a - b : Subtracts b from a
# a * b : Multiplies a and b
# a / b : Divides a by b
# a ** b : Elevates a to the power of b. For non integer values of b, this becomes a root (i.e. a**(1/2) is the
# square root of a)
# a // b : The integer part of the integer division of a by b
# a % b : The remainder part of the integer division of a by b
# """
# print("\nArithmetic Operators")
# # Addition
# def add(num1, num2):
# return num1 + num2
# print("The sum is", add(3,4)) # Output: The sum is 7
# # Division
# print(5/2) # Output: 2.5
# print(5//2) # Output: 2
# # Remainder
# print(5%2) # Output: 1
# # Power
# def pow(num1, num2):
# print(num1**num2)
# pow(2,5) # Output: 32
# # Python math library's power function
# import math
# print(math.pow(2,3)) # Output: 8.0
# print((int)(math.pow(2,3))) # Output: 8
# ############################################################
# # Comparison Operators
# """
# Reference: https://www.programiz.com/python-programming/operators
# x == y : Equals
# x != y : Not Equals
# x < y : Lesser than
# x > y : Greater than
# x <= y : Lesser than or Equal to
# x >= y : Greater than or Equal to
# """
# print("\nComparison Operators")
# x = 5
# y = 2
# print(x == y) # Output: False
# print(x != y) # Output: True
# print(x < y) # Output: False
# print(x > y) # Output: True
# print(x <= y) # Output: False
# print(x >= y) # Output: True
# # Difference
# def diff(num1, num2):
# if num1 >= num2:
# return num1 - num2
# else:
# return num2 - num1
# print(diff(5, 10)) # Output: 5
# # What makes python special when comparing between numbers
# x = 1; y = 2; z = 3
# print(x < y < z) # Output: True
# x = 1; y = 3; z = 3
# print(x > y == z) # Output: False
# ############################################################
# # Logical Operators
# """
# Boolean values
# 1. True
# 2. False
# and : True when both operands are True
# or : True when one of the operands is True
# not : True when the operand is False [Negation/Opposite of the operand]
# Precedence: not > and > or
# """
# print('\nLogical Operators')
# # NOTE: Make sure the T and F are capitalized
# x = True
# y = False
# print(x and y) # Output: False
# print(x or y) # Output: True
# print(not y) # Output: True
# ############################################################
# # Bitwise Operators
# """
# Reference: https://www.geeksforgeeks.org/python-bitwise-operators/
# x & y Bitwise AND
# x | y Bitwise OR
# ~x Bitwise NOT
# x ^ y Bitwise XOR
# x>> Bitwise right shift
# x<< Bitwise left shift
# Bitwise AND
# 0 & 0 : 0
# 0 & 1 : 0
# 1 & 0 : 0
# 1 & 1 : 1
# Bitwise OR
# 0 | 0 : 0
# 0 | 1 : 1
# 1 | 0 : 1
# 1 | 1 : 1
# Bitwise NOT
# ~0 : -1
# ~1 : -2
# NOTE: More detailed explanation below
# Bitwise XOR
# 0 ^ 0 : 0
# 0 ^ 1 : 1
# 1 ^ 0 : 1
# 1 ^ 1 : 0
# Bitwise left shift
# a = 5 = 0000 0101
# b = -10 = 1111 0110
# # NOTE: In 8-bit two's complement, the '1' on the far left side is worth -128
# NOTE: -10 = 1111 0110 because (-128 + 64 + 32 + 16 + 4 + 2)
# a << 1 = 0000 1010 = 10
# a << 2 = 0001 0100 = 20
# b << 1
# 1111 0110 << 1 = 1110 1100 (-128 + 64 + 32 + 8 + 4) = -20
# b << 2
# 1111 0110 << 2 = 1101 1000 (-128 + 64 + 16 + 8) = -40
# Bitwise right shift
# a = 5 = 0000 0101
# a >> 1 = 0000 0010 = 2
# Logic Gates Basic
# Reference: https://www.geeksforgeeks.org/logic-gates-in-python/
# Converting decimals to binary
# Reference: https://realpython.com/python-data-types/
# x = 9
# bin(x) = 0b1001
# What is bit shifting for?
# Reference: https://stackoverflow.com/questions/520625/have-you-ever-had-to-use-bit-shifting-in-real-projects
# 1. Need bit-shifting for nearly all your arithmetic when coding in a system that does not have floating point supported in hardware
# 2. Generate hashes
# 3. Polynomial arithmetic (CRC, Reed-Solomon Codes are the mainstream applications)
# """
# print("\nBitwise Operators")
# # Bitwise AND operator
# """
# a = 6 = 0110 (Binary)
# b = 5 = 0101 (Binary)
# a & b = 0110
# &
# 0101
# = 0000
# = 0 (Decimal)
# """
# # Bitwise NOT operator
# """
# a = 9 = 1001 (Binary)
# ~a = ~1001
# = -(1001 + 1)
# = -(1010)
# = -10 (Decimal)
# """
# a = 9
# print(bin(a)) # Output: 0b1001
# b = ~a
# print(bin(b)) # Output: -0b1010
# ############################################################
# # Assignment Operators
# """
# x = 1 : x = 1
# x += 2 : x = x + 2
# x -= 3 : x = x - 3
# x *= 4 : x = x * 4
# x /= 5 : x = x / 5
# x %= 6 : x = x % 6
# x //= 7 : x = x // 7
# x **= 8 : x = x ** 8
# BITWISE
# x &= 9 : x = x & 9
# x |= 8 : x = x | 8
# x ^= 7 : x = x ^ 7
# x >>= 2 : x = x >> 2
# x <<= 3 : x = x << 3
# """
# print('\nAssignment Operators')
# # Initialize x first
# x = 3
# # Adding it
# x += 2
# print(x) # Output: 5
# # Power
# x **= 2
# print(x) # Output: 25
# # Floor division: keeping only the integer part of the quotient
# x //= 10
# print(x) # Output: 2
# # Multiplication
# x *= 8
# print(x) # Output: 16
# # Complete division
# x /= 10
# print(x) # Output: 1.6
# # Assigment operator with bitwise
# # XOR Example
# x = 1
# x ^= 0
# print(x) # Output: 1
# ############################################################
# # Identity Operators
# """
# Reference: https://www.programiz.com/python-programming/operators
# is : True if the operands are identical (refer to the same object)
# is not : True if the operands are not identical (do not refer to the same object)
# """
# x1 = 5
# y1 = 5
# x2 = 'Hello'
# y2 = "Hello"
# x3 = [1,2,3]
# y3 = [1,2,3]
# tmp = x3
# print(x1 is not y1) # Output: False
# print(x2 is y2) # Output: True
# print(x3 is y3) # Output: False
# # Reason: They are equal but not identical. It is because the interpreter locates them separately in memory although they are equal.
# print(tmp is x3) # Output: True
# print(x3 is tmp) # Output: True
# import random
# random.shuffle(tmp) # When tmp is called to be shuffled, the list object that x3 and tmp are referenced to will be shuffled.
# print(tmp) # Say [1, 3, 2]
# print(x3) # Same as tmp
# print(x3 is tmp) # Output: True. Although it is shuffled the reference remained the same
# # NOTE: Variable names in python are only references to objects, x3 and tmp are referenced to the same list object.
# ############################################################
# # Membership Operators
# """
# Reference: https://www.programiz.com/python-programming/operators
# Used for string, list, tuple, set and dictionary
# in : True if value/variable is found in the sequence
# not in : True if value/variable is not found in the sequence
# """
# x = 'Hello World'
# y = {1: 'a', 'b': 2}
# z = (1, 2, 'Foo')
# w = [1,'Bar',3]
# print('H' in x) # Output: True
# print('world' not in x) # Output: True
# print(1 in y) # Output: True
# print('a' in y) # Output: False
# print('b' in y) # Output: True
# print(2 in z) # Output: True
# print('Bar' in w) # Output: True
# ```
# ### Builtin functions for working with numbers
# `min` and `max` return the minimum and maximum of their arguments, respectively...
print(min(1, 2, 3))
print(max(1, 2, 3))
# `abs` returns the absolute value of its argument:
print(abs(32))
print(abs(-32))
# In addition to being the names of Python's two main numerical types, `int` and `float` can also be called as functions which convert their arguments to the corresponding type:
print(float(10))
print(int(3.33))
# They can even be called on strings!
print(int("807") + 1)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # BUILT-IN DATA TYPES
# In programming, the data type is an important concept.
# Variables can store data of different types, and different types can do different things.
# Python has the following data types built in by default, in these categories:
# * Text Type: str
# * Numeric Types: int, float, complex
# * Sequence Types: list, tuple, range
# * Mapping Type: dict
# * Set Types: set, frozenset
# * Boolean Type: bool
# * Binary Types: bytes, bytearray, memoryview
# * None Type: NoneType
# # Getting the Data Type
# You can get the data type of any object by using the type() function.
x = 8
print(type(x))
# # Setting a Specific Data Type
# If you want to specify the data type, you can use the following constructor functions (a few of them are run in the short cell after this list):
# ### Example — Data Type
# * x = str("Hello World")
# str
# * x = int(20)
# int
# * x = float(20.5)
# float
# * x = complex(1j)
# complex
# * x = list(("apple", "banana", "cherry"))
# list
# * x = tuple(("apple", "banana", "cherry"))
# tuple
# * x = range(6)
# range
# * x = dict(name="John", age=36)
# dict
# * x = set(("apple", "banana", "cherry"))
# set
# * x = frozenset(("apple", "banana", "cherry"))
# frozenset
# * x = bool(5)
# bool
# * x = bytes(5)
# bytes
# * x = bytearray(5)
# bytearray
# * x = memoryview(bytes(5))
# memoryview
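# A few of the constructors from the list above, run so that the resulting types can be checked with type() (purely illustrative values):
print(type(str("Hello World")))  # <class 'str'>
print(type(int(20)))  # <class 'int'>
print(type(range(6)))  # <class 'range'>
print(type(dict(name="John", age=36)))  # <class 'dict'>
print(type(frozenset(("apple", "banana", "cherry"))))  # <class 'frozenset'>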
# # Python Numbers
# There are three numeric types in Python:
# int, float, complex. Variables of numeric types are created when you assign a value to them:
x = 4 # int
y = 9.3 # float
z = 6j # complex
# To verify the type of any object in Python, we use the type() function.
print(type(x))
print(type(y))
print(type(z))
# # Int - Integer
# An int, or integer, is a whole number of unlimited length, positive or negative, without decimals.
# integers
x = 7
y = 474893372
z = -3541
print(type(x))
print(type(y))
print(type(z))
# # Float
# A float is a positive or negative number containing one or more decimals.
x = 5.3
y = 2.0
z = -93.8
print(type(x))
print(type(y))
print(type(z))
# A float can also be a scientific number with an "e" to indicate a power of 10.
# floats
x = 46e4
y = 68e3
z = -21.5e100
print(type(x))
print(type(y))
print(type(z))
# # Complex
# Complex numbers are written with a "j" as the imaginary part:
x = 6 + 9j
y = 2j
z = -7j
print(type(x))
print(type(y))
print(type(z))
# # Type Conversion
# You can convert from one type to another with the int(), float() and complex() methods:
x = 6 # int
y = 9.4 # float
z = 3j # complex
# convert from int to float:
a = float(x)
# convert from float to int:
b = int(y)
# convert from int to complex:
c = complex(x)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
# # Note:
# You cannot convert complex numbers into another number type.
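# To illustrate the note above (an added demonstration): trying to convert a complex number with int() raises a TypeError.
try:
    int(3j)
except TypeError as error:
    print(error)  # reports that complex cannot be converted to int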
# # Random Numbers
# Python does not have a random() function to make a random number, but it does have a built-in module called random that can be used to make random numbers:
import random
print(random.randrange(1, 50))
# # CASTING: SPECIFYING A VARIABLE TYPE
# There may be times when you want to specify a type for a variable. This can be done with casting. Python is an object-oriented language, and as such it uses classes to define data types, including its primitive types.
# Specify a Variable Type
# Casting is therefore done with constructor functions:
# int() - constructs an integer from an integer literal, a float literal (by removing all decimals), or a string literal (provided the string represents a whole number)
# float() - constructs a float from an integer literal, a float literal, or a string literal (provided the string represents a float or an integer)
# str() - constructs a string from a wide variety of data types, including strings, integer literals and float literals
# integers
x = int(4) # x will be 4
y = int(5.8) # y will be 5
z = int("6") # z will be 6
print(x)
print(y)
print(z)
# floats
x = float(7) # x will be 7.0
y = float(8.9) # y will be 8.9
z = float("9") # z will be 0.0
w = float("10.5") # w will be 10.5
print(x)
print(y)
print(z)
print(w)
# strings
x = str("s3") # x will be 's3'
y = str(1) # y will be '1'
z = str(5.8) # z will be '5.8'
print(x)
print(y)
print(z)
# # Strings
# Strings in Python are surrounded by either single quotation marks or double quotation marks.
# 'merhaba' is the same as "merhaba".
# You can display a string literal with the print() function:
print("MERHABA")
print("MERHABA")
# # Assigning a String to a Variable
# Assigning a string to a variable is done with the variable name followed by an equals sign and the string:
a = "MERHABA!!!!"
print(a)
# # Multiline Strings
# You can assign a multiline string to a variable by using three quotes:
a = """Life is a waterfall
We drink from the river
Then we turn around and put up our walls"""
print(a)
# Or three single quotes:
a = '''Life is a waterfall
We drink from the river
Then we turn around and put up our walls'''
print(a)
# # Strings Are Arrays
# Like many other popular programming languages, strings in Python are arrays of bytes representing unicode characters.
# However, Python does not have a character data type; a single character is simply a string with a length of 1.
# Square brackets can be used to access elements of the string.
# Get the character at a given position (remember that the first character is at position 0):
a = "MERHABA ARKADAŞLAR!!"
print(a[8])
# # Looping Through a String
# Since strings are arrays, we can loop through the characters in a string with a for loop.
# Loop through the letters in the word "cherry":
for x in "cherry":
print(x)
# # String Length
# To get the length of a string, use the len() function.
# The len() function returns the length of a string:
a = "SELAM!!!"
print(len(a))
# # Check String
# To check if a certain phrase or character is present in a string, we can use the keyword "in".
# Check whether "polisiye" is present in the following text:
txt = "Ahmet Ümit'in polisiye romanları çok popülerdir."
print("polisiye" in txt)
# Use it in an if statement:
txt = "Ahmet Ümit'in polisiye romanları çok popülerdir."
if "polisiye" in txt:
print("Evet, 'polisiye' var.")
# # CHECK IF NOT PRESENT
# To check if a certain phrase or character is NOT present in a string, we can use the keyword "not in".
# Check that "kitap" is NOT present in the following text:
txt = "Ahmet Ümit'in polisiye romanları çok popülerdir."
print("kitap" not in txt)
# Use it in an if statement:
txt = "Ahmet Ümit'in polisiye romanları çok popülerdir."
if "kitap " not in txt:
print("Hayır, 'kitap' YOK.")
|
from collections import Counter
import matplotlib.pyplot as plt
import sklearn
import numpy as np
import pandas as pd
import seaborn as sns
import os
import random
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # **Read files using pandas**
random.seed(0)
df = pd.read_csv("/kaggle/input/titanic/train.csv")
df
# # Show number of classes
# Counter function from collection library
# 0: dead, 1: survived
Counter(df["Survived"])
# # Data cleaning
# Check if there is NaN's in columns
df.isna().sum()
# drop column Cabin because of large amount of NaN
df = df.drop(["Cabin"], axis=1)
# drop rows with NaN values (a row is dropped when it contains at least one NaN)
df = df.dropna()
df
# # EDA
fig1, axes = plt.subplots(2, 3, figsize=(18, 10))
fig1.suptitle("Basic data analysis", fontsize=16)
g0 = sns.scatterplot(data=df, x="Age", y="Fare", hue="Survived", ax=axes[0, 0])
g1 = sns.countplot(y="Pclass", data=df, hue="Survived", ax=axes[0, 1])
g2 = sns.countplot(
y="Embarked", data=df, hue="Survived", ax=axes[0, 2], palette="magma"
)
g3 = sns.countplot(x="Sex", data=df, hue="Survived", ax=axes[1, 0], palette="magma")
g4 = sns.countplot(x="Parch", data=df, hue="Survived", ax=axes[1, 1], palette="magma")
g5 = sns.countplot(x="SibSp", data=df, hue="Survived", ax=axes[1, 2], palette="magma")
# # Age histogram of people that survived and didn't survive
survived_age = df.loc[df["Survived"] == 1]
dead_age = df.loc[df["Survived"] == 0]
fig1, axes = plt.subplots(1, 3, figsize=(18, 10))
fig1.suptitle("Age distribution", fontsize=16)
g1 = sns.histplot(data=survived_age, x="Age", palette="magma", ax=axes[0])
g2 = sns.histplot(data=dead_age, x="Age", palette="magma", ax=axes[1])
g3 = sns.histplot(data=df, x="Age", hue="Survived", ax=axes[2])
predictions = df["Survived"]
df = df.drop(["Survived", "Name", "Ticket", "PassengerId"], axis=1)
df["Sex"][df["Sex"] == "male"] = 0
df["Sex"][df["Sex"] == "female"] = 1
df["Embarked"][df["Embarked"] == "C"] = 0
df["Embarked"][df["Embarked"] == "Q"] = 1
df["Embarked"][df["Embarked"] == "S"] = 2
# # To do:
# # normalization to the 0-1 range,
# # testing the data with different classifiers: **done**,
# # writing a preprocessing function - a simple pipeline without a classifier to load the data etc.: **done**,
# # using grid_search for the best classifier (a hedged grid-search sketch is added after the model comparison below),
# # additional data analysis / regression / some clustering (PCA, UMAP)?
# # age distribution / an age histogram with "hue" Survived: **done**; maybe split into age groups somehow?
#
def prepare_data(df):
df = df.drop(["Cabin"], axis=1)
df = df.dropna()
df["Sex"][df["Sex"] == "male"] = 0
df["Sex"][df["Sex"] == "female"] = 1
df["Embarked"][df["Embarked"] == "C"] = 0
df["Embarked"][df["Embarked"] == "Q"] = 1
df["Embarked"][df["Embarked"] == "S"] = 2
return df
def preprocess(df):
return df / df.max()
# cm - confusion matrix
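# sklearn's confusion_matrix orders rows by true label and columns by predicted label,
# so cm[0, 0]=TN, cm[0, 1]=FP, cm[1, 0]=FN, cm[1, 1]=TP; hence
# specificity = TN / (TN + FP) and recall (sensitivity) = TP / (TP + FN).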
def specificity_recall(cm):
specificity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
recall = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return specificity, recall
# # **______________________________**
# # Model building/testing
# Preparing data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
df, predictions, shuffle=True, stratify=predictions, random_state=0
)
# **Importing all models**
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
classifiers = [
LinearSVC,
KNeighborsClassifier,
SVC,
GaussianProcessClassifier,
DecisionTreeClassifier,
RandomForestClassifier,
]
dct = {}
for classifier in classifiers:
if classifier not in dct:
dct[classifier] = classifier()
values = {}
for classifier in classifiers:
dct[classifier].fit(X_train, y_train)
y_pred = dct[classifier].predict(X_test)
cm = confusion_matrix(y_test, y_pred)
values[classifier] = specificity_recall(cm)
values
# # Random forest has given the best results
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(random_state=0, max_depth=1000)
tree.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
y_pred = tree.predict(X_test)
print(confusion_matrix(y_test, y_pred))
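# A minimal, hedged sketch of the "grid_search for the best classifier" item from the to-do
# list above: it tunes a RandomForestClassifier on the same X_train / y_train split. The
# parameter grid values below are illustrative assumptions, not tuned or recommended settings.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
param_grid = {
    "n_estimators": [100, 300],
    "max_depth": [None, 5, 10],
    "min_samples_split": [2, 5],
}
grid = GridSearchCV(
    RandomForestClassifier(random_state=0),
    param_grid=param_grid,
    scoring="recall",
    cv=5,
)
grid.fit(X_train, y_train)
print(grid.best_params_)
print(grid.best_score_)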
|
import numpy as np
import pandas as pd
data = pd.read_csv("../input/titanic/train.csv")
data.head()
data.drop(["Name", "PassengerId", "Cabin"], axis=1, inplace=True)
print(data.head())
data.isnull().sum()
set(data.Embarked)
data["Embarked"] = data.Embarked.map({"S": 0, "Q": 1, "C": 2, np.nan: 0})
set(data.Embarked)
set(data.Sex)
data.Sex = data.Sex.map({"female": 0, "male": 1})
data.head()
data.Age.fillna(data.Age.mean(), inplace=True)
data.drop("Ticket", axis=1, inplace=True)
from sklearn.model_selection import train_test_split
y = data.Survived
x = data.drop("Survived", axis=1)
xtrain, xtest, ytrain, ytest = train_test_split(x, y, random_state=0)
## Decision Tree & ccp_alpha values
from sklearn.tree import DecisionTreeClassifier
clf1 = DecisionTreeClassifier()
path = clf1.cost_complexity_pruning_path(xtrain, ytrain)
ccp_alphas = path.ccp_alphas
type(ccp_alphas)
testscores = list()
trainscores = list()
for ccp_alpha in ccp_alphas:
clf1 = DecisionTreeClassifier(ccp_alpha=ccp_alpha)
clf1.fit(xtrain, ytrain)
tr = clf1.score(xtrain, ytrain)
te = clf1.score(xtest, ytest)
testscores.append(te)
trainscores.append(tr)
import matplotlib.pyplot as plt
plt.plot(ccp_alphas, testscores, "r")
plt.plot(ccp_alphas, trainscores, "b")
plt.show()
z = testscores.index(max(testscores))
ccp = ccp_alphas[z]
ccp
clff = DecisionTreeClassifier(ccp_alpha=ccp)
clff.fit(xtrain, ytrain)
clff.score(xtest, ytest)
data["alsi"] = y
data["pred"] = clff.predict(x)
data["error"] = data.alsi - data.pred
data["PassengerID"] = np.arange(1, 892)
final = data[["PassengerID", "pred"]]
final_submission = final.iloc[0:418, :]
final_submission.to_csv("final_Himanshu.csv", index=False)
clff.score(xtest, ytest)
# This is the final score (on test data) that I achieve by overcoming overfitting with cost-complexity pruning.
|
# # EDA on "bike-sharing-demand" Dataset
# # Table of Contents
# * [Data Overview](#section-one)
# * [Feature Engineering](#section-two)
# * [Data Visualization](#section-three)
# - [Distribution Plot on target feature 'count'](#subsection-one)
# - [Bar Plot on 'count' by features](#subsection-two)
# - [Box Plot on 'count' across by features](#subsection-three)
# - [Point Plot](#subsection-four)
# - [Scatter Plot Graph with Regression Line](#subsection-five)
# - [Heatmap](#subsection-six)
# * [On My Own](#section-four)
# - [Analysis on 'Casual' and 'Registered' Feature](#second-subsection-one)
# - [Outliers Analysis](#second-subsection-two)
# - [Zscore](#second-subsection-three)
# - [Missing Values](#second-subsection-four)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data_path = "/kaggle/input/bike-sharing-demand/"
#
# # 1. Data Overview
train = pd.read_csv(data_path + "train.csv")
test = pd.read_csv(data_path + "test.csv")
submission = pd.read_csv(data_path + "sampleSubmission.csv")
train.shape, test.shape
train.head()
train.info()
test.info()
submission.head()
#
# # 2. Feature Engineering
train["datetime"][0]
train["date"] = train["datetime"].apply(lambda x: x.split()[0])
datetime_map = {"year": 1, "month": 2, "day": 3, "hour": 4, "minute": 5, "second": 6}
def datetime_parser(datetime: str, map_num: int):
import re
match = re.search(r"(\d{4})-(\d{2})-(\d{2})\s(\d{2}):(\d{2}):(\d{2})", datetime)
return match.group(map_num)
for key in datetime_map.keys():
train[key] = train["datetime"].apply(
lambda x: datetime_parser(x, datetime_map[key])
)
train.head()
def datetime_to_weekday(x: str):
from datetime import datetime
import calendar
return calendar.day_name[datetime.strptime(x, "%Y-%m-%d").weekday()]
train["weekday"] = train["date"].apply(lambda x: datetime_to_weekday(x))
season_map = {1: "Spring", 2: "Summer", 3: "Fall", 4: "Winter"}
weather_map = {
1: "Clear",
2: "Mist, Few Clouds",
3: "Light Snow, Rain, Thunderstorm",
4: "Heavy Rain, Thungerstorm, Snow, Fog",
}
train["season"] = train["season"].apply(lambda x: season_map[x])
train["weather"] = train["weather"].apply(lambda x: weather_map[x])
train.head()
#
# # 3. Data Visualization
# ### 3-1. Distribution Plot
# to "target" feature
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc("font", size=15)
## `distplot` is a deprecated function and will be removed in seaborn v0.14.0.
sns.displot(train["count"])
sns.displot(np.log(train["count"]))
#
# ### 3-2. Barplot
# to general feature
mpl.rc("font", size=14)
mpl.rc("axes", titlesize=15)
figure, axes = plt.subplots(nrows=3, ncols=2)
plt.tight_layout()
figure.set_size_inches(10, 9)
axes
axes.shape
x_variables = ["year", "month", "day", "hour", "minute", "second"]
for idx, var in enumerate(x_variables):
sns.barplot(x=var, y="count", data=train, ax=axes[idx // 2, idx % 2])
figure
for idx, var in enumerate(x_variables):
axes[idx // 2, idx % 2].set(title=f"Rental amounts by {var}")
axes[1, 0].tick_params(axis="x", labelrotation=90)
axes[1, 1].tick_params(axis="x", labelrotation=90)
figure
#
# ### 3-3. Box Plot
figure, axes = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()
figure.set_size_inches(12, 12)
x_vars = ["season", "weather", "holiday", "workingday"]
for i, var in enumerate(x_vars):
ax_unit = axes[i // 2, i % 2]
sns.boxplot(x=var, y="count", data=train, ax=ax_unit)
ax_unit.set(title=f"Box Plot on 'count' Across '{var}'")
axes[0, 1].tick_params(axis="x", labelrotation=10)
#
# ### 3-4. Point Plot
mpl.rc("font", size=11)
figure, axes = plt.subplots(nrows=5)
figure.set_size_inches(12, 18)
hue_vars = ["season", "weather", "holiday", "workingday", "weekday"]
for i in range(len(hue_vars)):
sns.pointplot(x="hour", y="count", data=train, hue=hue_vars[i], ax=axes[i])
#
# ### 3-5. Scatter Plot Graph with Regression Line
mpl.rc("font", size=15)
figure, axes = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()
figure.set_size_inches(7, 6)
x_vars = ["temp", "atemp", "windspeed", "humidity"]
for i in range(len(x_vars)):
sns.regplot(
x=x_vars[i],
y="count",
data=train,
ax=axes[i // 2, i % 2],
scatter_kws={"alpha": 0.2},
line_kws={"color": "blue"},
)
#
# ### 3-6. Heatmap
corr_vars = ["temp", "atemp", "humidity", "windspeed", "count"]
train[corr_vars].corr()
corrMat = train[corr_vars].corr()
fig, ax = plt.subplots()
fig.set_size_inches(10, 10)
sns.heatmap(corrMat, annot=True)
ax.set(title="Heatmap of Numeric Data")
#
# # 4. On My Own
# reference:
# - https://github.com/shashankvmaiya/Bike-Sharing-Demand-Prediction/blob/master/Bike-Sharing-Prediction.ipynb
# ## 4-1. Analysis on 'Casual' and 'Registered' Feature
train[["casual", "registered", "count"]]
print(
"Casual + Registered = Count? ",
~(train["casual"] + train["registered"] - train["count"]).any(),
)
corr_vars = ["casual", "registered", "count"]
corr = train[corr_vars].corr()
# Heatmap
matrix = np.triu(corr, 1)
fig = plt.figure(figsize=(5, 5))
sns.heatmap(
corr, mask=matrix, annot=True, cbar=True, vmax=0.8, vmin=-0.8, cmap="RdYlGn"
)
plt.show()
df_melt = pd.melt(
frame=train,
id_vars="hour",
value_vars=["casual", "registered"],
value_name="count_seperated",
var_name="casual_or_registered",
)
train[["hour", "casual", "registered", "count"]]
df_melt
group_casual_hour = pd.DataFrame(
df_melt.groupby(["hour", "casual_or_registered"])["count_seperated"].mean()
).reset_index()
group_casual_hour.head()
# Plots of average count across hour in a day for various categories
f, ax = plt.subplots()
f.set_size_inches(16, 10)
df_melt = pd.melt(
frame=train,
id_vars="hour",
value_vars=["casual", "registered"],
value_name="seperated_count",
var_name="casual_or_registered",
)
group_casual_hour = pd.DataFrame(
df_melt.groupby(["hour", "casual_or_registered"])["seperated_count"].mean()
).reset_index()
sns.pointplot(
data=group_casual_hour, x="hour", y="seperated_count", hue="casual_or_registered"
)
ax.set(
xlabel="Hour in the day",
ylabel="Count",
title="Average Bike Rentals by the day across Casual/Registered Users",
)
plt.show()
mpl.rc("font", size=11)
figure, axes = plt.subplots(nrows=4)
figure.set_size_inches(12, 24)
hue_vars = ["holiday", "workingday", "weekday", "casual_or_registered"]
for i in range(len(hue_vars)):
if i == 3:
sns.pointplot(
data=group_casual_hour,
x="hour",
y="seperated_count",
hue="casual_or_registered",
)
else:
sns.pointplot(x="hour", y="count", data=train, hue=hue_vars[i], ax=axes[i])
if i == 0:
handles, _ = axes[i].get_legend_handles_labels()
axes[i].legend(handles, ["Not a Holiday", "Holiday"])
elif i == 1:
handles, _ = axes[i].get_legend_handles_labels()
axes[i].legend(handles, ["Not a Working Day", "Working Day"])
# * 1. Working Day: registered customers account for most of the rentals. They appear to be commuters who ride to work; the pattern closely matches the per-weekday usage graphs for Monday through Friday.
# * 2. Non Working Day: casual customers dominate. Usage follows a roughly normal-shaped distribution peaking around 1 pm, presumably driven by tourists; the pattern closely matches the Saturday/Sunday usage graphs.
# **-> Splitting the data into WorkingDay / ~WorkingDay and fitting a separate regression model for each might improve prediction accuracy.**
# ## 4-2. Outliers Analysis
# Analyzing the Weather = "Heavy Rain, Thunderstorm, Snow, Fog" outliers
test["weather"].value_counts()
train["weather"].value_counts()
heavy_snow_rain_data = train[train["weather"] == "Heavy Rain, Thunderstorm, Snow, Fog"]
heavy_snow_rain_data
train.loc[train["date"] == "2012-01-09", :]
# 1. The weather was clear in the morning, gradually worsened, and became very bad toward the evening.
# 2. There were already many people commuting by bike between 7 and 9 am; they presumably assumed the bad weather was just a brief shower and rode anyway.
# **-> Possible treatments: delete the record, impute from neighboring values, etc. Since this happened only once and was a brief episode in the overall trend, we decided to replace it with the neighboring value.**
test["date"] = test["datetime"].apply(lambda x: x.split()[0])
heavy_snow_rain_data_in_test = test[test["weather"] == 4]
heavy_snow_rain_data_in_test
test[test["date"] == "2011-01-26"]
test[test["date"] == "2012-01-21"]
# Replacing Heavy/Snow Rain condition with Light Snow/Rain
train.loc[
    train["weather"] == "Heavy Rain, Thunderstorm, Snow, Fog", "weather"
] = "Light Snow, Rain, Thunderstorm"
test.loc[test["weather"] == 4, "weather"] = 3
train.loc[train["datetime"] == "2012-01-09 18:00:00", :]
test["weather"].value_counts()
train["weather"].value_counts()
#
# ## 4-3. Zscore
# reference:
# - https://www.analyticsvidhya.com/blog/2022/08/dealing-with-outliers-using-the-z-score-method/
# Inspecting data points with |z-score| > 4. After standardization, a z value above 4 is extremely rare, so such points can reasonably be treated as outliers.
train["count_log"] = train["count"].apply(lambda x: np.log1p(x))
sns.displot(train["count_log"])
figure, axes = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()
figure.set_size_inches(12, 12)
x_vars = ["season", "weather", "holiday", "workingday"]
for i, var in enumerate(x_vars):
ax_unit = axes[i // 2, i % 2]
sns.boxplot(x=var, y="count_log", data=train, ax=ax_unit)
ax_unit.set(title=f"Box Plot on 'count' Across '{var}'")
axes[0, 1].tick_params(axis="x", labelrotation=10)
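# Group-wise standardization helper: (value - group mean) / group standard deviation.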
def zscore(series):
return (series - series.mean()) / series.std()
train["count_zscore"] = train.groupby(["hour", "season"])["count_log"].apply(
lambda x: zscore(x)
)
outlier_season_idx = np.abs(train["count_zscore"]) > 4
outlier_season_data = train.loc[outlier_season_idx, :]
print("Shape of the outlier data entries: ", outlier_season_data.shape)
outlier_season_data
train["count_zscore"] = train.groupby(["hour", "workingday"])["count_log"].apply(
lambda x: zscore(x)
)
outlier_workingday_idx = np.abs(train["count_zscore"]) > 4
outlier_workingday_data = train.loc[outlier_workingday_idx, :]
print("Shape of the outlier data entries: ", outlier_workingday_data.shape)
outlier_workingday_data
# All the outliers occur mostly early in the morning or late at night. Let us prune out these outliers. These could be due to some late night shows or holiday or some party.
# Removing outliers from train data
train_without_outliers = train.loc[~(outlier_season_idx + outlier_workingday_idx), :]
print("Shape of data before outliner pruning: ", train.shape)
print("Shape of data after outlier pruning: ", train_without_outliers.shape)
# Dropping the zscore column
train_without_outliers = train_without_outliers.drop("count_zscore", axis=1)
train_without_outliers.head(3)
figure, axes = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()
figure.set_size_inches(12, 12)
x_vars = ["season", "workingday"]
for i in range(4):
var = x_vars[i % 2]
if i < 2:
ax_unit = axes[i // 2, i % 2]
sns.boxplot(x=var, y="count_log", data=train, ax=ax_unit)
ax_unit.set(title=f"Raw Data Box Plot on 'count' Across '{var}'")
else:
ax_unit = axes[i // 2, i % 2]
sns.boxplot(x=var, y="count_log", data=train_without_outliers, ax=ax_unit)
ax_unit.set(title=f"ZScore Applied Data Box Plot")
# 1. We tried handling the outliers with the z-score method, but it does not seem to have much effect...
# 2. Rather than simply applying a z-score cutoff, we should analyze why so many of the outliers fall in the "Fall" season.
# **-> No meaningful result was obtained...**
# ## 4-4. Missing Values in 'WindSpeed' feature
sns.displot(train["windspeed"])
train["windspeed"].value_counts()
mpl.rc("font", size=15)
figure, axes = plt.subplots(nrows=1, ncols=2)
plt.tight_layout()
figure.set_size_inches(9, 3)
for i in range(2):
data = train
ax = axes[i % 2]
ax.set_title("Scatter Plot 'windspeed' across on 'count'")
if i == 1:
data = train[train["windspeed"] != 0]
ax.set_title("Missing Value Trimmed Scatter Plot")
sns.regplot(
x="windspeed",
y="count",
data=data,
ax=ax,
scatter_kws={"alpha": 0.2},
line_kws={"color": "blue"},
)
|
# # Description:
# Drug overdose has become a major public health concern in the United States, with more than 93,000 overdose deaths reported in 2020 alone. This notebook will explore the drug overdose death rates in the U.S., including the types of drugs involved, demographic trends, and geographic patterns. The notebook will use publicly available data from the Centers for Disease Control and Prevention (CDC) to analyze and visualize the drug overdose death rates. It will also use statistical techniques and machine learning algorithms to identify the factors that contribute to overdose death rates and predict future trends.
# # Problem Statement:
# The problem statement for "Exploring Drug Overdose Death Rates in the U.S." is to understand the trends, demographics, and geographic patterns of drug overdose deaths in the United States and identify factors that contribute to overdose death rates.
# # Data Description:
# The data used for "Exploring Drug Overdose Death Rates in the U.S." will be obtained from the Centers for Disease Control and Prevention (CDC) and will include information on drug overdose deaths in the United States, including the types of drugs involved, demographic characteristics of the deceased, and geographic location of the deaths.
# Columns:
# INDICATOR - name or code of the indicator.
# PANEL - category or panel the indicator belongs to.
# PANEL_NUM - numeric code for the panel.
# UNIT - the unit of measurement for the indicator.
# UNIT_NUM - numeric code for the unit of measurement.
# STUB_NAME - name or code for the rows in the table.
# STUB_NAME_NUM - numeric code for the row names.
# STUB_LABEL - label or description for the row names.
# STUB_LABEL_NUM - numeric code for the stub labels.
# YEAR - year or time period for the data being measured.
# YEAR_NUM - numerical representation of the year.
# AGE - age group being measured.
# AGE_NUM - numerical representation of the age group.
# ESTIMATE - the estimated number of drug overdose deaths for the given year and age group.
# FLAG - an indicator of data quality or reliability, such as a missing or suppressed estimate.
# # Drugs prevention precautions:
# 1. Properly dispose of unused medication to prevent it from being misused.
# 2. Keep prescription medication in a secure location and only take it as directed by a healthcare provider.
# 3. Avoid using drugs, including prescription medication, that are not prescribed to you.
# 4. Educate yourself and others on the risks and consequences of drug use.
# 5. Seek help for substance abuse or addiction from a healthcare professional or addiction treatment provider.
# 6. Practice harm reduction strategies, such as carrying naloxone for opioid overdoses.
# 7. Address underlying mental health issues and social determinants of health that may contribute to substance abuse.
# # Importing Libraries:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.max_columns", None)
from IPython.display import Image
import plotly.express as px
# # Basic statistics and Data Reading:
Drug_df = pd.read_csv("/kaggle/input/drug-overdose/Drug Overdose.csv")
Drug_df.sample(10)
# # Exploratory Data Analysis (EDA)
Drug_df.columns
Drug_df.info()
dtypes = pd.DataFrame(Drug_df.dtypes, columns=["DataTypes"])
dtypes
print(
"Shape of the Dataset is {} Rows and {} Columns.".format(
len(Drug_df), len(Drug_df.columns)
)
)
Drug_df.describe()
Drug_df.describe().T
Drug_df.duplicated().sum()
Drug_df.isnull().sum()
Drug_df["UNIT"].nunique()
Drug_df["INDICATOR"].nunique()
Drug_df["AGE"].nunique()
Drug_df["STUB_NAME"].nunique()
# # COMPARISON SCATTER PLOT OF YEAR VS YEAR_NUM VS AGE_NUM
fig = px.scatter(Drug_df, x="YEAR", y="YEAR_NUM", color="AGE_NUM")
fig.show()
# # Data Visualization
plt.figure(figsize=(20, 5))
Drug_df.UNIT_NUM.value_counts().plot(kind="bar", rot=0)
Drug_df["PANEL_NUM"].value_counts()
(Drug_df["YEAR"].value_counts()).plot.pie(
autopct="%.1f%%",
shadow=True,
rotatelabels=True,
wedgeprops={"linewidth": 4},
radius=1.5,
)
plt.show()
plt.figure(figsize=(20, 5))
Drug_df["YEAR_NUM"].value_counts().plot(kind="bar", rot=0, color="blue")
plt.figure(figsize=(25, 8))
ax = sns.countplot(x="STUB_NAME_NUM", data=Drug_df)
plt.xlabel("YEAR. AGE_NUM")
plt.xticks(rotation=90)
for p in ax.patches:
ax.annotate(
int(p.get_height()),
(p.get_x() + 0.25, p.get_height() + 1),
va="bottom",
color="black",
)
(Drug_df["AGE"].value_counts()).plot.pie(
autopct="%.1f%%",
shadow=True,
rotatelabels=True,
wedgeprops={"linewidth": 4},
radius=1.5,
)
plt.show()
# # Bar Plot for Year Num Vs Age Num Reported
plt.figure(figsize=(20, 7))
sns.barplot(x=Drug_df["YEAR_NUM"], y=Drug_df["AGE_NUM"], palette="Accent")
# # Bar Plot for PANEL_NUM Vs UNIT_NUM:
plt.figure(figsize=(15, 6))
sns.barplot(x=Drug_df["PANEL_NUM"], y=Drug_df["UNIT_NUM"], palette="Accent")
# # Bar plot for No of Year:
plt.figure(figsize=(25, 7))
ax = Drug_df.YEAR.value_counts()[:25].plot(kind="bar", color="black")
for p in ax.patches:
ax.annotate(
int(p.get_height()),
(p.get_x() + 0.25, p.get_height() + 1),
ha="center",
va="bottom",
color="red",
)
|
# E-commerce Analytics
# 🛒 Sales & Market Basket Analysis
# Antonio Buzzelli
# April 2023
# The e-commerce industry has experienced significant growth in recent years, and online sales have become an increasingly important aspect of many businesses. Analyzing sales data can help businesses understand customer behavior and identify trends, which can then be used to improve their overall sales strategies and revenue. In this notebook, we will be analyzing a sales dataset from an e-commerce company to gain insights into their sales patterns and identify profitable opportunities.
# Our analysis will cover various aspects of the data, including temporal trends and customer geographical segmentation. We will also be performing a market basket analysis to identify relationships between products and suggest strategies for improving sales. By the end of this notebook, we aim to provide a comprehensive understanding of the sales data, which can then be used to make informed decisions and drive business growth.
# # Key findings and achievements
# * A monthly analysis of sales suggests an **increased volume of sales during the last months of the year**, from September to December.
# * The **intra-month analysis** shows the revenue reaching its peak at around three-quarters of the month and dips to its lowest point just before the end of the month.
# * The **intra-week analysis** shows that the sales volume and revenue significantly increase during the latter part of the week. Specifically, revenue exceeds the weekly average starting from Thursday.
# * Even though the majority of the volume of sales is concentrated in the UK, the most performing region in terms of average revenue is Asia. The ANOVA analysis shows that the **mean purchase value in the Asia/Pacific region is consistently and significantly higher** than the mean purchase value in the other regions. We can infer that the Asia/Pacific region is a potentially lucrative market with higher average purchase amounts than the other regions. Therefore, the store may want to consider investing more resources in this region to take advantage of this opportunity to increase volume of sales.
# * By conducting a **market basket analysis** with a focus on the Asian market, we have identified groups of products that are commonly bought together. This has helped us uncover the specific preferences and purchasing patterns of this region. The firm could use this information to create bundled offers that combine these item sets and boost sales volume in the Asian market, ultimately leading to an increase in revenue.
# ___
#
# dataframes
import numpy as np
import pandas as pd
# dataviz
import matplotlib.pyplot as plt
import seaborn as sns
# from jupyterthemes import jtplot
# jtplot.style(theme='monokai', context='notebook', grid=False)
# hypothesis testing
from scipy.stats import ttest_ind
from scipy.stats import f_oneway
from statsmodels.stats.multicomp import pairwise_tukeyhsd
# market basket analysis
from itertools import permutations
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules
from pandas.plotting import parallel_coordinates
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# # Data
# In this first part of the notebook we will import the data and prepare it for analysis.
df = pd.read_csv("/kaggle/input/an-online-shop-business/Sales Transaction v.4a.csv")
df.info()
df
# Here above, a glimpse of the data at our disposal. The dataset is composed by the following original variables:
# * TransactionNo (categorical): a six-digit unique number that defines each transaction. The letter “C” in the code indicates a cancellation.
# * Date (numeric): the date when each transaction was generated.
# * ProductNo (categorical): a five or six-digit unique character used to identify a specific product.
# * Product (categorical): product/item name.
# * Price (numeric): the price of each product per unit in pound sterling (£).
# * Quantity (numeric): the quantity of each product per transaction. Negative values related to cancelled transactions.
# * CustomerNo (categorical): a five-digit unique number that defines each customer.
# * Country (categorical): name of the country where the customer resides.
# Here below, we perform some operations to validate the type of variable, create an `Amount` column, and check the presence of missing values.
# Validating variable types
df["Date"] = pd.to_datetime(df["Date"])
df[["ProductNo", "CustomerNo"]] = df[["ProductNo", "CustomerNo"]].astype("object")
# Splitting `Date` column
df["Month"] = df["Date"].dt.month
df["Weekday"] = df["Date"].dt.weekday
df["WeekdayName"] = df["Date"].dt.day_name()
df["Day"] = df["Date"].dt.day
# Creating an `Amount` column
df["Amount"] = df["Quantity"] * df["Price"]
(df.isnull().sum() / df.shape[0]).sort_values(ascending=False)
nulls = df[df["CustomerNo"].isnull()]
nulls.head()
# The missing values in the dataset are only related to the `CustomerNo` column for a very small part that doesn't impact our analysis.
# # Sales analysis
# We will now be analyzing online sales data from a one-year period spanning from 2018-12-01 to 2019-11-30. The first type of analysis will focus on the **temporal aspect** of the data. This analysis aims to understand the sales evolution over time, as well as identify trends within months and weeks. The second type of analysis will center around examining the **regional spread of sales** in order to evaluate the existing market segmentation and gain insights into potential opportunities.
# Subsetting for one exact year
df = df[df["Date"] <= "2019-11-30"]
# ## Monthly evolution
#
month_evo = df.groupby(pd.Grouper(key="Date", freq="M")).agg(
sold=("Amount", "sum"),
returned=("Amount", lambda x: sum(x[x < 0])),
nunique=("TransactionNo", "nunique"),
)
month_evo["sold_moving_avg"] = month_evo["sold"].rolling(window=3).mean()
month_evo["returned"] = month_evo["returned"].abs()
month_evo.index = month_evo.index.date
month_evo
month_evo_sum = month_evo[["sold", "returned"]].sum(axis=1)
month_evo_pct = month_evo[["sold", "returned"]].div(month_evo_sum, axis=0)
fig, ax = plt.subplots(2, 1, figsize=(15, 10))
month_evo[["sold", "returned"]].plot.bar(ax=ax[0])
ax[0].set_ylabel("Revenue (GBP)")
ax[0].set_xlabel("Month")
ax[0].set_title("Monthly evolution of sales and returns")
ax[0].grid(axis="y")
month_evo_pct.plot.bar(stacked=True, ax=ax[1])
ax[1].set_ylabel("Percentage")
ax[1].set_xlabel("Month")
ax[1].set_title("Monthly relative amounts of sold and returned")
ax[1].grid(axis="y")
plt.subplots_adjust(hspace=0.5)
plt.show()
fig, ax1 = plt.subplots(figsize=(15, 5))
ax2 = plt.twinx()
ax1.plot(month_evo.index, month_evo["sold"], label="Revenue")
ax1.plot(
month_evo.index,
month_evo["sold_moving_avg"],
label="3-month revenue moving average",
)
ax2.bar(month_evo.index, month_evo["nunique"], width=8, label="Volume", alpha=0.25)
ax1.set_ylabel("Revenue (GBP)")
ax2.set_ylabel("Volume")
ax1.set_xlabel("Month")
plt.title("Monthly evolution of sales")
plt.grid(True)
ax1.legend(loc=(0.025, 0.85))
ax2.legend(loc=(0.3, 0.85))
plt.show()
# > An **increased volume of sales and revenue** is clearly visible **during the last months of the year**, from September to December.
# ## Intra-month analysis
#
df = df[df["Quantity"] > 0]
bydate = (
df.groupby("Date")
.agg(
UniqueTransactions=("TransactionNo", "nunique"),
UniqueProdSold=("TransactionNo", "count"),
ProdSold=("Quantity", "sum"),
Revenue=("Amount", "sum"),
)
.reset_index()
)
bydate["Day"] = bydate["Date"].dt.day
bydate["Weekday"] = bydate["Date"].dt.weekday
bydate["Month"] = bydate["Date"].dt.month
bydate["WeekdayName"] = bydate["Date"].dt.day_name()
bydate
byday = bydate.groupby("Day")[
["UniqueTransactions", "UniqueProdSold", "ProdSold", "Revenue"]
].mean()
byday.columns = [
"DailyAvgUniqueTransactions",
"DailyAvgUniqueProdSold",
"DailyAvgProdSold",
"DailyAvgRev",
]
byday = byday.sort_index()
byday.head()
rev_coefficients = np.polyfit(byday.index.values, byday["DailyAvgRev"].values, 5)
rev_regression_line = np.poly1d(rev_coefficients)
fig, ax1 = plt.subplots(figsize=(15, 5))
ax2 = plt.twinx()
ax2.plot(byday.index, byday["DailyAvgRev"], label="Daily average revenue", alpha=0.3)
ax1.bar(
byday.index,
byday["DailyAvgUniqueTransactions"],
label="Daily average unique transactions",
alpha=0.1,
)
ax2.plot(byday.index, rev_regression_line(byday.index.values), label="Regression line")
ax2.axhline(
byday["DailyAvgRev"].mean(),
color="b",
linestyle="dashed",
linewidth=1,
label="Monthly average",
)
ax1.set_ylabel("N. transactions")
ax2.set_ylabel("Revenue (GBP)")
plt.title("Intra-month sales analysis")
plt.grid(True)
ax1.legend(loc="upper left")
ax1.set_xlabel("Day")
ax2.legend()
plt.show()
# > By analyzing the revenue data within a month, we can observe that the daily average revenue varies throughout the month. The revenue reaches its peak at around three-quarters of the month and dips to its lowest point just before the end of the month. However, it starts to increase again just before the last few days. The dip in revenue just before the end of the month is considered normal as it coincides with the time when people typically receive their salaries.
# ## Intra-week analysis
#
byweekday = bydate.groupby(["Weekday", "WeekdayName"])[
["UniqueTransactions", "UniqueProdSold", "ProdSold", "Revenue"]
].mean()
byweekday.columns = [
"DailyAvgUniqueTransactions",
"DailyAvgUniqueProdSold",
"DailyAvgProdSold",
"DailyAvgRev",
]
byweekday = byweekday.reset_index().set_index("Weekday")
byweekday.index = byweekday.index + 1
byweekday
rev_coefficients = np.polyfit(
byweekday.index.values, byweekday["DailyAvgRev"].values, 2
)
rev_regression_line = np.poly1d(rev_coefficients)
fig, ax1 = plt.subplots(figsize=(15, 5))
ax2 = plt.twinx()
ax2.plot(
byweekday["WeekdayName"],
byweekday["DailyAvgRev"],
label="Daily average revenue",
alpha=0.3,
)
ax1.bar(
byweekday["WeekdayName"],
byweekday["DailyAvgUniqueTransactions"],
label="Daily average unique transactions",
alpha=0.1,
)
ax2.plot(byweekday["WeekdayName"], rev_regression_line(byweekday.index.values), label="Regression line")
ax2.axhline(
byweekday["DailyAvgRev"].mean(),
color="b",
linestyle="dashed",
linewidth=1,
label="Weekly average",
)
ax1.set_ylabel("N. transactions")
ax2.set_ylabel("Revenue(GBP)")
plt.title("Intra-week sales analysis")
plt.grid(axis="y")
ax1.legend(loc="lower left")
ax1.set_xlabel("Weekday")
ax2.legend()
plt.show()
# Similar to the analysis conducted within a month, examining sales patterns within a week can also reveal interesting insights.
# > By looking at the graph above, it becomes evident that the sales volume and revenue significantly increase during the latter part of the week. Specifically, revenue exceeds the weekly average starting from Thursday. On the other hand, Wednesday remains the least profitable day of the week with the lowest sales volume and revenue.
# ## Geographical analysis
# When conducting a geographical analysis of sales, it is essential to consider both the average purchase value and sales volume to determine if there are any countries that offer promising opportunities. For instance, a country with a high average purchase value but low sales volume may indicate that it has untapped potential and should be targeted for further penetration. The average purchase value gives an indication of the buying power and willingness of customers to spend money, while sales volume reflects the market demand and potential for growth. A country with a high average purchase value and low sales volume could be a potential opportunity for businesses to capitalize on the untapped market potential by increasing their presence and promoting their products or services more effectively.
#
# Mapping regions
regions = {
"Europe": [
"Sweden",
"Denmark",
"Norway",
"Finland",
"Iceland",
"Netherlands",
"Belgium",
"France",
"Germany",
"Switzerland",
"Austria",
"Italy",
"Spain",
"Greece",
"Portugal",
"Malta",
"Cyprus",
"Czech Republic",
"Lithuania",
"Poland",
"United Kingdom",
"EIRE",
"Channel Islands",
"European Community",
],
"North America": ["USA", "Canada"],
"Middle East": [
"Bahrain",
"United Arab Emirates",
"Israel",
"Lebanon",
"Saudi Arabia",
],
"Asia Pacific": ["Japan", "Australia", "Singapore", "Hong Kong"],
"RoW": ["Brazil", "RSA"],
"Unspecified": ["Unspecified"],
}
country_to_region = {}
for region, countries in regions.items():
for country in countries:
country_to_region[country] = region
df["Region"] = df["Country"].map(country_to_region)
df["UKvsRoW"] = np.where(df["Country"] == "United Kingdom", "UK", "RoW")
bycountry = (
df.groupby("Country")
.agg(tot_amount=("Amount", "sum"), mean_amount=("Amount", "mean"))
.sort_values("tot_amount", ascending=False)
)
bycountry.head()
fig, ax = plt.subplots(2, figsize=(15, 10))
ax[0].bar(bycountry.index, bycountry["tot_amount"])
ax[1].bar(
bycountry.sort_values("mean_amount", ascending=False).index,
bycountry.sort_values("mean_amount", ascending=False)["mean_amount"],
)
plt.setp(ax, xticks=bycountry.index, xticklabels=bycountry.index)
plt.setp(ax[0].get_xticklabels(), rotation=90, ha="center")
plt.setp(ax[1].get_xticklabels(), rotation=90, ha="center")
ax[0].set_ylabel("Amount (GBP)")
ax[1].set_ylabel("Amount (GBP)")
ax[0].set_title("Countries by total amount sold")
ax[1].set_title("Countries by average amount sold")
plt.suptitle("Overview on geographical market spread")
ax[0].grid(axis="y")
ax[1].grid(axis="y")
plt.subplots_adjust(hspace=0.7)
plt.show()
byukvsrow = (
df.groupby("UKvsRoW")
.agg(
tot_amount=("Amount", "sum"),
mean_amount=("Amount", "mean"),
n_inv=("TransactionNo", "nunique"),
quantity=("Quantity", "mean"),
)
.sort_values("mean_amount", ascending=False)
)
byukvsrow
plt.pie(
byukvsrow["tot_amount"],
labels=byukvsrow.index,
autopct="%1.1f%%",
explode=(0.1, 0),
shadow=True,
)
plt.title("Total revenue by UK vs other countries")
plt.show()
row_rev = df.loc[df["UKvsRoW"] == "RoW", "Amount"]
uk_rev = df.loc[df["UKvsRoW"] == "UK", "Amount"]
ttest_ind(uk_rev, row_rev)
# > Even though international customers account for only 17.0% of sales, the **average revenue generated abroad is significantly higher than that generated in the UK**. This means that, for this business, international markets are potentially more lucrative than the national one and should be exploited more.
#
byregion = (
df.groupby("Region")
.agg(
tot_amount=("Amount", "sum"),
mean_amount=("Amount", "mean"),
n_inv=("TransactionNo", "nunique"),
quantity=("Quantity", "mean"),
)
.sort_values("mean_amount", ascending=False)
)
byregion.sort_values("mean_amount", ascending=False)
fig, ax1 = plt.subplots(figsize=(15, 5))
ax1 = plt.bar(byregion.index, byregion["mean_amount"])
plt.title("Average purchase value by region")
plt.ylabel("Amount (GBP)")
plt.xlabel("Region")
plt.grid(axis="y")
plt.show()
f_value, p_value = f_oneway(
df.loc[df["Region"] == "Asia Pacific", "Amount"],
df.loc[df["Region"] == "North America", "Amount"],
df.loc[df["Region"] == "Middle East", "Amount"],
df.loc[df["Region"] == "Europe", "Amount"],
df.loc[df["Region"] == "RoW", "Amount"],
)
print(f"ANOVA F-value: {f_value:.2f}")
print(f"ANOVA p-value: {p_value:.4f}")
tukey_df = df.filter(items=["Amount", "Region"]).dropna()
print(pairwise_tukeyhsd(tukey_df["Amount"], tukey_df["Region"]))
# > We can observe from both the bar plot and the ANOVA analysis that the **mean purchase value in the Asia/Pacific region is consistently and significantly higher** than the mean purchase value in the other regions. Based on this important information, we can infer that the Asia/Pacific region is a potentially lucrative market with higher average purchase amounts than the other regions. Therefore, the store may want to consider investing more resources in this region to take advantage of this opportunity to increase volume of sales. The business can consider implementing targeted marketing strategies, such as advertising campaigns and promotions, that cater to the preferences and interests of the Asia/Pacific market. Additionally, it can explore expanding its product offerings to meet the specific demands of this region, or enhancing the quality of existing products to meet their higher standards. It may be useful to conduct further research and analysis to gain deeper insights into the preferences and behavior of customers in the Asia/Pacific region, and tailor sales strategies accordingly.
# # Market basket analysis for the Asian market
# Market basket analysis, specifically Apriori and association rules, can provide valuable insights into customer behavior and preferences that can be used to develop effective marketing strategies. By analyzing customer purchase patterns and identifying which products are commonly purchased together, businesses can create product bundles and promotions that cater to specific customer segments. For instance, if the analysis reveals that customers who purchase Product A are highly likely to also purchase Product B, the business can create a bundle that includes both products at a discounted price to increase sales.
# The Asia/Pacific region has a consistently higher average purchase value than other regions, indicating a potential opportunity to increase sales and revenue in that particular market. By conducting basket analysis on this region, the business can gain further insights into the specific product preferences and purchasing habits of customers in this market. This information can then be used to create targeted marketing strategies, such as promotions and advertising campaigns, that appeal to the unique needs and interests of customers in the Asia/Pacific region.
# Subsetting for Asia/Pacific transactions
asian_market = df[df["Region"] == "Asia Pacific"]
# Converting transactions in a list of lists
transactions = (
asian_market.groupby("TransactionNo")
.apply(lambda x: list(x["ProductName"]))
.to_list()
)
encoder = TransactionEncoder().fit(transactions)
onehot = encoder.transform(transactions)
onehot = pd.DataFrame(onehot, columns=encoder.columns_)
# Selecting frequent itemsets with apriori algorythm
frequent_itemsets = apriori(onehot, min_support=0.05, max_len=5, use_colnames=True)
print("Number of itemsets selected by the Apriori algorithm:", len(frequent_itemsets))
# First, we subset the dataframe to filter for the transactions happened in the Asian market and encode them in binary features (one-hot encoding). Then, with the Apriori algorithm, we group them together according to a minimum support of 0.05 and we filter them according to a minimum confidence level of 1.
# Computing association rules for the frequent itemsets, and filtering by confidence == 1
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=1)
# Adding number of items in the itemsets
rules["n_antecedents"] = rules["antecedents"].apply(lambda x: len(x))
rules["n_consequents"] = rules["consequents"].apply(lambda x: len(x))
rules.sample(15, random_state=42)
# The result is a dataframe containing frequently sold itemsets together with a set of metrics for market basket analysis (a tiny numeric illustration follows the list below). These MBA metrics are commonly used in association rule mining, a data mining technique used to identify relationships and patterns among items in a dataset. Here's a brief explanation of each metric:
# * **Antecedent support**: This refers to the proportion of transactions that contain the antecedent (or the "if" part of a rule). It is calculated as the number of transactions containing the antecedent divided by the total number of transactions.
# * **Consequent support**: This refers to the proportion of transactions that contain the consequent (or the "then" part of a rule). It is calculated as the number of transactions containing the consequent divided by the total number of transactions.
# * **Support**: This refers to the proportion of transactions that contain both the antecedent and the consequent. It is calculated as the number of transactions containing both the antecedent and the consequent divided by the total number of transactions.
# * **Confidence**: This measures the strength of the association between the antecedent and the consequent. It is calculated as the support of the antecedent and consequent divided by the support of the antecedent. Confidence can range from 0 to 1, with higher values indicating stronger associations.
# * **Lift**: This measures the degree to which the presence of the antecedent affects the likelihood of the consequent. It is calculated as the support of the antecedent and consequent divided by the product of the support of the antecedent and the support of the consequent. A lift value greater than 1 indicates a positive association between the antecedent and consequent, while a value less than 1 indicates a negative association.
# * **Leverage**: This measures the difference between the observed frequency of the antecedent and consequent co-occurring and the frequency expected if they were independent. It is calculated as the support of the antecedent and consequent minus the product of the support of the antecedent and the support of the consequent. A positive leverage value indicates a positive association between the antecedent and consequent, while a negative value indicates a negative association.
# * **Conviction**: This measures the degree of implication of the rule. It is calculated as (1 - consequent support) divided by (1 - confidence). Conviction can range from 0 to infinity, with higher values indicating stronger implications.
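# As a quick sanity check, these formulas can be computed by hand. The numbers below are purely illustrative (not taken from this dataset): suppose the antecedent appears in 20% of transactions, the consequent in 25%, and the two together in 15%.
support_a, support_c, support_ac = 0.20, 0.25, 0.15
confidence = support_ac / support_a  # 0.75
lift = support_ac / (support_a * support_c)  # 3.0 -> positive association
leverage = support_ac - support_a * support_c  # 0.10
conviction = (1 - support_c) / (1 - confidence)  # 3.0
print(confidence, lift, leverage, conviction)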
# > Upon examining the frequent itemsets, it becomes evident that most of them consist of identical items that are often purchased together, with only minor variations such as color or pattern. For instance, transactions may include items like Blue Polkadot Bowls and Pink Polkadot Bowls, Dolly Girl Lunch Boxes and Spaceboy Lunch Boxes, or Feltcraft Princess Lola Dolls and Feltcraft Princess Olivia Dolls.
# ## Bundle offers
# Based on the observation that these items are frequently bought together, it could be advantageous to offer them as bundles to customers. The firm could offer convenience and value to customers while potentially increasing sales and revenue. For example, a bundle might include both the Blue Polkadot Bowl and the Pink Polkadot Bowl, or the Dolly Girl Lunch Box and the Spaceboy Lunch Box. This strategy can be an effective way to meet Asian customers' needs while boosting profits for the retailer.
# Since we want to create bundle offers for single products, we filter for single items
rules = rules[(rules["n_antecedents"] == 1) & (rules["n_consequents"] == 1)]
rules.sort_values("support", ascending=False)
rules["antecedent"] = rules["antecedents"].apply(lambda x: list(x)[0])
rules["consequent"] = rules["consequents"].apply(lambda x: list(x)[0])
rules["rule"] = rules.index
coords = rules[["antecedent", "consequent", "rule"]]
parallel_coordinates(coords, "rule", colormap="ocean")
plt.title("Bundle offers for Asian / Pacific market")
plt.show()
|
# # Stroke Prediction Dataset
# **11 clinical features for predicting stroke events**
# ## Contents
# 1. [Feature description](#1)
# 2. [Library imports](#2)
# 3. [Dataset exploration](#3)
# 4. [Modeling](#4)
# 5. [Interpretation](#5)
# 6. [Conclusion](#6)
# ## 1. Feature description
# [Back to top](#0)
# 1) **id**: *unique identifier*
# 2) **gender**: *patient's gender*
# 3) **age**: *patient's age*
# 4) **hypertension**: *whether the patient has hypertension*
# 5) **heart_disease**: *whether the patient has heart disease*
# 6) **ever_married**: *marital status*
# 7) **work_type**: *type of work*
# 8) **Residence_type**: *type of residence*
# 9) **avg_glucose_level**: *average blood glucose level*
# 10) **bmi**: *body mass index*
# 11) **smoking_status**: *smoking status*
# 12) **stroke**: *whether the patient has had a stroke*
# ## 2. Library imports
# [Back to top](#0)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_predict, train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
import warnings
warnings.filterwarnings("ignore")
# ## 3. Dataset exploration
# [Back to top](#0)
# Load the dataset
stroke = pd.read_csv(
"../input/stroke-prediction-dataset/healthcare-dataset-stroke-data.csv"
)
stroke.head()
# Check the size of the dataset
stroke.shape
stroke.info()
# As we can see, only **bmi** has missing values; the other features have none
stroke.describe(include=object)
# **gender** contains 3 values. Let's look at this feature more closely
stroke.gender.value_counts()
# One of the patients has gender listed as _Other_
# Replace Other with Female
stroke.replace("Other", "Female", inplace=True)
stroke.describe()
# About 5% of the subjects have had a stroke, and roughly the same share suffer from heart disease. About 10% of the patients have elevated blood pressure. The average age is 43.
# Check the dataset for duplicates
stroke.duplicated().sum()
# Great, no duplicates!
# Drop the id column right away, since it is of no interest
stroke.drop("id", axis=1, inplace=True)
# Next, let's look at how strokes are distributed across different patient groups
# Look at how strokes are distributed across different patient groups
g = sns.catplot(
x="value",
hue="stroke",
data=stroke.melt(
id_vars=["stroke"], value_vars=stroke.select_dtypes([object, int])
),
col="variable",
kind="count",
col_wrap=2,
sharex=False,
sharey=False,
)
g.set_xticklabels(rotation=45)
g.tight_layout()
g = sns.catplot(
x="value",
y="stroke",
data=stroke.melt(
id_vars=["stroke"], value_vars=stroke.select_dtypes([object, int])
),
col="variable",
kind="point",
col_wrap=2,
sharex=False,
sharey=False,
)
g.set_xticklabels(rotation=45)
g.tight_layout()
# Interesting observations from these plots:
# - the probability of stroke is slightly higher for men than for women
# - patients with hypertension are about 3 times more likely to have a stroke
# - patients with heart disease are also about 3 times more likely to have a stroke
# - those who have never been married have strokes more than 3 times less often
# - stroke is most common among the self-employed and almost absent among those who do not work
# - former smokers are more prone to stroke than anyone else (even more than current smokers)
# - city dwellers have strokes slightly more often
# Let's see how the features look when split by gender
g = sns.catplot(
x="value",
y="stroke",
data=stroke.melt(
id_vars=["stroke", "gender"], value_vars=stroke.select_dtypes([object, int])
),
col="variable",
hue="gender",
kind="point",
col_wrap=2,
sharex=False,
sharey=False,
)
g.set_xticklabels(rotation=45)
g.tight_layout()
# Nothing substantial!
# Now let's look at how the numerical features are distributed
# Now let's look at how the numerical features are distributed
g = sns.displot(
x="value",
hue="stroke",
data=stroke.melt(id_vars=["stroke"], value_vars=stroke.select_dtypes(float)),
col="variable",
kind="kde",
col_wrap=2,
facet_kws=dict(sharex=False, sharey=False),
)
g.set_xticklabels(rotation=45)
g.tight_layout()
# Interesting observations from these plots:
# - for **bmi** there is no meaningful difference: in both groups the peak is at about 30
# - the average blood glucose level shows two peaks
# - for age, however, stroke patients show a clear shift of the peak towards 80 years
# Let's look at the pair plots
sns.pairplot(stroke, hue="stroke")
# Unfortunately, the separation by the target variable is hard to see. Let's look at some plots in more detail
def plot_stroke(x, y, alpha_1=0.2, alpha_2=0.5):
plt.plot(
stroke[stroke.stroke == 0][x],
stroke[stroke.stroke == 0][y],
"bo",
label="no stroke",
alpha=alpha_1,
)
plt.plot(
stroke[stroke.stroke == 1][x],
stroke[stroke.stroke == 1][y],
"r^",
label="stroke",
alpha=alpha_2,
)
plt.grid(True, which="both")
plt.legend(loc="center right", fontsize=12)
plt.xlabel(x, fontsize=12)
plt.ylabel(y, fontsize=12, rotation=90)
# Let's look at how patient age, heart disease and stroke are related
plot_stroke("age", "heart_disease")
plt.title("heart_disease vs age")
# An interesting pattern: in the group of patients without heart disease, strokes start to appear from about age 40, while in patients with heart disease they appear closer to 60.
plot_stroke("age", "hypertension")
plt.title("hypertension vs age")
# A roughly similar picture holds for hypertension: strokes start to appear at about 40 in patients without it and at about 50 in patients with hypertension
# Now let's look at how body mass index,
# age and blood glucose relate to each other
stroke.plot(
kind="scatter",
x="avg_glucose_level",
y="bmi",
alpha=0.2,
label="stroke",
figsize=(8, 6),
c="age",
s=stroke.stroke.map(lambda x: 100 if x else 10),
cmap="seismic",
colorbar=True,
grid=True,
sharex=False,
)
plt.legend()
# Two clusters are clearly visible, to the left and to the right of a glucose level of 150-170. The right cluster is mostly red, while the left one is a mix of red and blue, meaning that an elevated glucose level is typical for ages above 40. BMI also rises slightly with age. Stroke patients appear in both clusters. Looking only at stroke patients, most of them are older than 50 and are split roughly evenly between the two groups, with a visible increase in BMI.
stroke[stroke.stroke == 1].plot(
kind="scatter",
x="avg_glucose_level",
y="bmi",
label="age",
figsize=(8, 6),
c="age",
cmap="seismic",
colorbar=True,
grid=True,
sharex=False,
)
plt.legend()
# Finally, let's look at how stroke is distributed across age groups
sns.histplot(x="age", data=stroke, hue="stroke", bins=8)
# Stroke starts to appear after age 40, then rises after 50 and again after 70. Since there are fewer patients above 60, the increase holds not only in absolute numbers but also in relative terms!
# ## 4. Modeling
# [Back to top](#0)
# To make sure later on that we are moving in the right direction, let's get baseline scores on the current data. But first recall that the categorical features **gender**, **ever_married**, **Residence_type** each take only 2 values. Let's convert them
stroke.gender = stroke.gender.map(lambda x: 1 if x == "Male" else 0)
stroke.ever_married = stroke.ever_married.map(lambda x: 1 if x == "Yes" else 0)
stroke.Residence_type = stroke.Residence_type.map(lambda x: 1 if x == "Urban" else 0)
# Split the dataset into training and test sets with a 1/5 ratio, stratifying by the target variable, since the class of interest is rare (about 5%)
# Split the dataset into training and test sets with a 1/5 ratio.
# Stratify by the target variable, since the class of interest
# is rare (about 5%)
stroke_train, stroke_test = train_test_split(
stroke, test_size=0.2, random_state=42, stratify=stroke.stroke
)
# Also recall that bmi has missing values. Let's fill them in
stroke_train.bmi = stroke_train.bmi.fillna(stroke_train.bmi.mean())
X_train = stroke_train.drop(["stroke"], axis=1)
y_train = stroke_train.stroke.copy()
cat_columns = X_train.select_dtypes(object).columns
num_columns = X_train.select_dtypes(exclude=object).columns
# Encode the categorical features and standardize the numerical ones
prep = ColumnTransformer(
[
("cat", OneHotEncoder(sparse=True, handle_unknown="ignore"), cat_columns),
("num", StandardScaler(), num_columns),
]
)
X_train_encoded = prep.fit_transform(X_train)
# Define a metrics function
def make_scores(y_train, y_pred):
scores = {
"accuracy_score": f"{accuracy_score(y_train, y_pred):0.3f}",
"precision_score": f"{precision_score(y_train, y_pred):0.3f}",
"recall_score": f"{recall_score(y_train, y_pred):0.3f}",
"f1_score": f"{f1_score(y_train, y_pred):0.3f}",
}
return scores
# Define several classifiers with class balancing enabled
log_clf = LogisticRegression(random_state=42, class_weight="balanced")
svm_clf = SVC(random_state=42, probability=True, class_weight="balanced")
tre_clf = DecisionTreeClassifier(random_state=42, class_weight="balanced")
rnd_clf = RandomForestClassifier(random_state=42, class_weight="balanced")
xgb_clf = XGBClassifier(random_state=42, verbosity=0, scale_pos_weight=19)
lgb_clf = LGBMClassifier(random_state=42, class_weight="balanced")
# Compute ROC AUC for each classifier
metrics = {}
for clf in (log_clf, svm_clf, tre_clf, rnd_clf, xgb_clf, lgb_clf):
y_train_score = cross_val_predict(
clf, X_train_encoded, y_train, cv=4, n_jobs=-1, method="predict_proba"
)
metrics[clf.__class__.__name__] = {
"roc_auc": roc_auc_score(y_train, y_train_score[:, 1])
}
# Compute the remaining metrics
for clf in (log_clf, svm_clf, tre_clf, rnd_clf, xgb_clf, lgb_clf):
y_train_pred = cross_val_predict(
clf, X_train_encoded, y_train, cv=4, n_jobs=-1, method="predict"
)
metrics[clf.__class__.__name__] = {
**metrics[clf.__class__.__name__],
**make_scores(y_train, y_train_pred),
}
# Define a function for a clearer display of the metrics
def display_metrics(metrics):
return (
pd.DataFrame(metrics)
.T.apply(pd.to_numeric)
.style.format("{:.3f}")
.background_gradient(cmap=plt.get_cmap("PuBu"), axis=0)
)
display_metrics(metrics)
# As we can see, **LogisticRegression** and **LightGBM** gave the best _ROC-AUC_ results
# Plot the ROC curves
colors = ["k", "r", "y", "g", "b", "m"]
clfs = [log_clf, svm_clf, tre_clf, rnd_clf, xgb_clf, lgb_clf]
for clf, color in zip(clfs, colors):
y_prob = cross_val_predict(
clf, X_train_encoded, y_train, cv=4, n_jobs=-1, method="predict_proba"
)
fpr, tpr, _ = roc_curve(y_train, y_prob[:, 1])
plt.plot(
fpr,
tpr,
linewidth=2,
label=f"{clf.__class__.__name__}={roc_auc_score(y_train, y_prob[:, 1]):.3f}",
)
plt.plot([0, 1], [0, 1], "k--")
plt.axis([0, 1, 0, 1])
plt.legend()
plt.xlabel("False Positive Rate (FPR)", fontsize=12)
plt.ylabel("True Positive Rate (TPR)", fontsize=12)
plt.grid(True)
# Before checking our models on the test set, let's see whether there are outliers among the numerical features. We'll use box plots
# Before checking our models on the test set, let's see whether
# there are outliers among the numerical features.
# We'll use box plots
g = sns.catplot(
y="value",
data=stroke_train.melt(
id_vars=["stroke"], value_vars=stroke_train.select_dtypes(float)
),
col="variable",
kind="box",
col_wrap=2,
sharex=False,
sharey=False,
)
# As we can see, **bmi** and **avg_glucose_level** have quite a few outliers. Let's see how these outliers are distributed across the classes
g = sns.catplot(
y="value",
x="stroke",
data=stroke_train.melt(
id_vars=["stroke"], value_vars=stroke_train.select_dtypes(float)
),
col="variable",
kind="box",
col_wrap=2,
sharex=False,
sharey=False,
)
# We get an interesting picture: **avg_glucose_level** values that are normal for the positive class are outliers for the negative class, and a somewhat similar pattern holds for **bmi**. At first glance, removing outliers separately for each class seems logical, but then it is unclear how to handle the test data, where the class labels are not available. So for now we leave the outliers alone!
# Now let's check our models with default parameters on the test data. But first we need to prepare it
# Fill in the missing values
stroke_test.bmi = stroke_test.bmi.fillna(stroke_train.bmi.mean())
# Separate the labels
X_test = stroke_test.drop(["stroke"], axis=1)
y_test = stroke_test.stroke.copy()
# Encode the categorical variables
X_test_encoded = prep.transform(X_test)
# Update the metric values
metrics_test = {}
for clf in (log_clf, svm_clf, tre_clf, rnd_clf, xgb_clf, lgb_clf):
clf.fit(X_test_encoded, y_test)
y_test_score = clf.predict_proba(X_test_encoded)
metrics_test[clf.__class__.__name__] = {
"roc_auc": roc_auc_score(y_test, y_test_score[:, 1])
}
for clf in (log_clf, svm_clf, tre_clf, rnd_clf, xgb_clf, lgb_clf):
clf.fit(X_test_encoded, y_test)
y_test_pred = clf.predict(X_test_encoded)
metrics_test[clf.__class__.__name__] = {
**metrics_test[clf.__class__.__name__],
**make_scores(y_test, y_test_pred),
}
# Display the resulting values
display_metrics(metrics_test)
# Four of the six classifiers gave 100% predictions, and the accuracy on the test set turned out higher than on the training set! (Note that in the cells above the models are fit directly on the test set, so these scores reflect in-sample fit rather than true held-out performance.)
# Display the metric change in percent
def diff_metrics(a, b):
a1 = pd.DataFrame(a).apply(pd.to_numeric)
b1 = pd.DataFrame(b).apply(pd.to_numeric)
columns = a.keys()
index = list(a.values())[0].keys()
diff = pd.DataFrame(
(a1.values - b1.values) / b1.values * 100, columns=columns, index=index
)
return diff.T.style.format("{:.2f}").background_gradient(
cmap=plt.get_cmap("coolwarm"), axis=None, vmin=-10, vmax=10
)
diff_metrics(metrics_test, metrics)
# As we can see, the tree-based classifiers show the best results, while the linear classifier (logistic regression) performs worst
# Let's try to interpret our models with [SHAP](https://shap-lrjball.readthedocs.io/en/latest/index.html)
# ## 5. Interpretation
# [Back to top](#0)
# Import the **SHAP** module
import shap
shap.initjs()
# To understand how our models work, let's randomly pick two samples from the test set with target 0 and two with target 1
np.random.seed(42)
idx1 = np.random.choice(stroke_test[stroke_test.stroke == 0].index, 2)
idx2 = np.random.choice(stroke_test[stroke_test.stroke == 1].index, 2)
idx = idx1.tolist() + idx2.tolist()
idx = stroke_test.index.get_indexer(idx).tolist()
# Let's try to explain the behavior of our model using _XGBClassifier_ as an example
explainer = shap.TreeExplainer(xgb_clf)
shap_train_values = explainer.shap_values(X_train_encoded, y_train)
shap_test_values = explainer.shap_values(X_test_encoded, y_test)
shap_train_encoded = pd.DataFrame(
shap_train_values, index=X_train.index, columns=prep.get_feature_names_out()
)
shap_test_encoded = pd.DataFrame(
shap_test_values, index=X_test.index, columns=prep.get_feature_names_out()
)
# Assess the influence of the features
shap.summary_plot(shap_train_values, X_train_encoded, prep.get_feature_names_out())
# As we can see, the model relies mainly on three features: **age**, **avg_glucose_level** and **bmi**. The relationship between stroke and **age** is direct, while for **bmi** it is ambiguous and most likely should be considered together with other features. A low **avg_glucose_level** lowers the probability of stroke. The remaining features are less important (though they can still play an important role in certain combinations)
# Plot the dependence plots for bmi and avg_glucose_level
shap.dependence_plot(
"num__bmi", shap_train_values, X_train_encoded, prep.get_feature_names_out()
)
# For older patients with a normal **bmi**, stroke is not typical, whereas it is typical for younger patients
shap.dependence_plot(
"num__avg_glucose_level",
shap_train_values,
X_train_encoded,
prep.get_feature_names_out(),
)
# This plot shows that as **avg_glucose_level** grows, the probability of stroke rises for older patients. Let's take a few samples from the test set and see how the model makes its decisions
stroke_test.iloc[[idx[0]]]
shap.force_plot(
explainer.expected_value,
shap_test_values[idx[0]],
features=shap_test_encoded.iloc[idx[0], :],
)
# _The patient is middle-aged with a normal glucose level but an elevated body mass (although in this case it pushes the prediction away from stroke)_
stroke_test.iloc[[idx[1]]]
shap.force_plot(
explainer.expected_value,
shap_test_values[idx[1]],
features=shap_test_encoded.iloc[idx[1], :],
)
# _This patient has no negative factors at all_
stroke_test.iloc[[idx[2]]]
shap.force_plot(
explainer.expected_value,
shap_test_values[idx[2]],
features=shap_test_encoded.iloc[idx[2], :],
)
# _Here we have a very young patient, but with an elevated **bmi**_ for this age
stroke_test.iloc[[idx[3]]]
shap.force_plot(
explainer.expected_value,
shap_test_values[idx[3]],
features=shap_test_encoded.iloc[idx[3], :],
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# This is an example of using the random forest method to predict bankruptcy for a company, based on the Kaggle bankruptcy-prediction dataset.
# variables
path_str = "../input/company-bankruptcy-prediction/data.csv"
# For random forest, a number of trees must be selected.
# The higher the number, the more thorough the calculation, but the longer it takes to run.
number_of_trees = 200
# Target column for random forest prediction
target_column_name = "Bankrupt?"
# Decision trees can usually be large. Setting this variable to 3 or 4 makes the resulting tree easier to see and interpret.
tree_depth = 4
# Load data
# create dataframe from data
df = pd.read_csv(path_str)
df.head()
# check length-rows and width-columns of data
df.shape
# Use numpy to convert to arrays.
# NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices,
# along with a large collection of high-level mathematical functions to operate on these arrays.
import numpy as np
# Assign target variable to separate array
target = np.array(df[target_column_name])
# Remove target column from features
features = df.drop(target_column_name, axis=1)
# Saving feature names for later use
feature_list = list(features.columns)
# convert features dataframe to array
features = np.array(features)
# Using Skicit-learn to split data into training and testing sets.
# Scikit-learn (formerly scikits.learn and also known as sklearn) is a free software machine learning library for the Python programming language.
# It features various classification, # regression and clustering algorithms including support vector machines, random forests,
# gradient boosting, k-means and DBSCAN, and is designed to interoperate with the Python numerical and scientific libraries NumPy and SciPy.
from sklearn.model_selection import train_test_split
# Split the data into training and testing sets. test_size is the fraction of rows held out for testing (25% here); the remaining rows train the model.
train_features, test_features, train_target, test_target = train_test_split(
features, target, test_size=0.25, random_state=42
)
# Check to see that training features and labels have the same rows, and testing features and labels have the same rows
print("Training Features Shape:", train_features.shape)
print("Training target Shape:", train_target.shape)
print("Testing Features Shape:", test_features.shape)
print("Testing target Shape:", test_target.shape)
# Import the model we are using
from sklearn.ensemble import RandomForestRegressor
# Instantiate model. n_estimators is the number of decision trees you want to use
rf = RandomForestRegressor(n_estimators=number_of_trees, random_state=42)
# Train the model on training data
rf.fit(train_features, train_target)
# Import tools needed for visualization
from sklearn.tree import export_graphviz
from IPython.display import Image
# pydot may need to be installed (e.g. pip install pydot).
import pydot
# Limit depth of tree to n levels
rf_small = RandomForestRegressor(n_estimators=10, max_depth=tree_depth)
rf_small.fit(train_features, train_target)
# Extract the small tree
tree_small = rf_small.estimators_[5]
# Save the tree as a png image
export_graphviz(
tree_small,
out_file="small_tree.dot",
feature_names=feature_list,
rounded=True,
precision=1,
)
(graph,) = pydot.graph_from_dot_file("small_tree.dot")
graph.write_png("small_tree.png")
# show png file
Image(graph.create_png())
# Get numerical feature importances
importances = list(rf.feature_importances_)
# List of tuples with variable and importance
feature_importances = [
(feature, round(importance, 2))
for feature, importance in zip(feature_list, importances)
]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)
# Print out the feature and importances
[print("Variable: {:20} Importance: {}".format(*pair)) for pair in feature_importances]
|
import seaborn as sns
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import warnings
import pandas as pd
warnings.filterwarnings("ignore")
data = sns.load_dataset("car_crashes")
data.head()
regData = data[["alcohol", "total"]]
regData.head()
regData.isnull().sum()
regData.shape
canvas = plt.figure(figsize=(8, 6))
g1 = canvas.add_axes([0, 1.1, 1, 1])
g1 = sns.boxplot(x=regData.alcohol)
g2 = canvas.add_axes([1.1, 1.1, 1, 1])
g2 = sns.violinplot(x=regData.alcohol)
g3 = canvas.add_axes([0, 0, 1, 1])
g3 = sns.boxplot(x=regData.total)
g4 = canvas.add_axes([1.1, 0, 1, 1])
g4 = sns.violinplot(x=regData.total)
alcQ1 = regData.alcohol.quantile(0.25)
alcQ3 = regData.alcohol.quantile(0.75)
alcIQR = alcQ3 - alcQ1
alcLowLimit = alcQ1 - alcIQR * 1.5
alcUpLimit = alcQ3 + alcIQR * 1.5
if alcLowLimit < 0:
alcLowLimit = 0
regData = regData[~((regData.alcohol > alcUpLimit) | (regData.alcohol < alcLowLimit))]
regData.shape
sns.jointplot(
x="alcohol",
y="total",
data=regData,
kind="reg",
marker="x",
marginal_kws=dict(bins=regData.shape[0], fill=False),
)
X = regData[["alcohol"]]
y = regData[["total"]]
model = LinearRegression().fit(X, y)
model.intercept_ # model's b0 value.
model.coef_ # model's b1 value.
# ##### Formula Of The Model
# y = 3.53777662 + 2.5743465 * X
# ##### Estimate By Pred
model.predict([[2.4]])[0][0]
# ##### Estimate By Formula
# For an alcohol value of 2.4 ==> 3.53777662 + 2.5743465 * 2.4 = 9.71620822 (the values are equal)
# So, this is the working principle of the "predict" function.
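# The same check can also be done programmatically, using the fitted model's own coefficients instead of the hard-coded numbers above (a small sketch; the alcohol value 2.4 is just an example):
alcohol_value = 2.4
manual_estimate = model.intercept_[0] + model.coef_[0][0] * alcohol_value
sklearn_estimate = model.predict([[alcohol_value]])[0][0]
print("Manual formula:", manual_estimate, "| model.predict:", sklearn_estimate)  # both should match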
# For a random sample:
regData.loc[12]
print("Real value: ", regData.loc[12][1])
print("Model's predict: ", model.predict([[regData.loc[12][0]]])[0][0])
graph = sns.regplot(
x=regData["alcohol"],
y=regData["total"],
ci=None,
marker="x",
scatter_kws={"color": "b", "s": 10, "alpha": 0.6},
line_kws={"color": "y", "linewidth": 2},
)
graph.set_title("Model's graph\n", color="k")
graph.set_xlabel("Alcohol", color="k")
graph.set_ylabel("Total", color="k")
graph.grid(alpha=0.15)
model.score(X, y)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Libraries
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Activation
from tensorflow.keras import models
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.utils import plot_model
from PIL import Image
import splitfolders
# # Splitting Data Into Train, Test, Validation
input_folder = "/kaggle/input/satellite-image-classification/data"
output = "dataset"
splitfolders.ratio(input_folder, output=output, seed=42, ratio=(0.7, 0.2, 0.1))
# # Data Augmentation
train_datagen = ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
training_set = train_datagen.flow_from_directory(
"/kaggle/working/dataset/train",
target_size=(224, 224),
batch_size=32,
class_mode="categorical",
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_set = test_datagen.flow_from_directory(
"/kaggle/working/dataset/test",
target_size=(224, 224),
batch_size=32,
class_mode="categorical",
)
val_datagen = ImageDataGenerator(rescale=1.0 / 255)
val_set = val_datagen.flow_from_directory(
"/kaggle/working/dataset/val",
target_size=(224, 224),
batch_size=32,
class_mode="categorical",
)
# # Creating Base Model With VGG16
base_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
base_model.trainable = False
# # Adding Layers to VGG16
flatten_layer = Flatten()
dense_layer_1 = Dense(50, activation="relu")
dense_layer_2 = Dense(20, activation="relu")
prediction_layer = Dense(4, activation="softmax")
# # Creating Final Model
model = models.Sequential(
[base_model, flatten_layer, dense_layer_1, dense_layer_2, prediction_layer]
)
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
# # Training Model With Training And Test data
history = model.fit(x=training_set, validation_data=test_set, epochs=5)
# # Accuracy Of Model On Validation Data
_, acc = model.evaluate(val_set, verbose=1)
print("> %.3f" % (acc * 100.0))
# # Plotting Model
plot_model(
model,
to_file="cnn_model.png",
show_shapes=True,
show_layer_names=False,
rankdir="TB",
expand_nested=False,
dpi=96,
)
display(Image.open("cnn_model.png"))
|
#############################################
# Diabetes Feature Engineering
##############################################
#############################################
# It is requested to develop a machine learning model that can predict whether people have
# diabetes when their characteristics are specified. You are expected to perform the necessary
# data analysis and feature engineering steps before developing the model.
##############################################
#############################################
# The dataset is part of a larger dataset held at the National Institute of Diabetes and
# Digestive and Kidney Diseases in the USA. It was collected for diabetes research on Pima
# Indian women aged 21 and over living in Phoenix, the 5th largest city in the State of Arizona.
# The target variable is "Outcome": 1 indicates a positive diabetes test result, 0 a negative one.
#############################################
#############################################
#############################################
# 9 Variables # 768 Observations
# Pregnancies: number of pregnancies
# Glucose: 2-hour plasma glucose concentration in the oral glucose tolerance test
# BloodPressure: diastolic blood pressure (mm Hg)
# SkinThickness: triceps skin fold thickness (mm)
# Insulin: 2-hour serum insulin (mu U/ml)
# DiabetesPedigreeFunction: a function that scores the likelihood of diabetes based on family history
# BMI: body mass index
# Age: age in years
# Outcome: has diabetes (1) or not (0)
##############################################
##############################################
# Task 1: Exploratory Data Analysis
##############################################
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
# !pip install missingno
import missingno as msno
from datetime import date
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import (
MinMaxScaler,
LabelEncoder,
StandardScaler,
RobustScaler,
)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.float_format", lambda x: "%.3f" % x)
pd.set_option("display.width", 500)
def load():
data = pd.read_csv("/kaggle/input/diabetes/diabetes.csv")
return data
df = load()
df.head()
#############################################
# Step 1: Check out the overall picture
#############################################
def check_df(dataframe, head=5):
print("##################### Shape #####################")
print(dataframe.shape)
print("##################### Types #####################")
print(dataframe.dtypes)
print("##################### Head #####################")
print(dataframe.head(head))
print("##################### Tail #####################")
print(dataframe.tail(head))
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("##################### Quantiles #####################")
print(dataframe.describe([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
check_df(df)
df.head()
df["Insulin"].nunique()
#############################################
# Step 2: Capture the numeric and categorical variables.
#############################################
def grab_col_names(dataframe, cat_th=10, car_th=20):
"""
It gives the names of categorical, numerical and categorical but cardinal variables in the data set.
Note: Categorical variables with numerical appearance are also included in categorical variables.
parameters
------
dataframe: dataframe
The dataframe from which variable names are to be retrieved
cat_th: int, optional
class threshold for numeric but categorical variables
car_th: int, optinal
class threshold for categorical but cardinal variables
Returns
------
cat_cols: list
Categorical variable list
num_cols: list
Numeric variable list
cat_but_car: list
Categorical view cardinal variable list
Examples
------
import seaborn as sns
df = sns.load_dataset("iris")
print(grab_col_names(df))
notes
------
cat_cols + num_cols + cat_but_car = total number of variables
num_but_cat is inside cat_cols.
The sum of the 3 returned lists equals the total number of variables: cat_cols + num_cols + cat_but_car = number of variables
"""
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
#############################################
# Step 3: Analyze the numerical and categorical variables.
#############################################
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("##########################################")
if plot:
sns.countplot(x=dataframe[col_name], data=dataframe)
plt.show()
for col in cat_cols:
cat_summary(df, col, plot=True)
def num_summary(dataframe, numerical_col, plot=False):
quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
print(dataframe[numerical_col].describe(quantiles).T)
if plot:
dataframe[numerical_col].hist(bins=20)
plt.xlabel(numerical_col)
plt.title(numerical_col)
plt.show(block=True)
for col in num_cols:
num_summary(df, col, plot=True)
#############################################
# Step 4: Perform target variable analysis. (The mean of the target variable according to the categorical
# variables, the mean of the numeric variables according to the target variable)
#############################################
df["Outcome"].value_counts()
def target_summary_with_cat(dataframe, target, categorical_col):
print(
pd.DataFrame(
{"TARGET_MEAN": dataframe.groupby(categorical_col)[target].mean()}
),
end="\n\n\n",
)
for col in cat_cols:
target_summary_with_cat(df, "Outcome", col)
def target_summary_with_num(dataframe, target, numerical_col):
print(dataframe.groupby(target).agg({numerical_col: "mean"}), end="\n\n\n")
for col in num_cols:
target_summary_with_num(df, "Outcome", col)
#############################################
# Step 5: Analyze the outlier observation.
#############################################
def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
print(col, check_outlier(df, col))
def grab_outliers(dataframe, col_name, index=False):
low, up = outlier_thresholds(dataframe, col_name)
if (
dataframe[((dataframe[col_name] < low) | (dataframe[col_name] > up))].shape[0]
> 10
):
print(
dataframe[((dataframe[col_name] < low) | (dataframe[col_name] > up))].head()
)
else:
print(dataframe[((dataframe[col_name] < low) | (dataframe[col_name] > up))])
if index:
outlier_index = dataframe[
((dataframe[col_name] < low) | (dataframe[col_name] > up))
].index
return outlier_index
for col in num_cols:
print(col, grab_outliers(df, col))
#############################################
# Step 6: Perform missing observation analysis.
#############################################
df.isnull().values.any()
def missing_values_table(dataframe, na_name=False):
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
ratio = (
dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100
).sort_values(ascending=False)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
missing_values_table(df)
#############################################
# Step 7: Perform correlation analysis.
#############################################
corr = df[num_cols].corr()
sns.set(rc={"figure.figsize": (12, 12)})
sns.heatmap(corr, cmap="RdBu")
plt.show()
#############################################
# Task 2: Feature Engineering
#############################################
#############################################
# Step 1: Take necessary actions for missing and outlier values. There are no missing
# observations in the data set, but Glucose, Insulin etc. Observation units containing
# a value of 0 in the variables may represent the missing value. For example; a person's
# glucose or insulin value will not be 0. Considering this situation, you can assign the zero
# values to the relevant values as NaN and then apply the operations to the missing values.
#############################################
def num_summary(dataframe, numerical_col):
quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
print(dataframe[numerical_col].describe(quantiles).T)
for col in num_cols:
num_summary(df, col)
print(df.describe().T)
#############################################
# Let's replace the values that shouldn't be "0" with NaN;
#############################################
col = ["Glucose", "BloodPressure", "SkinThickness", "Insulin", "BMI"]
df[col] = df[col].replace(0, np.nan)
df[col]
df.isnull().sum()
(df.isnull().sum() / df.shape[0] * 100).sort_values(ascending=False)
msno.matrix(df)
plt.show()
msno.heatmap(df)
df["Insulin"] = df["Insulin"].fillna(df.groupby("Outcome")["Insulin"].transform("mean"))
df["Insulin"]
df["SkinThickness"] = df["SkinThickness"].fillna(
df.groupby("Outcome")["SkinThickness"].transform("mean")
)
df["SkinThickness"]
df["BloodPressure"] = df["BloodPressure"].fillna(
df.groupby("Outcome")["BloodPressure"].transform("mean")
)
df["BloodPressure"]
df["BMI"] = df["BMI"].fillna(df.groupby("Outcome")["BMI"].transform("mean"))
df["BMI"]
df["Glucose"] = df["Glucose"].fillna(df.groupby("Outcome")["Glucose"].transform("mean"))
df["Glucose"]
#############################################
# Step 2: Create new variables.
#############################################
df.loc[(df["Glucose"] < 70), "GLUCOSE_CAT"] = "hipoglisemi"
df.loc[(df["Glucose"] >= 70) & (df["Glucose"] < 100), "GLUCOSE_CAT"] = "normal"
df.loc[
(df["Glucose"] >= 100) & (df["Glucose"] < 126), "GLUCOSE_CAT"
] = "imparied glucose"
df.loc[(df["Glucose"] >= 126), "GLUCOSE_CAT"] = "hiperglisemi"
df.groupby("GLUCOSE_CAT").agg({"Outcome": ["mean", "count"]})
df.head(30)
df.loc[(df["Age"] >= 18) & (df["Age"] < 30), "AGE_CAT"] = "young_women_"
df.loc[(df["Age"] >= 30) & (df["Age"] < 45), "AGE_CAT"] = "mature_women"
df.loc[(df["Age"] >= 45) & (df["Age"] < 65), "AGE_CAT"] = "middle_age"
df.loc[(df["Age"] >= 65) & (df["Age"] < 75), "AGE_CAT"] = "old_age"
df.loc[(df["Age"] >= 75), "AGE_CAT"] = "elder_age"
df.groupby("AGE_CAT").agg({"Outcome": ["mean", "count"]})
df
df.loc[(df["BMI"] < 16), "BMI_CAT"] = "overweak"
df.loc[(df["BMI"] >= 16) & (df["BMI"] < 18.5), "BMI_CAT"] = "weak"
df.loc[(df["BMI"] >= 18.5) & (df["BMI"] < 25), "BMI_CAT"] = "normal"
df.loc[(df["BMI"] >= 25) & (df["BMI"] < 30), "BMI_CAT"] = "overweight"
df.loc[(df["BMI"] >= 30) & (df["BMI"] < 35), "BMI_CAT"] = "1st_Obese"
df.loc[(df["BMI"] >= 35) & (df["BMI"] < 45), "BMI_CAT"] = "2nd_Obese"
df.loc[(df["BMI"] >= 45), "BMI_CAT"] = "3rd_Obese"
df.groupby("BMI_CAT").agg({"Outcome": ["mean", "count"]})
df
df.loc[(df["BloodPressure"] < 70), "DIASTOLIC_CAT"] = "low"
df.loc[(df["BloodPressure"] >= 70) & (df["BMI"] < 90), "DIASTOLIC_CAT"] = "normal"
df.loc[(df["BloodPressure"] >= 90), "DIASTOLIC_CAT"] = "high"
df.groupby("DIASTOLIC_CAT").agg({"Outcome": ["mean", "count"]})
df
df.loc[(df["Insulin"] < 120), "INSULIN_CAT"] = "normal"
df.loc[(df["Insulin"] >= 120), "INSULIN_CAT"] = "anormal"
df.groupby("INSULIN_CAT").agg({"Outcome": ["mean", "count"]})
df
df.loc[(df["Pregnancies"] == 0), "PREG_CAT"] = "unpregnant"
df.loc[(df["Pregnancies"] > 0) & (df["Pregnancies"] <= 5), "PREG_CAT"] = "normal"
df.loc[(df["Pregnancies"] > 5) & (df["Pregnancies"] <= 10), "PREG_CAT"] = "high"
df.loc[(df["Pregnancies"] > 10), "PREG_CAT"] = "very high"
df.groupby("PREG_CAT").agg({"Outcome": ["mean", "count"]})
df
#############################################
# Step 3: Perform the encoding operations.
#############################################
le = LabelEncoder()
binary_cols = [
col
for col in df.columns
if df[col].dtype not in [int, float] and df[col].nunique() == 2
]
def label_encoder(dataframe, binary_col):
labelencoder = LabelEncoder()
dataframe[binary_col] = labelencoder.fit_transform(dataframe[binary_col])
return dataframe
for col in binary_cols:
df = label_encoder(df, col)
def one_hot_encoder(dataframe, categorical_cols, drop_first=True):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
ohe_cols = [col for col in df.columns if 10 >= df[col].nunique() > 2]
df = one_hot_encoder(df, ohe_cols)
df.head()
#############################################
# Step 4: Standardize for numeric variables.
#############################################
scaler = StandardScaler()
df[num_cols] = scaler.fit_transform(df[num_cols])
df[num_cols].head()
df.head()
#############################################
# Step 5: Create the model.
#############################################
y = df["Outcome"]
X = df.drop(["Outcome"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=17
)
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(random_state=46).fit(X_train, y_train)
y_pred = rf_model.predict(X_test)
accuracy_score(y_pred, y_test)
from sklearn.metrics import recall_score, precision_score, f1_score, roc_auc_score
print(f"Accuracy: {round(accuracy_score(y_pred, y_test), 2)}")
print(f"Recall: {round(recall_score(y_pred,y_test),3)}")
print(f"Precision: {round(precision_score(y_pred,y_test), 2)}")
print(f"F1: {round(f1_score(y_pred,y_test), 2)}")
print(f"Auc: {round(roc_auc_score(y_pred,y_test), 2)}")
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
plt.tight_layout()
plt.show()
if save:
plt.savefig("importances.png")
plot_importance(rf_model, X_train)
|
# # Installing some necessary packages
# # Importing Libraries and data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, RandomizedSearchCV, GridSearchCV
from sklearn.metrics import (
mean_squared_error,
accuracy_score,
classification_report,
confusion_matrix,
)
from autoviz.classify_method import data_cleaning_suggestions, data_suggestions
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
df = pd.read_csv(
"/kaggle/input/salaries-of-professionals-working-in-data-science/salaries.csv"
)
df
# # Cleaning and Exploring the Data
df.isnull().sum().sort_values(ascending=False)
# #### Since we have no null values, we can move ahead with some data analysis.
data_cleaning_suggestions(df)
df["remote_ratio"].unique()
df["work_year"].unique()
# Select columns of object datatype
object_cols = df.select_dtypes(include=["object"]).columns.tolist()
print(
f"Columns with object datatype are: \n{object_cols}\n\nTheir Unique values are as follows: \n"
)
# Loop through each object column, extract unique values and print them
for col in object_cols:
unique_vals = df[col].unique()
print(f"Unique values of {col}:\n{unique_vals}\n")
# # Some Feature Engineering
import pycountry
countries_dict = {}
for country in pycountry.countries:
countries_dict[country.alpha_2] = country.name.title()
print(countries_dict)
# stripping whitespace reduces the risk of NA values when mapping
df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
# map country codes to names and replace the values in the 'company_location' and 'employee_residence' columns
df["company_location"] = df["company_location"].map(countries_dict)
df["employee_residence"] = df["employee_residence"].map(countries_dict)
df
from forex_python.converter import CurrencyRates
# Create a CurrencyRates object
c = CurrencyRates()
# Get the current exchange rate from USD to INR
exchange_rate = c.get_rate("USD", "INR")
# Convert salary to INR and create a new column
df["salary_inr"] = df["salary_in_usd"] * exchange_rate
df.drop(["salary", "salary_currency", "salary_in_usd"], axis=1, inplace=True)
df
# # EDA to understand the data
# Convert 'Year' column to integer data type
# Group by year and count number of jobs
jobs_per_year = df.groupby("work_year")["job_title"].count()
# Create a line plot to visualize the trend of jobs per year
jobs_per_year.plot(kind="line", figsize=(8, 6), color="blue")
# Add plot labels and title
plt.xlabel("Year")
plt.ylabel("Number of Jobs")
plt.title("Trend of Jobs per Year")
# Show plot
plt.show()
# set the color palette
colors = sns.color_palette("magma")
# get the value counts of the "designation" column and sort them in descending order
designation_counts = df["job_title"].value_counts().sort_values(ascending=False)
# plot the value counts using a horizontal bar plot
top_designations = designation_counts[:10]
ax = top_designations.plot(kind="barh", color=colors)
# set the plot title and axis labels
ax.set_title("Count of top 10 designations", fontsize=14)
ax.set_xlabel("Count", fontsize=12)
ax.set_ylabel("Designation", fontsize=12)
# adjust the x-axis tick labels to be more readable
ax.tick_params(axis="x", labelsize=10, rotation=0)
# remove the top and right spines from the plot
sns.despine(top=True, right=True)
# display the plot
plt.show()
import nltk
from nltk.probability import FreqDist
# download the stopwords and punkt corpora from NLTK (punkt is required by word_tokenize)
nltk.download("stopwords")
nltk.download("punkt")
# tokenize each job title in the 'job_title' column and create a list of all tokens
tokens = []
for title in df["job_title"]:
title_tokens = nltk.word_tokenize(title)
tokens += title_tokens
# remove stopwords and non-alphabetic tokens
stopwords = set(nltk.corpus.stopwords.words("english"))
tokens = [
token.lower()
for token in tokens
if token.isalpha() and token.lower() not in stopwords
]
# calculate the frequency distribution of tokens and display the top 20 most common tokens
fdist = FreqDist(tokens)
top_10 = fdist.most_common(10)
df_token = pd.DataFrame(top_10, columns=["token", "count"])
sns.barplot(x="token", y="count", data=df_token)
# set the color palette
colors = sns.color_palette("rocket")
# get the value counts of the "Company Type" column
company_counts = df["company_size"].value_counts()
# plot the value counts using a horizontal bar plot
top_companies = company_counts[:8]
ax = top_companies.plot(kind="barh", color=colors)
# set the plot title and axis labels
ax.set_title("Count by company size", fontsize=14)
ax.set_xlabel("Count", fontsize=12)
ax.set_ylabel("Company Type", fontsize=12)
# adjust the x-axis tick labels to be more readable
ax.tick_params(axis="x", labelsize=10, rotation=0)
# remove the top and right spines from the plot
sns.despine(top=True, right=True)
# display the plot
plt.show()
# set the color palette
colors = sns.color_palette("rocket")
# get the value counts of the "Company Location" column
location_counts = df["company_location"].value_counts()
# plot the value counts using a vertical bar plot
top_locations = location_counts[:5]
ax = top_locations.plot(kind="bar", color=colors)
# set the plot title and axis labels
ax.set_title("Top 5 Company Locations", fontsize=14)
ax.set_xlabel("Location", fontsize=12)
ax.set_ylabel("Count", fontsize=12)
# adjust the y-axis tick labels to be more readable
ax.tick_params(axis="x", labelsize=10, rotation=20)
# remove the top and right spines from the plot
sns.despine(top=True, right=True)
# display the plot
plt.show()
# calculate the remote work ratio by designation
remote_work_ratio = df.groupby("job_title")["remote_ratio"].mean()
# sort the remote work ratio values in descending order
top_designations = remote_work_ratio.sort_values(ascending=True)[:5]
# create a vertical bar chart of the remote work ratio by designation
colors = sns.color_palette("Greens", len(top_designations))
ax = top_designations.plot(kind="bar", color=colors)
# set the plot title and axis labels
ax.set_title("Remote Work Ratio by Top 5 Designations", fontsize=14)
ax.set_xlabel("Designation", fontsize=12)
ax.set_ylabel("Remote Work Ratio", fontsize=12)
# adjust the y-axis tick labels to be more readable
ax.tick_params(axis="x", labelsize=10, rotation=30)
# remove the top and right spines from the plot
sns.despine(top=True, right=True)
# display the plot
plt.show()
from sklearn.preprocessing import LabelEncoder
# LabelEncoder
le = LabelEncoder()
cols = ["experience_level", "employment_type", "company_size"]
for col in cols:
df[col] = le.fit_transform(df[col])
df["work_year"] = le.fit_transform(df["work_year"])
cols = ["job_title", "company_location", "employee_residence"]
for col in cols:
print(f"Number of unique values in {col} are: {df[col].nunique()}")
# # Model Fitting
# #### using DictVectorizer to process the text data
from sklearn.feature_extraction import DictVectorizer
# Create a dict with the relevant columns for each row
relevant_cols_dict = df[
["job_title", "company_location", "employee_residence"]
].to_dict("records")
# Initialize the DictVectorizer object
vec = DictVectorizer()
# Fit and transform the data using DictVectorizer
transformed_data = vec.fit_transform(relevant_cols_dict).toarray()
# Get the feature names generated by DictVectorizer
feature_names = vec.get_feature_names()
# Create a new DataFrame with the transformed data and feature names
df_transformed = pd.DataFrame(transformed_data, columns=feature_names)
# Concatenate the original DataFrame with the transformed DataFrame
df_final = pd.concat([df, df_transformed], axis=1)
df_final.drop(
["job_title", "employee_residence", "company_location"], axis=1, inplace=True
)
# Print the final DataFrame
df_final
X = df_final.drop(["salary_inr"], axis=1)
y = df_final["salary_inr"]
# ### Using TfidfVectorizer to process the text data
from catboost import CatBoostRegressor
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import hstack
# Apply NLP to text columns
job_title_vectorizer = TfidfVectorizer()
employee_residence_vectorizer = TfidfVectorizer()
company_location_vectorizer = TfidfVectorizer()
# Fit-transform the features
job_title_transformed = job_title_vectorizer.fit_transform(df["job_title"])
employee_residence_transformed = employee_residence_vectorizer.fit_transform(
df["employee_residence"]
)
company_location_transformed = company_location_vectorizer.fit_transform(
df["company_location"]
)
# Concatenate the transformed features horizontally with the original dataset
X = hstack(
[
df.drop(
columns=[
"salary_inr",
"job_title",
"employee_residence",
"company_location",
]
),
job_title_transformed,
employee_residence_transformed,
company_location_transformed,
]
)
# Get the target variable
y = df["salary_inr"]
df
# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Create and fit the CatBoost model
catboost_model = CatBoostRegressor(verbose=False)
catboost_model.fit(X_train, y_train)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Predict on test dataset
y_pred = catboost_model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print("MSE on test set: ", mse)
r2 = r2_score(y_test, y_pred)
print("R2 on test set: ", r2)
# df['job_year'] = df['work_year'].astype(str) + '-' + df['job_title'].astype(str)
# df.drop(['job_title','work_year'], axis=1, inplace=True)
# To create a job recommendation system using this dataset, we can use a content-based filtering approach. The idea behind content-based filtering is to recommend jobs that are similar to the ones a user has previously shown interest in. In this case, we will consider the following features as important for similarity:
# - experience_level
# - employment_type
# - job_title
# - employee_residence
# - remote_ratio
# - company_location
# - company_size
# We can then use cosine similarity as a measure of distance between two jobs based on these features.
# Here are the steps we can follow to build the job recommendation system (see the sketch below):
# 1. Load the dataset into a Pandas dataframe.
# 2. Preprocess the data by removing any irrelevant columns and handling missing values (if any).
# 3. Encode the categorical variables using one-hot encoding or label encoding.
# 4. Combine the encoded features into a single feature matrix.
# 5. Compute the cosine similarity matrix based on the feature matrix.
# 6. For a given job title, find the top N most similar jobs based on cosine similarity scores.
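# A minimal sketch of the content-based approach described above. It assumes df still contains the listed columns; the one-hot encoding via get_dummies and the helper function recommend_similar_jobs are illustrative choices, not part of the original notebook.
from sklearn.metrics.pairwise import cosine_similarity

# One-hot encode the similarity features and compute the pairwise cosine similarity matrix
sim_features = ["experience_level", "employment_type", "job_title",
                "employee_residence", "remote_ratio", "company_location", "company_size"]
feature_matrix = pd.get_dummies(df[sim_features].astype(str))
similarity = cosine_similarity(feature_matrix)

def recommend_similar_jobs(row_index, n=5):
    # Return the n postings most similar to the given row (excluding the row itself)
    scores = pd.Series(similarity[row_index], index=df.index).drop(index=row_index)
    top_idx = scores.nlargest(n).index
    return df.loc[top_idx, ["job_title", "company_location", "salary_inr"]]

recommend_similar_jobs(0)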
# from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.metrics.pairwise import cosine_similarity
# # Load the dataset
# df = pd.read_csv('job_dataset.csv')
# # Preprocess the data to remove any irrelevant or missing values
# df = df.dropna()
# df = df.drop_duplicates(subset=['job_title'])
# # Define the input parameters for the user
# location = 'India'
# salary = 1000000
# sector = 'Data Science'
# job_type = 'FT'
# # Filter the jobs based on the user's input parameters
# df_filtered = df[(df['employee_residence'] == location) &
# (df['salary_inr'] >= salary) &
# (df['job_title'].str.contains(sector)) &
# (df['employment_type'].str.contains(job_type))]
# # Vectorize the job descriptions using TfidfVectorizer
# tfidf = TfidfVectorizer(stop_words='english')
# job_desc_tfidf = tfidf.fit_transform(df_filtered['job_description'])
# # Calculate the cosine similarity between the job descriptions and the user input parameters
# cosine_similarity = cosine_similarity(job_desc_tfidf)
# # Get the top recommended job title based on the highest cosine similarity score
# top_job_index = cosine_similarity[0].argsort()[-1]
# recommended_job_title = df_filtered.iloc[top_job_index]['job_title']
# # Print the recommended job title for the user
# print(f"Recommended Job Title: {recommended_job_title}")
# from sklearn.metrics.pairwise import cosine_similarity
# # Preprocess the data
# df['employment_type'] = df['employment_type'].astype('category')
# df['job_title'] = df['job_title'].astype('category')
# df['employee_residence'] = df['employee_residence'].astype('category')
# df['company_location'] = df['company_location'].astype('category')
# df['company_size'] = df['company_size'].astype('category')
# # User inputs
# user_residence = input("Where do you live? ")
# user_salary = int(input("What is your expected salary? "))
# user_sector = input("What sector do you want to work in? ")
# user_employment_type = input("What type of employment do you prefer? (FT/PT/Remote) ")
# # Filter the dataset based on user inputs
# filtered_df = df[(df['employee_residence'] == user_residence) &
# (df['salary_inr'] >= user_salary) &
# (df['job_title'].str.contains(user_sector, case=False)) &
# (df['employment_type'] == user_employment_type)]
# # Calculate the cosine similarity between the filtered data and user inputs
# vectors = pd.concat([filtered_df[['work_year', 'remote_ratio', 'salary_inr']],
# pd.get_dummies(filtered_df['job_title']),
# pd.get_dummies(filtered_df['company_size'])], axis=1)
# user_vector = pd.DataFrame([[0, 0, user_salary, 0, 0, 1, 0, 0, 0, 0]], columns=vectors.columns)
# cosine_sim = cosine_similarity(user_vector, vectors)[0]
# # Find the most similar job and return its name
# most_similar_job_id = cosine_sim.argmax()
# most_similar_job_name = filtered_df.iloc[most_similar_job_id]['job_title']
# print(f"Recommended job: {most_similar_job_name}")
|
# first neural network with keras tutorial
from numpy import loadtxt
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# load the dataset
data = pd.read_csv("/kaggle/input/lab-work-co2/CO2 Emissions.csv")
data.describe()
data.isnull().sum()
data.isna().sum()
X = data[["Cylinders", "Fuel Consumption Comb (mpg)"]]
print(X.head(5))
y = data[["CO2 Emissions(g/km)"]]
print(y.head(5))
from sklearn.preprocessing import MinMaxScaler
# fit scaler on training data using normalization
normScalerX = MinMaxScaler().fit(X)
normScalery = MinMaxScaler().fit(y.values.reshape(-1, 1))
# transform (scale) the data
X_scaled_norm = normScalerX.transform(X)
y_scaled_norm = normScalery.transform(y.values.reshape(-1, 1))
model = Sequential()
model.add(Dense(64, activation="relu", input_shape=(2,)))
model.add(Dense(32, activation="relu"))
model.add(Dense(1, activation="linear"))
# Compile your model
model.compile(
loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"]
)
# Train your model
model.fit(
X_scaled_norm,
y_scaled_norm,
batch_size=50,
epochs=100,
validation_data=(X_scaled_norm, y_scaled_norm),
)
# Evaluate your model
test_loss, test_mae = model.evaluate(X_scaled_norm, y_scaled_norm)
from sklearn.metrics import r2_score
y_pred = model.predict(X_scaled_norm)
print("The R2 score for the sklearn ANN model:", (r2_score(y_pred, y_scaled_norm)))
# **Part 2**
# first neural network with keras tutorial
from numpy import loadtxt
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# load the dataset
data = pd.read_csv("/kaggle/input/4th-dataset/internet_connection_data.csv")
data.describe()
print(data.isnull().sum())
print(data.isna().sum())
from sklearn.preprocessing import LabelEncoder
columns = ["Category"]
for feature in columns:
le = LabelEncoder()
data[feature] = le.fit_transform(data[feature])
data.tail()
X = data[
[
"Memory_PssTotal",
"Memory_PssClean",
"Memory_SharedDirty",
"Memory_PrivateDirty",
"Memory_SharedClean",
"Memory_PrivateClean",
"Memory_HeapSize",
"Memory_HeapAlloc",
"Memory_HeapFree",
"Memory_Views",
"API_Command_java.lang.Runtime_exec",
"API_Command_java.lang.ProcessBuilder_start",
"API_WebView_android.webkit.WebView_loadUrl",
"API_FileIO_android.content.ContextWrapper_deleteFile",
"API_Database_android.database.sqlite.SQLiteDatabase_update",
"API_IPC_android.content.ContextWrapper_stopService",
"API_Binder_android.app.ContextImpl_registerReceiver",
"API_Crypto_javax.crypto.spec.SecretKeySpec_$init",
"API_Crypto-Hash_java.security.MessageDigest_digest",
"API_Crypto-Hash_java.security.MessageDigest_update",
"API_DeviceInfo_android.telephony.TelephonyManager_getDeviceId",
"API_DeviceInfo_android.telephony.TelephonyManager_getSubscriberId",
"API_DeviceInfo_android.telephony.TelephonyManager_getSimOperatorName",
"API_Network_java.net.URL_openConnection",
"API_Network_com.android.okhttp.internal.huc.HttpURLConnectionImpl_getInputStream",
"API_DexClassLoader_dalvik.system.BaseDexClassLoader_findResource",
"API_DexClassLoader_dalvik.system.DexFile_loadDex",
"API_Base64_android.util.Base64_decode",
"API_SystemManager_android.app.ApplicationPackageManager_setComponentEnabledSetting",
"API_SystemManager_android.content.BroadcastReceiver_abortBroadcast",
"API_SMS_android.telephony.SmsManager_sendTextMessage",
"API_DeviceData_android.content.ContentResolver_registerContentObserver",
"Network_TotalReceivedBytes",
"Network_TotalReceivedPackets",
"Network_TotalTransmittedBytes",
"Network_TotalTransmittedPackets",
]
]
print(X.head(5))
y = data[["Category"]]
print(y.head(5))
from sklearn.preprocessing import MinMaxScaler
# fit scaler on the feature matrix using normalization
normScalerX = MinMaxScaler().fit(X)
# transform (scale) the features; the class labels are kept as integers because
# sparse_categorical_crossentropy expects integer class indices, not min-max scaled floats
X_scaled_norm = normScalerX.transform(X)
y_labels = y.values.ravel()
X_scaled_norm.shape
model = Sequential()
model.add(Dense(64, activation="relu", kernel_initializer="normal", input_shape=(36,)))
model.add(Dense(32, activation="relu"))
model.add(Dense(10, activation="softmax"))
# Compile the model
model.compile(
optimizer="Adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Train the model
model.fit(
    X_scaled_norm,
    y_labels,
    epochs=200,
    batch_size=100,
    validation_data=(X_scaled_norm, y_labels),
)
# Evaluate the model
test_loss, test_acc = model.evaluate(X_scaled_norm, y_labels)
print("Test accuracy:", test_acc)
|
# # Takeaways
# - There are 16941 recordings in total, 192.4 hours, 82.6 GB uncompressed (32 kHz with 4 bytes per frame)
# - Just in Kenya: 2498 recordings, 19.9 hours, 8.6 GB uncompressed
# - Note that kaggle's P100 machines have 16 GB of GPU RAM and 12 GB of CPU RAM.
# - There are 264 unique labels in total, 231 unique primary labels in kenya + 1 other secondary label in kenya (note that geo boundary is crude)
# - So some birds don't appear in Kenya, but will be evaluated? (confirmed from hosts that [not all 264 species occur in the test data](https://www.kaggle.com/competitions/birdclef-2023/discussion/396101#2189267))
# - The top ~11 most frequent birds recorded in the world are mostly absent from Kenya
# - Only 2305 or 13.6% of the recordings have secondary labels
# - 7 birds appear more frequently as secondary labels than primary labels
# - 4 primary labels appear exclusively in kenya
# # All recordings
import pandas as pd
df = pd.read_csv("/kaggle/input/birdclef-2023/train_metadata.csv")
df
df[df.filename.str.contains("XC138886")]
import plotly.express as px
primary_label_counts = df.primary_label.value_counts()
px.bar(
x=primary_label_counts.keys(),
y=primary_label_counts.values,
title="Distribution of primary labels",
labels={"x": "bird", "y": "# of recordings"},
).show()
from shapely.geometry import Point
import geopandas as gpd
geometry = [Point(xy) for xy in zip(df["longitude"], df["latitude"])]
gdf = gpd.GeoDataFrame(df, geometry=geometry)
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
ax = world.plot(figsize=(10, 6))
ax.set_axis_off()
ax.set_title("Distribution of recordings")
gdf.plot(ax=ax, marker="o", color="pink", markersize=1)
# # Just in kenya
kenya = world[world["name"] == "Kenya"]
kenya_poly = kenya["geometry"].item()
df["is_in_kenya"] = [kenya_poly.contains(p) for p in geometry]
kenya_df = df[df.is_in_kenya].reset_index()
kenya_df
kenya_primary_label_counts = kenya_df.primary_label.value_counts()
px.bar(
x=kenya_primary_label_counts.keys(),
y=kenya_primary_label_counts.values,
title="Distribution of primary labels in kenya",
labels={"x": "bird", "y": "# of recordings"},
).show()
kenya_primary_label_counts
kenya_geometry = [Point(xy) for xy in zip(kenya_df["longitude"], kenya_df["latitude"])]
gdf = gpd.GeoDataFrame(kenya_df, geometry=kenya_geometry)
ax = kenya.plot(figsize=(10, 6))
ax.set_axis_off()
ax.set_title("Distribution of recordings in Kenya")
gdf.plot(ax=ax, marker="o", color="pink", markersize=1)
# # The world compared to Kenya
counts_df = pd.concat(
[
pd.DataFrame(
{
"label": primary_label_counts.keys(),
"num_recordings": 100
* primary_label_counts.values
/ primary_label_counts.values.sum(),
"place": "world",
}
),
pd.DataFrame(
{
"label": kenya_primary_label_counts.keys(),
"num_recordings": 100
* kenya_primary_label_counts.values
/ kenya_primary_label_counts.values.sum(),
"place": "kenya",
}
),
]
)
px.bar(
data_frame=counts_df,
x="label",
y="num_recordings",
color="place",
title="Distribution of primary labels, as a % of the total recordings in the world or in keyna",
barmode="group",
labels={"num_recordings": "% of recordings"},
).show()
# # Just outside Kenya
nonkenya_primary_label_counts = df[-df.is_in_kenya].primary_label.value_counts()
counts_df = pd.concat(
[
pd.DataFrame(
{
"label": nonkenya_primary_label_counts.keys(),
"num_recordings": 100
* nonkenya_primary_label_counts.values
/ nonkenya_primary_label_counts.values.sum(),
"place": "not keyna",
}
),
pd.DataFrame(
{
"label": kenya_primary_label_counts.keys(),
"num_recordings": 100
* kenya_primary_label_counts.values
/ kenya_primary_label_counts.values.sum(),
"place": "kenya",
}
),
]
)
px.bar(
data_frame=counts_df,
x="label",
y="num_recordings",
color="place",
title="Distribution of primary labels, as a % of the total recordings not in kenya vs in keyna",
barmode="group",
labels={"num_recordings": "% of recordings"},
).show()
print("birds exclusive to kenya")
set(df[df.is_in_kenya].primary_label.unique()) - set(
df[-df.is_in_kenya].primary_label.unique()
)
# # Secondary labels
secondary_labels = df[df.secondary_labels != "[]"].reset_index()
labels = [
(lbls[2:-2].split("', '"), is_in_kenya)
for lbls, is_in_kenya in zip(
secondary_labels.secondary_labels,
secondary_labels.is_in_kenya,
)
]
labels = [(lbl, is_in_kenya) for lbls, is_in_kenya in labels for lbl in lbls]
secondary_label_data = pd.DataFrame(
{
"secondary_label": [e[0] for e in labels],
"is_in_kenya": [e[1] for e in labels],
}
)
kenya_primary = set(df[df.is_in_kenya].primary_label.unique())
kenya_secondary = set(
secondary_label_data[secondary_label_data.is_in_kenya].secondary_label
)
not_kenya_primary = set(df[-df.is_in_kenya].primary_label.unique())
not_kenya_secondary = set(
secondary_label_data[-secondary_label_data.is_in_kenya].secondary_label
)
print(
"all kenya birds (including secondary ones):", len(kenya_primary | kenya_secondary)
)
print(
"birds only in kenya (including secondary ones):",
len((kenya_primary | kenya_secondary) - (not_kenya_primary | not_kenya_secondary)),
)
secondary_labels = df[df.secondary_labels != "[]"].reset_index()
print(len(secondary_labels) / len(df))
secondary_labels
labels = [e[2:-2].split("', '") for e in secondary_labels.secondary_labels]
labels = [e for li in labels for e in li]
secondary_label_counts = pd.DataFrame(
{"secondary_label": labels}
).secondary_label.value_counts()
px.bar(
x=secondary_label_counts.keys(),
y=secondary_label_counts.values,
title="Distribution of secondary labels",
labels={"x": "bird", "y": "# of recordings"},
).show()
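# The secondary_labels column stores Python-style list strings (e.g. "['abc1', 'def2']"),
# which the string slicing above handles for the standard format. A hedged alternative
# sketch using the standard library's ast.literal_eval (not used in the original) parses
# the same column more robustly and should reproduce the counts computed above.
import ast
parsed_secondary = df.secondary_labels.apply(ast.literal_eval)
secondary_label_counts_alt = parsed_secondary.explode().dropna().value_counts()
print(secondary_label_counts_alt.head())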
primarey_secondary_counts_df = pd.concat(
[
pd.DataFrame(
{
"label": primary_label_counts.keys(),
"num_recordings": primary_label_counts.values,
"type": "primary",
}
),
pd.DataFrame(
{
"label": secondary_label_counts.keys(),
"num_recordings": secondary_label_counts.values,
"type": "secondary",
}
),
]
)
px.bar(
data_frame=primarey_secondary_counts_df,
x="label",
y="num_recordings",
color="type",
title="Distribution of primary vs secondary labels",
barmode="group",
labels={"num_recordings": "# of recordings"},
).show()
all_birds = (
pd.DataFrame(
{
"label": primary_label_counts.keys(),
"num_primary_recordings": primary_label_counts.values,
}
)
.set_index("label")
.join(
pd.DataFrame(
{
"label": secondary_label_counts.keys(),
"num_secondary_recordings": secondary_label_counts.values,
}
).set_index("label"),
)
)
print("birds that are more frequent as secondary labels:")
all_birds[all_birds.num_primary_recordings < all_birds.num_secondary_recordings]
# # Audio length
import torchaudio
from tqdm import tqdm
from joblib import Parallel, delayed
import os
train_path = "/kaggle/input/birdclef-2023/train_audio/"
metadatas = Parallel(n_jobs=os.cpu_count())(
delayed(lambda filename: torchaudio.info(train_path + filename))(filename)
for filename in tqdm(df.filename)
)
df["num_frames"] = [m.num_frames for m in metadatas]
(
set([m.sample_rate for m in metadatas]),
set([m.encoding for m in metadatas]),
set([m.num_channels for m in metadatas]),
set([m.bits_per_sample for m in metadatas]),
)
sample_rate = metadatas[0].sample_rate
num_samples = df["num_frames"].sum()
num_hours = num_samples / sample_rate / 60 / 60
max_min = df["num_frames"].max() / sample_rate / 60
print("totale # of samples:", num_samples)
print("total hours:", num_hours)
minutes = df["num_frames"] / sample_rate / 60
minutes.describe()
px.histogram(
pd.DataFrame({"minutes": minutes}),
x="minutes",
title="Distribution of recording lengths (minutes)",
)
primary_label_frames = df.groupby("primary_label").num_frames.sum()
primarey_secondary_counts_df = pd.concat(
[
pd.DataFrame(
{
"label": primary_label_counts.keys(),
"percent": 100
* primary_label_counts.values
/ primary_label_counts.values.sum(),
"aggregation": "num_recordings",
}
),
pd.DataFrame(
{
"label": primary_label_frames.keys(),
"percent": 100
* primary_label_frames.values
/ primary_label_frames.values.sum(),
"aggregation": "num_samples",
}
),
]
)
px.bar(
data_frame=primarey_secondary_counts_df,
x="label",
y="percent",
color="aggregation",
title="Distribution of primary labels, # of recordings and sum of samples",
barmode="group",
).show()
num_samples_kenya = df[df.is_in_kenya].num_frames.sum()
num_hours_kenya = num_samples_kenya / sample_rate / 60 / 60
max_min_kenya = df[df.is_in_kenya].num_frames.max() / sample_rate / 60
print("totale # of kenya samples:", num_samples_kenya)
print("total kenya hours:", num_hours_kenya)
minutes_kenya = df[df.is_in_kenya].num_frames / sample_rate / 60
minutes_kenya.describe()
data, rate = torchaudio.load(train_path + df.filename.iloc[0])
bytes_per_sample = data.element_size()
total_gigs = df.num_frames.sum() * bytes_per_sample / 2**30
total_kenya_gigs = df[df.is_in_kenya].num_frames.sum() * bytes_per_sample / 2**30
total_gigs, total_kenya_gigs
# # How filtering affects dataset size
import numpy as np
px.line(
title="Accumulated size of dataset (sorting by filesize)",
y=np.cumsum(sorted(df.num_frames * 4)) / 2**30,
labels={"x": "# of files", "y": "Dataset size (GB)"},
)
line = px.line(
np.cumsum(4 * df.groupby("rating").num_frames.sum()[::-1]) / 2**30,
title="Accumulated size of dataset (sorting by rating, reversed)",
labels={"value": "Dataset size (GB)"},
)
line.layout.update(showlegend=False)
line.show()
# # Saving metadata
df.to_csv("./train_metadata_with_kenya_and_num_frames.csv", index=False)
|
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
from pandas import crosstab
from sklearn.datasets import load_digits
from matplotlib import pyplot as plt
from pyclustering.cluster.kmedoids import kmedoids
# # **Imports UCI ML hand-written digits dataset**
digits = load_digits()
X = digits["data"]
feature_names = digits["feature_names"]
y = digits["target"]
target_names = digits["target_names"]
images = digits["images"]
for i in range(1, 101):
ax = plt.subplot(10, 10, i)
ax.imshow(images[i], cmap="Greys")
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
# # **K-Medoids Clustering**
K = 10 # number of clusters
N = X.shape[0] # total number of digits
# selects K random initial medoids
# the medoids are indicated by their corresponding index in the dataset
initial_medoids = np.random.permutation(N)[:K]
# creates kmedoids object
kmedoids_instance = kmedoids(X, initial_medoids)
# run cluster analysis
kmedoids_instance.process()
# # **Centroids vs Medoids**
medoids = X[kmedoids_instance.get_medoids(), :]
for i in range(K):
ax = plt.subplot(1, 10, i + 1)
ax.imshow(np.reshape(medoids[i], [8, 8]), cmap="Greys")
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
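# A short follow-up sketch (assumed, not in the original notebook): since the true digit
# labels are available, a contingency table shows which digits dominate each medoid
# cluster. This reuses the crosstab import from the top of the notebook.
clusters = kmedoids_instance.get_clusters()
cluster_assignment = np.zeros(N, dtype=int)
for cluster_id, members in enumerate(clusters):
    cluster_assignment[members] = cluster_id
print(crosstab(cluster_assignment, y, rownames=["cluster"], colnames=["digit"]))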
|
# ## Neural network calibration for Heston Model on American options
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
raw_df = pd.read_csv("/kaggle/input/aapl-options-data-2016-2020/aapl_2016_2020.csv")
raw_df
raw_df.info()
columns = raw_df.columns
columns = [s.replace("[", "") for s in columns]
columns = [s.replace("]", "") for s in columns]
columns = [s.replace(" ", "") for s in columns]
raw_df.columns = columns
# ### Define the closed form Heston pricing function for american options
import numpy as np
from scipy.stats import norm
def heston_american(S0, K, T, r, kappa, theta, sigma, rho, V0, option_type):
# Define constants
delta = 1 / 252.0
N = int(T / delta)
tau = np.linspace(delta, T, N)
# Define the characteristic function
def phi(u, kappa, theta, sigma, rho, V0, tau):
xi = -0.5 * u * u + 0.5 * u * 1j
d = np.sqrt(
(sigma * sigma) * (xi - 1j * rho * u) + (kappa - 1j * rho * sigma * u) ** 2
)
g = (kappa - 1j * rho * sigma * u - d) / (kappa - 1j * rho * sigma * u + d)
D = (
(kappa - 1j * rho * sigma * u - d)
/ sigma
/ sigma
* (1 - np.exp(-d * tau))
/ (1 - g * np.exp(-d * tau))
)
C = (
kappa
* theta
/ sigma
/ sigma
* (
(kappa - 1j * rho * sigma * u - d) * tau
- 2 * np.log((1 - g * np.exp(-d * tau)) / (1 - g))
)
)
return np.exp(C + D * V0 + 1j * u * np.log(S0) - u * r * tau)
# Define the integral function
def int_func(u, kappa, theta, sigma, rho, V0, tau, K, option_type):
call_put = 1 if option_type == "C" else -1
return (
call_put
* phi(u - 1j * 0.5, kappa, theta, sigma, rho, V0, tau)
/ (u * u + 0.25)
)
# Define the early exercise function for American options
def early_exercise(price, intrinsic_value, early_exercise_indicator):
return np.where(
early_exercise_indicator > intrinsic_value, early_exercise_indicator, price
)
# Calculate the price of the option at each time step
V = np.zeros((N + 1,))
V[0] = V0
for i in range(1, N + 1):
m = np.zeros((i,))
m[0] = int_func(
0, kappa, theta, sigma, rho, V[i - 1], T - tau[i - 1], K, option_type
).real
for j in range(1, i):
m[j] = (
int_func(
j * np.pi / delta / (i - 1),
kappa,
theta,
sigma,
rho,
V[i - 1],
T - tau[i - 1],
K,
option_type,
).real
* 2
)
V[i] = np.dot(m, np.sin(np.pi * np.arange(1, i + 1) / (i))) * delta / np.pi
early_exercise_indicator = np.maximum(K - S0 * np.exp(-r * tau[i - 1]), 0)
        intrinsic_value = np.maximum(
            K - S0 * np.exp(-r * tau[i - 1]),
            K - S0 * np.exp(-r * tau[i - 1]) * norm.cdf(-d2),
        )
        # NOTE: the original cell is truncated here: the final pricing expression is cut
        # off mid-line and d1/d2 are never defined above, so this routine is incomplete
        # as provided.
        # price = S0 * np.exp(-r * tau[i - 1]) * norm.cdf(d1) - K * np
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import csv
import itertools
import collections
import pywt
from scipy import stats
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import keras
from tensorflow.keras.models import Sequential
from keras.layers import Conv1D, AvgPool1D, Flatten, Dense, Dropout, Softmax
from tensorflow.keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from keras.utils.vis_utils import plot_model
from keras import regularizers
plt.rcParams["figure.figsize"] = (30, 6)
plt.rcParams["lines.linewidth"] = 1
plt.rcParams["lines.color"] = "b"
plt.rcParams["axes.grid"] = True
def denoise(data):
w = pywt.Wavelet("sym4")
maxlev = pywt.dwt_max_level(len(data), w.dec_len)
threshold = 0.04 # Threshold for filtering
coeffs = pywt.wavedec(data, "sym4", level=maxlev)
for i in range(1, len(coeffs)):
coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i]))
datarec = pywt.waverec(coeffs, "sym4")
return datarec
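# A quick illustrative check of the denoise() helper on a synthetic noisy sine wave
# (this example signal is made up for demonstration; the real ECG records are read below).
_t = np.linspace(0, 1, 1024)
_noisy = np.sin(2 * np.pi * 5 * _t) + 0.2 * np.random.randn(1024)
plt.plot(_noisy, label="noisy")
plt.plot(denoise(_noisy), label="denoised")
plt.legend()
plt.show()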
path = "/kaggle/input/btech-project/"
window_size = 180
maximum_counting = 10000
classes = ["N", "L", "R", "A", "V"]
n_classes = len(classes)
count_classes = [0] * n_classes
X = list()
y = list()
# import os
# try:
# filenames = next(os.walk(path))[2]
# except StopIteration:
# pass # Some error handling here
# # Split and save .csv , .txt
# records = list()
# annotations = list()
# filenames.sort()
# Read files
filenames = next(os.walk(path))[2]
# Split and save .csv , .txt
records = list()
annotations = list()
filenames.sort()
# segrefating filenames and annotations
for f in filenames:
filename, file_extension = os.path.splitext(f)
# *.csv
if file_extension == ".csv":
records.append(path + filename + file_extension)
# *.txt
elif file_extension == ".txt":
annotations.append(path + filename + file_extension)
for r in range(0, len(records)):
signals = []
with open(records[r], "rt") as csvfile:
spamreader = csv.reader(csvfile, delimiter=",", quotechar="|") # read CSV file\
row_index = -1
for row in spamreader:
if row_index >= 0:
# print(row[1])
signals.insert(row_index, int(float(row[1])))
row_index += 1
# Plot an example to the signals
if r == 6:
# Plot each patient's signal
plt.title(records[6] + " Wave")
plt.plot(signals[0:700])
plt.show()
signals = denoise(signals)
# Plot an example to the signals
if r == 6:
# Plot each patient's signal
plt.title(records[6] + " wave after denoised")
plt.plot(signals[0:700])
plt.show()
signals = stats.zscore(signals)
# Plot an example to the signals
if r == 6:
# Plot each patient's signal
plt.title(records[6] + " wave after z-score normalization ")
plt.plot(signals[0:700])
plt.show()
# Read anotations: R position and Arrhythmia class
example_beat_printed = False
with open(annotations[r], "r") as fileID:
data = fileID.readlines()
beat = list()
for d in range(1, len(data)): # 0 index is Chart Head
splitted = data[d].split(
" "
) # The split() method splits a string into a list.
splitted = filter(None, splitted)
next(splitted) # Time... Clipping
pos = int(next(splitted)) # Sample ID
arrhythmia_type = next(splitted) # Type
if arrhythmia_type in classes:
arrhythmia_index = classes.index(arrhythmia_type)
# if count_classes[arrhythmia_index] > maximum_counting: # avoid overfitting
# pass
# else:
count_classes[arrhythmia_index] += 1
if window_size <= pos and pos < (len(signals) - window_size):
beat = signals[
pos - window_size : pos + window_size
] ## REPLACE WITH R-PEAK DETECTION
# Plot an example to a beat
if r == 6 and not example_beat_printed:
plt.title("A Beat from " + records[6] + " Wave")
plt.plot(beat)
plt.show()
example_beat_printed = True
X.append(beat)
y.append(arrhythmia_index)
# data shape
print(np.shape(X), np.shape(y))
for i in range(0, len(X)):
X[i] = np.append(X[i], y[i])
# print(y[i])
# X[i].append(y[i])
print(np.shape(X))
len(y)
X_train_df = pd.DataFrame(X)
per_class = X_train_df[X_train_df.shape[1] - 1].value_counts()
print(per_class)
plt.figure(figsize=(30, 10))
my_circle = plt.Circle((0, 0), 0.7, color="white")
plt.pie(
per_class,
labels=["N", "L", "R", "A", "V"],
colors=["tab:blue", "tab:orange", "tab:purple", "tab:olive", "tab:green"],
autopct="%1.1f%%",
)
p = plt.gcf()
p.gca().add_artist(my_circle)
plt.show()
train, test = train_test_split(X_train_df, test_size=0.20)
print("X_train : ", np.shape(train))
print("X_test : ", np.shape(test))
target_train = train[train.shape[1] - 1]
target_test = test[test.shape[1] - 1]
train_y = to_categorical(target_train)
test_y = to_categorical(target_test)
print(np.shape(train_y), np.shape(test_y))
train_x = train.iloc[:, : train.shape[1] - 1].values
test_x = test.iloc[:, : test.shape[1] - 1].values
train_x = train_x.reshape(len(train_x), train_x.shape[1], 1)
test_x = test_x.reshape(len(test_x), test_x.shape[1], 1)
print(np.shape(train_x), np.shape(test_x))
df_train_x = pd.DataFrame(train)
df_train_y = pd.DataFrame(target_train)
df_train_x
# NOTE: each beat row has 361 columns (indices 0-360) with the class id in column 360,
# so this rename of the non-existent column 361 is a no-op; the label column keeps the
# name 360 and is read back later as the string "360" after the CSV round trip.
df_train_x.rename(columns={361: "class"}, inplace=True)
id_to_label = {0: "N", 1: "L", 2: "R", 3: "V", 4: "A"}
df_train_x["label"] = df_train_x.iloc[:, -1].map(id_to_label)
print(df_train_x.info())
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import AdamW, Adam
class Config:
csv_path = ""
seed = 2021
device = "cuda:0" if torch.cuda.is_available() else "cpu"
def seed_everything(seed: int):
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
config = Config()
seed_everything(config.seed)
df_train_x.to_csv("data.csv", index=False)
config.csv_path = "data.csv"
df_train_x
df_train_x["label"].value_counts()
import seaborn as sns
percentages = [
count / df_train_x.shape[0] * 100 for count in df_train_x["label"].value_counts()
]
fig, ax = plt.subplots(figsize=(12, 6))
sns.countplot(
x=df_train_x["label"],
ax=ax,
palette="bright",
order=df_train_x["label"].value_counts().index,
)
ax.set_xticklabels(ax.get_xticklabels(), rotation=15)
for percentage, count, p in zip(
percentages, df_train_x["label"].value_counts(sort=True).values, ax.patches
):
percentage = f"{np.round(percentage, 2)}%"
x = p.get_x() + p.get_width() / 2 - 0.4
y = p.get_y() + p.get_height()
ax.annotate(
str(percentage) + " / " + str(count), (x, y), fontsize=12, fontweight="bold"
)
plt.savefig(
"data_dist.png",
facecolor="w",
edgecolor="w",
format="png",
transparent=False,
bbox_inches="tight",
pad_inches=0.1,
)
plt.savefig(
"data_dist.svg",
facecolor="w",
edgecolor="w",
format="svg",
transparent=False,
bbox_inches="tight",
pad_inches=0.1,
)
class ECGDataset(Dataset):
def __init__(self, df):
self.df = df
self.data_columns = self.df.columns[:-2].tolist()
def __getitem__(self, idx):
signal = self.df.loc[idx, self.data_columns].astype("float32")
signal = torch.FloatTensor([signal.values])
target = torch.LongTensor(np.array(self.df.loc[idx, "360"]))
return signal, target
def __len__(self):
return len(self.df)
def get_dataloader(label_name, batch_size):
df = pd.read_csv(config.csv_path)
df = df.loc[df["label"] == label_name]
df.reset_index(drop=True, inplace=True)
dataset = ECGDataset(df)
dataloader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=0)
return dataloader
df_dataloader = get_dataloader(label_name="A", batch_size=96)
print(df_dataloader)
print(len(df_dataloader))
next(iter(df_dataloader))
# x1.shape, y1.shape
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.fc1 = nn.Linear(256, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 360)
self.rnn_layer = nn.LSTM(
input_size=360,
hidden_size=128,
num_layers=1,
bidirectional=True,
batch_first=True,
)
def forward(self, x):
x, _ = self.rnn_layer(x)
x = x.view(-1, 256)
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
x = F.dropout(x, p=0.2)
x = self.fc3(x)
return x.unsqueeze(1)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.rnn_layer = nn.LSTM(
input_size=360,
hidden_size=256,
num_layers=1,
bidirectional=True,
batch_first=True,
)
self.fc1 = nn.Linear(512, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 1)
def forward(self, x):
x, _ = self.rnn_layer(x)
x = x.view(-1, 512)
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
x = F.dropout(x, p=0.2)
x = torch.sigmoid(self.fc3(x))
return x
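# A small shape sanity check (illustrative, not part of the original training flow):
# feed a batch of random noise through the generator and discriminator to confirm that
# generated beats are 360 samples long and the critic returns one score per sample.
_g_check = Generator()
_d_check = Discriminator()
_noise_check = torch.randn(8, 1, 360)
_fake_check = _g_check(_noise_check)
print(_fake_check.shape)  # expected: torch.Size([8, 1, 360])
print(_d_check(_fake_check).shape)  # expected: torch.Size([8, 1])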
class Trainer:
def __init__(self, generator, discriminator, batch_size, num_epochs, label):
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self.netG = generator.to(self.device)
self.netD = discriminator.to(self.device)
self.optimizerD = Adam(self.netD.parameters(), lr=0.0002)
self.optimizerG = Adam(self.netG.parameters(), lr=0.0002)
self.criterion = nn.BCELoss()
self.batch_size = batch_size
self.signal_dim = [self.batch_size, 1, 360]
self.num_epochs = num_epochs
self.dataloader = get_dataloader(label_name=label, batch_size=self.batch_size)
self.fixed_noise = torch.randn(self.batch_size, 1, 360, device=self.device)
self.g_errors = []
self.d_errors = []
def _one_epoch(self):
real_label = 1
fake_label = 0
for i, data in enumerate(self.dataloader, 0):
##### Update Discriminator: maximize log(D(x)) + log(1 - D(G(z))) #####
## train with real data
self.netD.zero_grad()
real_data = data[0].to(self.device)
# dim for noise
batch_size = real_data.size(0)
self.signal_dim[0] = batch_size
label = torch.full(
(batch_size,), real_label, dtype=real_data.dtype, device=self.device
)
output = self.netD(real_data)
output = output.view(-1)
errD_real = self.criterion(output, label)
errD_real.backward()
D_x = output.mean().item()
## train with fake data
noise = torch.randn(self.signal_dim, device=self.device)
fake = self.netG(noise)
label.fill_(fake_label)
output = self.netD(fake.detach())
output = output.view(-1)
errD_fake = self.criterion(output, label)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
self.optimizerD.step()
##### Update Generator: maximaze log(D(G(z)))
self.netG.zero_grad()
label.fill_(real_label)
output = self.netD(fake)
output = output.view(-1)
errG = self.criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
self.optimizerG.step()
return errD.item(), errG.item()
def run(self):
for epoch in range(self.num_epochs):
errD_, errG_ = self._one_epoch()
self.d_errors.append(errD_)
self.g_errors.append(errG_)
if epoch % 10 == 0:
print(
f"Epoch: {epoch} | Loss_D: {errD_} | Loss_G: {errG_} | Time: {time.strftime('%H:%M:%S')}"
)
fake = self.netG(self.fixed_noise)
plt.plot(fake.detach().cpu().squeeze(1).numpy()[:].transpose())
plt.show()
        torch.save(self.netG.state_dict(), "generator.pth")
        torch.save(self.netD.state_dict(), "discriminator.pth")
g = Generator()
d = Discriminator()
import time
trainer = Trainer(
generator=g, discriminator=d, batch_size=96, num_epochs=300, label="A"
)
trainer.run()
|
# # Assignment 4
# Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.
# This assignment requires that you find **at least two datasets** on the web which are related, and that you visualize these datasets to answer the assignment question. You are free to utilize datasets with any location or domain, the usage of **Ann Arbor sports and athletics** datasets in the example is just a suggestion.
# You are welcome to choose datasets at your discretion, but keep in mind **they will be shared with your peers**, so choose appropriate datasets. Sensitive, confidential, illicit, and proprietary materials are not good choices for datasets for this assignment. You are welcome to upload datasets of your own as well, and link to them using a third party repository such as github, pastebin, etc. Please be aware of the Coursera terms of service with respect to intellectual property.
# Also, you are welcome to preserve data in its original language, but for the purposes of grading you should provide english translations. You are welcome to provide multiple visuals in different languages if you would like!
# As this assignment is for the whole course, you must incorporate principles discussed in the first week, such as having as high data-ink ratio (Tufte) and aligning with Cairo’s principles of truth, beauty, function, and insight.
# Here are the assignment instructions:
# * You must state a question you are seeking to answer with your visualizations.
# * You must provide at least two links to available datasets. These could be links to files such as CSV or Excel files, or links to websites which might have data in tabular form, such as Wikipedia pages.
# * You must upload an image which addresses the research question you stated. In addition to addressing the question, this visual should follow Cairo's principles of truthfulness, functionality, beauty, and insightfulness.
# * You must contribute a short (1-2 paragraph) written justification of how your visualization addresses your stated research question.
# ## Tips
# * Wikipedia is an excellent source of data, and I strongly encourage you to explore it for new data sources.
# * Many governments run open data initiatives at the city, region, and country levels, and these are wonderful resources for localized data sources.
# * Several international agencies, such as the [United Nations](http://data.un.org/), the [World Bank](http://data.worldbank.org/), the [Global Open Data Index](http://index.okfn.org/place/) are other great places to look for data.
# * This assignment requires you to convert and clean datafiles. Check out the discussion forums for tips on how to do this from various sources, and share your successes with your fellow students!
# ## Example
# Looking for an example? Here's what our course assistant put together as an example! [Example Solution File](./readonly/Assignment4_example.pdf)
# **I found a dataset at https://www.kaggle.com/datasets/lfarhat/brasil-students-scholarship-prouni-20052019 that lists all the students who received the "Prouni" scholarship from 2005 to 2019.
# I noticed that it has a column with each student's county of origin, so I searched for a dataset of Brazilian counties at http://blog.mds.gov.br/redesuas/lista-de-municipios-brasileiros/ to cross-reference the information.
# With this I could add to the dataset the county size and whether or not it is a capital.**
# **In this project I would like to answer the following questions:**
# - Has the number of scholarships grown over the years or not?
# - Are there more men or more women winning scholarships?
# - What is the age distribution of students?
# - What is the regional distribution of students?
# - Are there more students from big cities, or from smaller cities as well?
import pandas as pd
import unidecode
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import folium
from geopy.geocoders import Nominatim
# Reading a CSV file and loading it into a DataFrame
prouni = pd.read_csv(
"/kaggle/input/brasil-students-scholarship-prouni-20052019/prouni_2005_2019.csv"
)[
[
"ANO_CONCESSAO_BOLSA",
"NOME_IES_BOLSA",
"TIPO_BOLSA",
"MODALIDADE_ENSINO_BOLSA",
"NOME_CURSO_BOLSA",
"NOME_TURNO_CURSO_BOLSA",
"SEXO_BENEFICIARIO_BOLSA",
"RACA_BENEFICIARIO_BOLSA",
"DT_NASCIMENTO_BENEFICIARIO",
"BENEFICIARIO_DEFICIENTE_FISICO",
"REGIAO_BENEFICIARIO_BOLSA",
"SIGLA_UF_BENEFICIARIO_BOLSA",
"MUNICIPIO_BENEFICIARIO_BOLSA",
"idade",
]
]
brasil_conties = pd.read_csv(
"/kaggle/input/brasil-counties/Brasil_counties.csv", sep=";"
)[["UF", "Município", "Região", "Porte", "Capital"]]
# Translating column names and values
prouni = prouni.rename(
columns={
"ANO_CONCESSAO_BOLSA": "Year of Scholarship Grant",
"NOME_IES_BOLSA": "Scholarship Institution Name",
"TIPO_BOLSA": "Scholarship Type",
"MODALIDADE_ENSINO_BOLSA": "Scholarship Education Modality",
"NOME_CURSO_BOLSA": "Course Name for Scholarship",
"NOME_TURNO_CURSO_BOLSA": "Course Period",
"SEXO_BENEFICIARIO_BOLSA": "Gender of Scholarship Beneficiary",
"RACA_BENEFICIARIO_BOLSA": "Ethnicity of Scholarship Beneficiary",
"DT_NASCIMENTO_BENEFICIARIO": "Beneficiary Date of Birth",
"BENEFICIARIO_DEFICIENTE_FISICO": "Physically Disabled Scholarship Beneficiary",
"REGIAO_BENEFICIARIO_BOLSA": "Region of Scholarship Beneficiary",
"SIGLA_UF_BENEFICIARIO_BOLSA": "State Abbreviation of Scholarship Beneficiary",
"MUNICIPIO_BENEFICIARIO_BOLSA": "County of Scholarship Beneficiary",
"idade": "Age",
}
)
translation_dict = {
"BOLSA PARCIAL 50%": "50% Partial Scholarship",
"BOLSA INTEGRAL": "Full Scholarship",
"BOLSA COMPLEMENTAR 25%": "25% Complementary Scholarship",
"PRESENCIAL": "On-site",
"EAD": "Distance Learning",
"Integral": "Full-time",
"Noturno": "Night",
"Matutino": "Morning",
"Vespertino": "Afternoon",
"A Distancia": "Distance learning",
"Branca": "White",
"Parda": "Mixed",
"Amarela": "Asian",
"Nao Informada": "Not Informed",
"Preta": "Black",
"Indigena": "Indigenous",
"nao": "No",
"sim": "Yes",
"SUL": "South",
"SUDESTE": "Southeast",
"CENTRO-OESTE": "Midwest",
"NORTE": "North",
"NORDESTE": "Northeast",
}
prouni["Scholarship Type"] = prouni["Scholarship Type"].replace(translation_dict)
prouni["Scholarship Education Modality"] = prouni[
"Scholarship Education Modality"
].replace(translation_dict)
prouni["Course Period"] = prouni["Course Period"].replace(translation_dict)
prouni["Ethnicity of Scholarship Beneficiary"] = prouni[
"Ethnicity of Scholarship Beneficiary"
].replace(translation_dict)
prouni["Physically Disabled Scholarship Beneficiary"] = prouni[
"Physically Disabled Scholarship Beneficiary"
].replace(translation_dict)
prouni["Region of Scholarship Beneficiary"] = prouni[
"Region of Scholarship Beneficiary"
].replace(translation_dict)
prouni["Beneficiary Date of Birth"] = pd.to_datetime(
prouni["Beneficiary Date of Birth"]
)
prouni["County of Scholarship Beneficiary"] = prouni[
"County of Scholarship Beneficiary"
].str.title()
# Translating column names and values
brasil_conties = brasil_conties.rename(
columns={"UF": "State", "Município": "County", "Região": "Region", "Porte": "Size"}
)
translation_dict = {
"Região Norte": "North",
"Região Nordeste": "Northeast",
"Região Sudeste": "Southeast",
"Região Sul": "South",
"Região Centro-Oeste": "Midwest",
"Pequeno II": "Small II",
"Médio": "Medium",
"Pequeno I": "Small I",
"Grande": "Big",
"Metrópole": "Metropolis",
"Capital": "Yes",
}
brasil_conties["Region"] = brasil_conties["Region"].replace(translation_dict)
brasil_conties["Size"] = brasil_conties["Size"].replace(translation_dict)
brasil_conties["Capital"] = brasil_conties["Capital"].replace(translation_dict)
brasil_conties["County"] = brasil_conties["County"].apply(unidecode.unidecode)
brasil_conties["County"] = brasil_conties["County"].str.title()
# Combines the dataframes based on the column of Counties
merged_df = prouni.merge(
brasil_conties, left_on="County of Scholarship Beneficiary", right_on="County"
)
merged_df = merged_df.drop(columns=["State", "County", "Region"])
merged_df
# ## Now that I've cleaned, translated, and merged the datasets, I'll start answering the questions through data visualization.
# Plotting a graph of the number of Scholarships per year
sns.set(style="whitegrid")
cor = "#AEC6CF"
sns.set_palette([cor])
count_by_year = merged_df["Year of Scholarship Grant"].value_counts()
plt.figure(figsize=(14, 6))
sns.barplot(x=count_by_year.index, y=count_by_year.values, palette=[cor])
plt.title("Number of Scholarships Awarded per Year")
plt.xlabel("Year")
plt.ylabel("Number of Scholarships")
plt.show()
# ## With this barplot we can see that the number of scholarships increased until 2015 and then stayed roughly constant, with a small decline through 2019
# Plotting Number of men and women receiving grants per year
grouped_df = (
merged_df.groupby("Year of Scholarship Grant")["Gender of Scholarship Beneficiary"]
.value_counts()
.unstack()
)
sns.set_palette("Set2")
sns.set(style="ticks")
plt.figure(figsize=(14, 6))
sns.lineplot(data=grouped_df, markers=True, dashes=False)
plt.xlabel("Year")
plt.ylabel("Quantity")
plt.title("Number of men and women receiving scholarships per year")
sns.despine(bottom=True, left=True)
plt.show()
# ## Here we can see that the numbers of men and women are very similar, and overall slightly more women than men receive scholarships
# Plotting age distribution histogram
sns.set_style("whitegrid")
plt.figure(figsize=(14, 6))
sns.histplot(data=merged_df, x="Age", bins=50)
plt.title("Age distribution")
plt.xlabel("Age")
plt.ylabel("Quantity")
plt.show()
# ## In this histogram I see that the most common age is between 20 and 30 years old and that there is also a significant number of students over 40 years old receiving scholarships
# Age distribution boxplot
plt.figure(figsize=(14, 6))
sns.boxplot(x=merged_df["Age"], showfliers=False)
plt.title("Age distribution")
plt.show()
# ## Just a boxplot to visualize the sample quartiles
# Calculate the quartiles and interquartile range (IQR) of the 'Age' column
q25_age = np.percentile(merged_df["Age"], 25)
q75_age = np.percentile(merged_df["Age"], 75)
IQR_age = q75_age - q25_age
# Identify the bounds 1.5 * IQR below the first quartile and above the third quartile
lower_bound = q25_age - 1.5 * IQR_age
upper_bound = q75_age + 1.5 * IQR_age
# Create subsets of the original DataFrame with values below and above those bounds
outliers_below = merged_df[merged_df["Age"] < lower_bound]
outliers_above = merged_df[merged_df["Age"] > upper_bound]
# Calculate the amount of points in each subset
lower_subset_count = len(outliers_below)
upper_subset_count = len(outliers_above)
plt.figure(figsize=(14, 3))
# Subplot 1
plt.subplot(2, 1, 1)
sns.boxplot(data=merged_df[["Age"]], x="Age", showfliers=True, orient="h")
plt.xlim(0, lower_bound)
plt.title("Outliers bellow")
plt.text(lower_bound - 0.5, 0.4, f"N={len(merged_df[merged_df['Age']<=lower_bound])}")
# Subplot 2
plt.subplot(2, 1, 2)
sns.boxplot(data=merged_df[["Age"]], x="Age", showfliers=True, orient="h")
plt.xlim(upper_bound, int(merged_df["Age"].max()) + 2)
plt.title("Outliers Above")
plt.text(
int(merged_df["Age"].max()) - 6,
0.4,
f"N={len(merged_df[merged_df['Age']>=upper_bound])}",
)
plt.tight_layout()
plt.show()
# ## Here I wanted to see the outlier points below and above the rest of the distribution.
# ## I also printed, in the lower right corner of each subplot, the number of points being shown
merged_df = merged_df[
(merged_df["Age"] >= lower_bound) & (merged_df["Age"] <= upper_bound)
]
# Group the data by municipality and count the number of scholarships in each group
bolsas_por_municipio = (
merged_df.groupby("County of Scholarship Beneficiary")
.size()
.reset_index(name="num_bolsas")
)
bolsas_por_municipio = bolsas_por_municipio.sort_values(
["num_bolsas"], ascending=False
)[:1000]
geolocator = Nominatim(user_agent="prouni-brasil")
def get_location(city):
location = geolocator.geocode(city + ", Brazil")
return [location.latitude, location.longitude]
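# Nominatim's usage policy asks for roughly one request per second, and geocoding the top
# 1000 counties in a tight loop can get throttled. A hedged alternative sketch using
# geopy's RateLimiter wrapper (assuming the installed geopy version provides it):
from geopy.extra.rate_limiter import RateLimiter
rate_limited_geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
def get_location_throttled(city):
    location = rate_limited_geocode(city + ", Brazil")
    if location is None:  # geocoding can fail for misspelled or ambiguous county names
        return None
    return [location.latitude, location.longitude]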
bolsas_por_municipio["coordinates"] = bolsas_por_municipio[
"County of Scholarship Beneficiary"
].apply(lambda x: get_location(x))
m = folium.Map(location=[-15.788497, -47.879873], zoom_start=4)
for _, row in bolsas_por_municipio.iterrows():
try:
folium.CircleMarker(
location=row["coordinates"],
radius=row["num_bolsas"]
/ 12000, # tamanho proporcional ao número de bolsas
color="blue",
color_opacity=0.2,
fill=True,
fill_color="blue",
fill_opacity=0.8,
).add_to(m)
except:
continue
m
# ## This is a map plot where we can see the regional distribution of the students in the top 1000 counties. (I had to limit it to the top 1000 to be able to plot)
# ## We can see that a large share of the students are from São Paulo, but there are also students spread all over the country
merged_df["Size"].astype(
pd.CategoricalDtype(
["Small I", "Small II", "Medium", "Big", "Metropolis"], ordered=True
)
)
sns.set_style("whitegrid")
plt.figure(figsize=(8, 6))
ax = sns.countplot(x="Size", data=merged_df, color="lightgreen")
ax.set_xlabel("County Size")
ax.set_ylabel("Number of Scholarships")
ax.set_title("Number of Scholarships by County Size")
plt.show()
|
# # HackerEarth ML - Of Genomes And Genetics
# # Step 1: Reading and Understanding the Data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
plt.style.use("seaborn-deep")
plt.style.use("fivethirtyeight")
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.serif"] = "Ubuntu"
plt.rcParams["font.monospace"] = "Ubuntu Mono"
plt.rcParams["font.size"] = 10
plt.rcParams["axes.labelsize"] = 12
plt.rcParams["axes.titlesize"] = 12
plt.rcParams["xtick.labelsize"] = 8
plt.rcParams["ytick.labelsize"] = 8
plt.rcParams["legend.fontsize"] = 12
plt.rcParams["figure.titlesize"] = 14
plt.rcParams["figure.figsize"] = (12, 8)
pd.options.mode.chained_assignment = None
pd.options.display.float_format = "{:.2f}".format
pd.set_option("display.max_columns", 200)
pd.set_option("display.width", 400)
import warnings
warnings.filterwarnings("ignore")
import sklearn.base as skb
import sklearn.metrics as skm
import sklearn.model_selection as skms
import sklearn.preprocessing as skp
import sklearn.utils as sku
import sklearn.linear_model as sklm
import sklearn.neighbors as skn
import sklearn.ensemble as ske
import catboost as cb
import scipy.stats as sstats
import random
seed = 12
np.random.seed(seed)
from datetime import date
import pandas_profiling as pp
# important funtions
def datasetShape(df):
rows, cols = df.shape
print("The dataframe has", rows, "rows and", cols, "columns.")
# select numerical and categorical features
def divideFeatures(df):
numerical_features = df.select_dtypes(include=[np.number])
    categorical_features = df.select_dtypes(include=[object])
return numerical_features, categorical_features
base = "/kaggle/input/of-genomes-and-genetics-hackerearth-ml/"
data_file = base + "train.csv"
df = pd.read_csv(data_file)
df.head()
data_file = base + "test.csv"
df_test = pd.read_csv(data_file)
df_test.head()
# set target feature
targetFeature = "Genetic Disorder"
targetFeature2 = "Disorder Subclass"
# check dataset shape
datasetShape(df)
# remove ID from train data
df.drop(["Patient Id"], inplace=True, axis=1)
# check for duplicates
print(df.shape)
df.drop_duplicates(inplace=True)
print(df.shape)
df.info()
df_test.info()
# # Step 2: EDA
# remove irrelevant columns
df.drop(
[
"Patient First Name",
"Family Name",
"Father's name",
"Father's age",
"Mother's age",
"Institute Name",
"Location of Institute",
"Status",
"Parental consent",
"Autopsy shows birth defect (if applicable)",
"Place of birth",
"No. of previous abortion",
],
axis=1,
inplace=True,
)
df_test.drop(
[
"Patient First Name",
"Family Name",
"Father's name",
"Father's age",
"Mother's age",
"Institute Name",
"Location of Institute",
"Status",
"Parental consent",
"Autopsy shows birth defect (if applicable)",
"Place of birth",
"No. of previous abortion",
],
axis=1,
inplace=True,
)
df.describe()
cont_features, cat_features = divideFeatures(df)
cat_features.head()
# ### Univariate Analysis
# check target feature distribution
df[targetFeature].hist()
plt.show()
# check target feature distribution
df[targetFeature2].hist()
plt.show()
# boxplots of numerical features for outlier detection
fig = plt.figure(figsize=(16, 16))
for i in range(len(cont_features.columns)):
fig.add_subplot(4, 4, i + 1)
sns.boxplot(y=cont_features.iloc[:, i])
plt.tight_layout()
plt.show()
plt.figure(figsize=(32, 32))
sns.pairplot(df)
plt.show()
# correlation heatmap for all features
corr = df.corr()
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, mask=mask, annot=True)
plt.show()
# ### Profiling for Whole Data
profile = pp.ProfileReport(df, title="Pandas Profiling Report", explorative=True)
profile.to_file("profile.html")
profile.to_notebook_iframe()
# # Step 3: Data Preparation
# ### Handle Missing
# remove all columns having no values
df.dropna(axis=1, how="all", inplace=True)
df_test.dropna(axis=1, how="all", inplace=True)
df.dropna(axis=0, how="all", inplace=True)
# drop rows where target features are not available
df.dropna(subset=["Genetic Disorder", "Disorder Subclass"], how="any", inplace=True)
datasetShape(df)
# drop single valued column
keep = [c for c in list(df) if df[c].nunique() > 1]
df = df[keep]
keep.remove("Disorder Subclass")
keep.remove("Genetic Disorder")
keep.insert(0, "Patient Id")
df_test = df_test[keep]
datasetShape(df)
# plot missing values
def calc_missing(df):
missing = df.isna().sum().sort_values(ascending=False)
missing = missing[missing != 0]
missing_perc = missing / df.shape[0] * 100
return missing, missing_perc
if df.isna().any().sum() > 0:
missing, missing_perc = calc_missing(df)
missing.plot(kind="bar", figsize=(30, 8))
plt.title("Missing Values")
plt.show()
else:
print("No Missing Values")
def fillNan(df, col, value):
df[col].fillna(value, inplace=True)
# setting Assisted conception IVF/ART missing values to Not available
fillNan(df, "Assisted conception IVF/ART", "Yes")
fillNan(df_test, "Assisted conception IVF/ART", "Yes")
df["Assisted conception IVF/ART"].isna().any()
# setting H/O radiation exposure (x-ray) missing values to -
fillNan(df, "H/O radiation exposure (x-ray)", "-")
fillNan(df_test, "H/O radiation exposure (x-ray)", "-")
df["H/O radiation exposure (x-ray)"].isna().any()
# setting Respiratory Rate (breaths/min) missing values to Normal (30-60)
fillNan(df, "Respiratory Rate (breaths/min)", "Normal (30-60)")
fillNan(df_test, "Respiratory Rate (breaths/min)", "Normal (30-60)")
df["Respiratory Rate (breaths/min)"].isna().any()
# setting Folic acid details (peri-conceptional) missing values to Yes
fillNan(df, "Folic acid details (peri-conceptional)", "Yes")
fillNan(df_test, "Folic acid details (peri-conceptional)", "Yes")
df["Folic acid details (peri-conceptional)"].isna().any()
# setting H/O serious maternal illness missing values to No
fillNan(df, "H/O serious maternal illness", "No")
fillNan(df_test, "H/O serious maternal illness", "No")
df["H/O serious maternal illness"].isna().any()
# setting Birth asphyxia missing values to Not available
fillNan(df, "Birth asphyxia", "Not available")
fillNan(df_test, "Birth asphyxia", "Not available")
df["Birth asphyxia"].isna().any()
# setting Birth defects missing values to Singular
fillNan(df, "Birth defects", "Singular")
fillNan(df_test, "Birth defects", "Singular")
df["Birth defects"].isna().any()
# setting Blood test result missing values to inconclusive
fillNan(df, "Blood test result", "inconclusive")
fillNan(df_test, "Blood test result", "inconclusive")
df["Blood test result"].isna().any()
# setting H/O substance abuse missing values to -
fillNan(df, "H/O substance abuse", "-")
fillNan(df_test, "H/O substance abuse", "-")
df["H/O substance abuse"].isna().any()
# setting missing values to mean values
fillNan(
df,
"White Blood cell count (thousand per microliter)",
df["White Blood cell count (thousand per microliter)"].mean(),
)
fillNan(
df_test,
"White Blood cell count (thousand per microliter)",
df["White Blood cell count (thousand per microliter)"].mean(),
)
df["White Blood cell count (thousand per microliter)"].isna().any()
# setting History of anomalies in previous pregnancies missing values to No
fillNan(df, "History of anomalies in previous pregnancies", "No")
fillNan(df_test, "History of anomalies in previous pregnancies", "No")
df["History of anomalies in previous pregnancies"].isna().any()
# setting Inherited from father missing values to No
fillNan(df, "Inherited from father", "No")
fillNan(df_test, "Inherited from father", "No")
df["Inherited from father"].isna().any()
# setting Gender missing values to Ambiguous
fillNan(df, "Gender", "Ambiguous")
fillNan(df_test, "Gender", "Ambiguous")
df["Gender"].isna().any()
# setting Follow-up missing values to Low
fillNan(df, "Follow-up", "Low")
fillNan(df_test, "Follow-up", "Low")
df["Follow-up"].isna().any()
# setting Maternal gene missing values to No
fillNan(df, "Maternal gene", "No")
fillNan(df_test, "Maternal gene", "No")
df["Maternal gene"].isna().any()
# setting missing values to mean values
fillNan(df, "Patient Age", df["Patient Age"].mean())
fillNan(df_test, "Patient Age", df["Patient Age"].mean())
df["Patient Age"].isna().any()
# setting missing values to most occurring values
fillNan(df, "Symptom 1", df["Symptom 1"].mode()[0])
fillNan(df_test, "Symptom 1", df["Symptom 1"].mode()[0])
fillNan(df, "Symptom 2", df["Symptom 2"].mode()[0])
fillNan(df_test, "Symptom 2", df["Symptom 2"].mode()[0])
fillNan(df, "Symptom 3", df["Symptom 3"].mode()[0])
fillNan(df_test, "Symptom 3", df["Symptom 3"].mode()[0])
fillNan(df, "Symptom 4", df["Symptom 4"].mode()[0])
fillNan(df_test, "Symptom 4", df["Symptom 4"].mode()[0])
fillNan(df, "Symptom 5", df["Symptom 5"].mode()[0])
fillNan(df_test, "Symptom 5", df["Symptom 5"].mode()[0])
fillNan(df, "Heart Rate (rates/min", df["Heart Rate (rates/min"].mode()[0])
fillNan(df_test, "Heart Rate (rates/min", df["Heart Rate (rates/min"].mode()[0])
print("Train Missing:", df.isna().any().sum())
print("Test Missing:", df_test.isna().any().sum())
# ### One-hot Encoding
cont_features, cat_features = divideFeatures(df)
cat_features
custom_feat = [
"Genes in mother's side",
"Inherited from father",
"Maternal gene",
"Paternal gene",
"Respiratory Rate (breaths/min)",
"Heart Rate (rates/min",
"Follow-up",
"Gender",
"Birth asphyxia",
"Folic acid details (peri-conceptional)",
"H/O serious maternal illness",
"H/O radiation exposure (x-ray)",
"H/O substance abuse",
"Assisted conception IVF/ART",
"History of anomalies in previous pregnancies",
"Birth defects",
"Blood test result",
]
# extract numerical and categorical for dummy and scaling later
for feat in custom_feat:
dummyVars = pd.get_dummies(df[feat], drop_first=True, prefix=feat + "_")
df = pd.concat([df, dummyVars], axis=1)
df.drop(feat, axis=1, inplace=True)
datasetShape(df)
df.head()
# extract numerical and categorical for dummy and scaling later
for feat in custom_feat:
dummyVars = pd.get_dummies(df_test[feat], drop_first=True, prefix=feat + "_")
df_test = pd.concat([df_test, dummyVars], axis=1)
df_test.drop(feat, axis=1, inplace=True)
datasetShape(df_test)
df_test.head()
# # Step 4: Data Modelling
# ### Split Train-Test Data
# helper functions
def printScore(y_train, y_train_pred):
print(skm.f1_score(y_train, y_train_pred, average="macro"))
df_f1 = df.sample(frac=1, random_state=seed).reset_index(drop=True)
df_f2 = df.sample(frac=1, random_state=seed).reset_index(drop=True)
# remove Disorder Subclass from df1
df_f1.drop("Disorder Subclass", inplace=True, axis=1)
# convert Genetic Disorder to one-hot
# dummyVars = pd.get_dummies(df_f2['Genetic Disorder'], drop_first=True, prefix="GeneticDisorder_")
# df_f2 = pd.concat([df_f2, dummyVars], axis=1)
# df_f2.drop('Genetic Disorder', axis=1, inplace=True)
# convert Genetic Disorder to label-encoding
gdle = skp.LabelEncoder()
df_f2["Genetic Disorder"] = gdle.fit_transform(df_f2["Genetic Disorder"])
# shuffle samples
df_f1_shuffle = df_f1.sample(frac=1, random_state=seed).reset_index(drop=True)
df_f2_shuffle = df_f2.sample(frac=1, random_state=seed).reset_index(drop=True)
# separate target feature
df_f1_y = df_f1_shuffle.pop(targetFeature)
df_f1_X = df_f1_shuffle
# transform the text label to integers
f1_le = skp.LabelEncoder()
df_f1_y = f1_le.fit_transform(df_f1_y)
# print(f1_le.classes_)
# split into train dev and test
X_f1_train, X_f1_test, y_f1_train, y_f1_test = skms.train_test_split(
df_f1_X, df_f1_y, train_size=0.8, random_state=seed
)
print(
f"Train set has {X_f1_train.shape[0]} records out of {len(df_f1_shuffle)} which is {round(X_f1_train.shape[0]/len(df_f1_shuffle)*100)}%"
)
print(
f"Test set has {X_f1_test.shape[0]} records out of {len(df_f1_shuffle)} which is {round(X_f1_test.shape[0]/len(df_f1_shuffle)*100)}%"
)
# separate target feature
df_f2_y = df_f2_shuffle.pop(targetFeature2)
df_f2_X = df_f2_shuffle
# transform the text label to integers
f2_le = skp.LabelEncoder()
df_f2_y = f2_le.fit_transform(df_f2_y)
# print(f2_le.classes_)
# split into train dev and test
X_f2_train, X_f2_test, y_f2_train, y_f2_test = skms.train_test_split(
df_f2_X, df_f2_y, train_size=0.8, random_state=seed
)
print(
f"Train set has {X_f2_train.shape[0]} records out of {len(df_f2_shuffle)} which is {round(X_f2_train.shape[0]/len(df_f2_shuffle)*100)}%"
)
print(
f"Test set has {X_f2_test.shape[0]} records out of {len(df_f2_shuffle)} which is {round(X_f2_test.shape[0]/len(df_f2_shuffle)*100)}%"
)
# ### Feature Scaling
# reset index for X_train and X_test
X_f1_train.reset_index(drop=True, inplace=True)
X_f1_test.reset_index(drop=True, inplace=True)
X_f1_train.index[:5]
# reset index for X_train and X_test
X_f2_train.reset_index(drop=True, inplace=True)
X_f2_test.reset_index(drop=True, inplace=True)
X_f2_train.index[:5]
# scaler = skp.RobustScaler()
# scaler = skp.MinMaxScaler()
scaler = skp.StandardScaler()
# apply scaling to all numerical variables except dummy variables as they are already between 0 and 1
X_f1_train[cont_features.columns] = pd.DataFrame(
scaler.fit_transform(X_f1_train[cont_features.columns]),
columns=cont_features.columns,
)
# scale test data with transform()
X_f1_test[cont_features.columns] = pd.DataFrame(
scaler.transform(X_f1_test[cont_features.columns]), columns=cont_features.columns
)
# view sample data
X_f1_train.describe()
# scaler = skp.RobustScaler()
# scaler = skp.MinMaxScaler()
scaler = skp.StandardScaler()
# apply scaling to all numerical variables except dummy variables as they are already between 0 and 1
X_f2_train[cont_features.columns] = pd.DataFrame(
scaler.fit_transform(X_f2_train[cont_features.columns]),
columns=cont_features.columns,
)
# scale test data with transform()
X_f2_test[cont_features.columns] = pd.DataFrame(
scaler.transform(X_f2_test[cont_features.columns]), columns=cont_features.columns
)
# view sample data
X_f2_train.describe()
# ## Model Building
class_weights_f1 = sku.class_weight.compute_class_weight(
    class_weight="balanced", classes=np.unique(y_f1_train), y=y_f1_train
)
class_weights_f1 = dict(enumerate(class_weights_f1))
class_weights_f1
class_weights_f2 = sku.class_weight.compute_class_weight(
    class_weight="balanced", classes=np.unique(y_f2_train), y=y_f2_train
)
class_weights_f2 = dict(enumerate(class_weights_f2))
class_weights_f2
sample_weights_f1 = sku.class_weight.compute_sample_weight("balanced", y_f1_train)
sample_weights_f1
sample_weights_f2 = sku.class_weight.compute_sample_weight("balanced", y_f2_train)
sample_weights_f2
# ### CatBoost
import catboost as cb
cat_model_f1 = cb.CatBoostClassifier(
verbose=0,
iterations=70,
# eval_metric='F1',
class_weights=class_weights_f1,
# use_best_model=True
)
cat_model_f1.fit(X_f1_train, y_f1_train, eval_set=(X_f1_test, y_f1_test))
print(cat_model_f1.best_score_)
y_f1_train_pred = cat_model_f1.predict(X_f1_train)
y_f1_test_pred = cat_model_f1.predict(X_f1_test)
print(skm.accuracy_score(y_f1_train, y_f1_train_pred))
print(skm.accuracy_score(y_f1_test, y_f1_test_pred))
printScore(y_f1_train, y_f1_train_pred)
printScore(y_f1_test, y_f1_test_pred)
import catboost as cb
cat_model_f2 = cb.CatBoostClassifier(
verbose=0,
iterations=80,
# eval_metric='F1',
class_weights=class_weights_f2,
# use_best_model=True
)
cat_model_f2.fit(X_f2_train, y_f2_train, eval_set=(X_f2_test, y_f2_test))
print(cat_model_f2.best_score_)
y_f2_train_pred = cat_model_f2.predict(X_f2_train)
y_f2_test_pred = cat_model_f2.predict(X_f2_test)
print(skm.accuracy_score(y_f2_train, y_f2_train_pred))
print(skm.accuracy_score(y_f2_test, y_f2_test_pred))
printScore(y_f2_train, y_f2_train_pred)
printScore(y_f2_test, y_f2_test_pred)
# ### RandomForest
rf_model_f1 = ske.RandomForestClassifier(
verbose=0,
random_state=1,
n_jobs=-1,
class_weight="balanced_subsample",
n_estimators=100,
max_depth=10,
min_samples_split=5,
min_samples_leaf=3,
)
rf_model_f1.fit(X_f1_train, y_f1_train)
# predict
y_f1_train_pred = rf_model_f1.predict(X_f1_train)
y_f1_test_pred = rf_model_f1.predict(X_f1_test)
print(skm.accuracy_score(y_f1_train, y_f1_train_pred))
print(skm.accuracy_score(y_f1_test, y_f1_test_pred))
printScore(y_f1_train, y_f1_train_pred)
printScore(y_f1_test, y_f1_test_pred)
rf_model_f2 = ske.RandomForestClassifier(
verbose=0,
random_state=1,
n_jobs=-1,
class_weight="balanced_subsample",
n_estimators=300,
max_depth=10,
min_samples_split=10,
min_samples_leaf=5,
)
rf_model_f2.fit(X_f2_train, y_f2_train)
# predict
y_f2_train_pred = rf_model_f2.predict(X_f2_train)
y_f2_test_pred = rf_model_f2.predict(X_f2_test)
print(skm.accuracy_score(y_f2_train, y_f2_train_pred))
print(skm.accuracy_score(y_f2_test, y_f2_test_pred))
printScore(y_f2_train, y_f2_train_pred)
printScore(y_f2_test, y_f2_test_pred)
# ### XGBoost
import xgboost as xg
# # Grid used for parameter tuning
# param_test1 = {
# 'max_depth': np.arange(5, 12, 2),
# 'learning_rate': np.arange(0.04, 0.07, 0.01)
# }
# xgb_cv1 = skms.GridSearchCV(estimator = xg.XGBClassifier(n_estimators=100, objective='macro', nthread=4, seed=seed),
# param_grid = param_test1, scoring='f1', n_jobs=4,
# cv=3, verbose=1)
# xgb_cv1.fit(X_f1_train, y_f1_train)
# print(xgb_cv1.best_params_, xgb_cv1.best_score_)
# # max_depth = 10
# # learning_rate = 0.04
# # Grid used for parameter tuning
# param_test2 = {
# 'subsample': np.arange(0.5, 1, 0.1),
# 'min_child_weight': range(1, 6, 1)
# }
# xgb_cv2 = skms.GridSearchCV(estimator = xg.XGBClassifier(n_estimators=500, max_depth = 10,
# objective= 'multi:softprob', nthread=4, seed=seed),
# param_grid = param_test2, scoring='f1', n_jobs=4,
# cv=5, verbose=1)
# xgb_cv2.fit(X_train_small, y_train_small)
# print(xgb_cv2.best_params_, xgb_cv2.best_score_)
# print(xgb_cv2.best_estimator_)
# # subsample = 0.5
# # min_child_weight = 2
# note: "scoring" is not an XGBClassifier parameter, and sample_weight must be passed to fit()
xgb_model_f1 = xg.XGBClassifier(
    objective="multi:softprob",
    random_state=seed,
    learning_rate=0.0001,
    subsample=0.5,
    n_jobs=-1,
    n_estimators=100,
    max_depth=8,
)
xgb_model_f1.fit(X_f1_train, y_f1_train, sample_weight=sample_weights_f1)
# predict
y_f1_train_pred = xgb_model_f1.predict(X_f1_train)
y_f1_test_pred = xgb_model_f1.predict(X_f1_test)
print(skm.accuracy_score(y_f1_train, y_f1_train_pred))
print(skm.accuracy_score(y_f1_test, y_f1_test_pred))
printScore(y_f1_train, y_f1_train_pred)
printScore(y_f1_test, y_f1_test_pred)
xgb_model_f2 = xg.XGBClassifier(
    objective="multi:softprob",
    random_state=seed,
    learning_rate=0.15,
    subsample=1,
    n_jobs=-1,
    n_estimators=100,
    max_depth=5,
)
xgb_model_f2.fit(X_f2_train, y_f2_train, sample_weight=sample_weights_f2)
# predict
y_f2_train_pred = xgb_model_f2.predict(X_f2_train)
y_f2_test_pred = xgb_model_f2.predict(X_f2_test)
print(skm.accuracy_score(y_f2_train, y_f2_train_pred))
print(skm.accuracy_score(y_f2_test, y_f2_test_pred))
printScore(y_f2_train, y_f2_train_pred)
printScore(y_f2_test, y_f2_test_pred)
# ### LightGBM
import lightgbm as lgb
lgb_model_f1 = lgb.LGBMClassifier(
objective="multi",
random_state=1,
n_jobs=-1,
class_weight=class_weights_f1,
learning_rate=0.1,
n_estimators=70,
)
lgb_model_f1.fit(X_f1_train, y_f1_train)
# predict
y_f1_train_pred = lgb_model_f1.predict(X_f1_train)
y_f1_test_pred = lgb_model_f1.predict(X_f1_test)
print(skm.accuracy_score(y_f1_train, y_f1_train_pred))
print(skm.accuracy_score(y_f1_test, y_f1_test_pred))
printScore(y_f1_train, y_f1_train_pred)
printScore(y_f1_test, y_f1_test_pred)
import lightgbm as lgb
lgb_model_f2 = lgb.LGBMClassifier(
objective="multi",
random_state=1,
n_jobs=-1,
# class_weight=class_weights_f2,
learning_rate=0.08,
n_estimators=100,
)
lgb_model_f2.fit(X_f2_train, y_f2_train)
# predict
y_f2_train_pred = lgb_model_f2.predict(X_f2_train)
y_f2_test_pred = lgb_model_f2.predict(X_f2_test)
print(skm.accuracy_score(y_f2_train, y_f2_train_pred))
print(skm.accuracy_score(y_f2_test, y_f2_test_pred))
printScore(y_f2_train, y_f2_train_pred)
printScore(y_f2_test, y_f2_test_pred)
# # Step 5: Test Evaluation & Submission
# Generate Ensembles
def rmse_cv(model):
"""
Use this function to get quickly the rmse score over a cv
"""
rmse = np.sqrt(
-skms.cross_val_score(
model, X_train, y_train, scoring="neg_mean_squared_error", cv=5, n_jobs=-1
)
)
return rmse
class MixModel(skb.BaseEstimator, skb.RegressorMixin, skb.TransformerMixin):
"""
Here we will get a set of models as parameter already trained and
will calculate the mean of the predictions for using each model predictions
"""
def __init__(self, algs):
self.algs = algs
# Define clones of parameters models
def fit(self, X, y):
self.algs_ = [skb.clone(x) for x in self.algs]
# Train cloned base models
for alg in self.algs_:
alg.fit(X, y)
return self
    # Majority vote over the predictions of all cloned models
def predict(self, X):
predictions = np.column_stack(
[stacked_model.predict(X) for stacked_model in self.algs_]
)
return np.apply_along_axis(
lambda x: np.bincount(x).argmax(), axis=1, arr=predictions
)
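# Small illustrative check (added for clarity) of the majority-vote step used in
# MixModel.predict: each row holds one sample's predictions from three models, and
# np.bincount(...).argmax() picks the most frequent class per row.
_demo_preds = np.column_stack([[1, 0], [1, 0], [2, 1]])  # 2 samples x 3 models
print(np.apply_along_axis(lambda x: np.bincount(x).argmax(), axis=1, arr=_demo_preds))  # -> [1 0]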
mixed_model_f1 = MixModel(algs=[cat_model_f1, rf_model_f1, xgb_model_f1, lgb_model_f1])
# score = rmse_cv(mixed_model)
# print("\nAveraged base algs score: {:.4f} ({:.4f})".format(score.mean(), score.std()))
mixed_model_f1.fit(X_f1_train, y_f1_train)
# predict
y_f1_train_pred = mixed_model_f1.predict(X_f1_train)
y_f1_test_pred = mixed_model_f1.predict(X_f1_test)
printScore(y_f1_train, y_f1_train_pred)
printScore(y_f1_test, y_f1_test_pred)
mixed_model_f2 = MixModel(
algs=[
# cat_model_f2,
rf_model_f2,
# xgb_model_f2,
# lgb_model_f2
]
)
# score = rmse_cv(mixed_model)
# print("\nAveraged base algs score: {:.4f} ({:.4f})".format(score.mean(), score.std()))
mixed_model_f2.fit(X_f2_train, y_f2_train)
# predict
y_f2_train_pred = mixed_model_f2.predict(X_f2_train)
y_f2_test_pred = mixed_model_f2.predict(X_f2_test)
printScore(y_f2_train, y_f2_train_pred)
printScore(y_f2_test, y_f2_test_pred)
# generate test results for targetFeature
def getTestResults():
df_final_f1 = df_f1.sample(frac=1, random_state=1).reset_index(drop=True)
test_cols_f1 = [x for x in df_final_f1.columns if targetFeature not in x]
df_final_test_f1 = df_test[test_cols_f1]
df_y_f1 = df_final_f1.pop(targetFeature)
df_X_f1 = df_final_f1
df_y_f1 = f1_le.transform(df_y_f1)
    scaler_f1 = skp.RobustScaler()
    # scaler_f1 = skp.MinMaxScaler()
    # scaler_f1 = skp.StandardScaler()
    df_X_f1[cont_features.columns] = pd.DataFrame(
        scaler_f1.fit_transform(df_X_f1[cont_features.columns]),
        columns=cont_features.columns,
    )
    df_final_test_f1[cont_features.columns] = pd.DataFrame(
        scaler_f1.transform(df_final_test_f1[cont_features.columns]),
        columns=cont_features.columns,
    )
# sample_weights_f1 = sku.class_weight.compute_sample_weight('balanced', df_y_f1)
model_f1 = MixModel(
algs=[
# cat_model_f1,
# rf_model_f1,
xgb_model_f1,
# lgb_model_f1
]
)
model_f1.fit(df_X_f1, df_y_f1)
# predict
y_train_pred_f1 = model_f1.predict(df_X_f1)
y_test_pred_f1 = model_f1.predict(df_final_test_f1)
print("Accuracy Score for Train:", skm.accuracy_score(df_y_f1, y_train_pred_f1))
printScore(df_y_f1, y_train_pred_f1)
return y_test_pred_f1
# ML models
results = getTestResults()
submission = pd.DataFrame(
{
"Patient Id": df_test["Patient Id"],
targetFeature: f1_le.inverse_transform(results.ravel()),
}
)
print(submission[targetFeature].value_counts())
# generate test results for targetFeature2
def getTestResults():
df_final_f2 = df_f2.sample(frac=1, random_state=1).reset_index(drop=True)
test_cols_f2 = [x for x in df_final_f2.columns if targetFeature2 not in x]
df_final_test_f2 = df_test[test_cols_f2]
df_y_f2 = df_final_f2.pop(targetFeature2)
df_X_f2 = df_final_f2
df_y_f2 = f2_le.transform(df_y_f2)
    scaler_f2 = skp.RobustScaler()
    # scaler_f2 = skp.MinMaxScaler()
    # scaler_f2 = skp.StandardScaler()
    df_X_f2[cont_features.columns] = pd.DataFrame(
        scaler_f2.fit_transform(df_X_f2[cont_features.columns]),
        columns=cont_features.columns,
    )
    df_final_test_f2[cont_features.columns] = pd.DataFrame(
        scaler_f2.transform(df_final_test_f2[cont_features.columns]),
        columns=cont_features.columns,
    )
# sample_weights_f2 = sku.class_weight.compute_sample_weight('balanced', df_y_f2)
model_f2 = MixModel(
algs=[
# cat_model_f2,
rf_model_f2,
# xgb_model_f2,
# lgb_model_f2
]
)
model_f2.fit(df_X_f2, df_y_f2)
# predict
y_train_pred_f2 = model_f2.predict(df_X_f2)
y_test_pred_f2 = model_f2.predict(df_final_test_f2)
print("Accuracy Score for Train:", skm.accuracy_score(df_y_f2, y_train_pred_f2))
printScore(df_y_f2, y_train_pred_f2)
return y_test_pred_f2
# ML models
df_test[targetFeature] = results.ravel()
results2 = getTestResults()
submission[targetFeature2] = f2_le.inverse_transform(results2.ravel())
print(submission[targetFeature2].value_counts())
# generate submission file
submission.to_csv("./submission_XGB_RF1.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from matplotlib.lines import Line2D
from warnings import filterwarnings
sns.set_style("whitegrid")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing dataset to the notebook
df = pd.read_csv(
"/kaggle/input/amazon-top-50-bestselling-books-2009-2019/bestsellers with categories.csv"
)
# # Data Preparation ------ Getting the basic information of the dataset and then performing analysis ----------
df
# Checking the type after converting it into a dataframe
type(df)
# Retrieving the basic information of the dataset
df.info()
# The dataset looks correct, as there are no null values.
# This is an overview of the numeric data in the dataset, such as user rating, reviews, price and year
df.describe()
# Listing the name of columns present in the dataset
df.columns
# Determining the shape of the dataset
df.shape
# # Representing a small sample of the data as a Python dictionary to make retrieving values for a particular index easier
amazon_data_dict = {
"Name": [
"10-Day Green Smoothie Cleanse",
"11/22/63: A Novel",
"You Are a Badass: How to Stop Doubting Your Greatness and Start Living an Awesome Life",
"You Are a Badass: How to Stop Doubting Your Greatness and Start Living an Awesome Life",
],
"Author": ["JJ Smith", "Stephen King", "Jen Sincero", "Jen Sincero"],
"User Rating": [4.7, 4.6, 4.7, 4.7],
"Reviews": [17350, 2052, 14331, 14331],
"Price ": [8, 22, 8, 8],
"Year": [2016, 2011, 2018, 2019],
"Genre": ["Non Fiction", "Fiction", "Non Fiction", "Non Fiction"],
}
# Viewing the dataframe once again
df
amazon_data_dict["Author"]
df["Name"]
type(df["Name"])
df["Name"]
df["Reviews"][50]
df["User Rating"][50]
df.Genre
Genre_df = df[["Name", "Genre"]]
Genre_df
# Creating a copy of the original dataframe. The data in df_copy is completely separate from df, so changing values in one will not affect the other
df_copy = df.copy()
# Accessing the first row of the data using loc method
df.loc[0]
# Retrieving the first 5 rows
df.head(5)
df.tail(5)
df["Author"].unique()
df.loc[df["Author"] == "J. K. Rowling", "Author"] = "J.K. Rowling"
df.loc[df["Author"] == "George R. R. Martin", "Author"] = "George R.R. Martin"
df.sort_values("Reviews", ascending=False).head(5)
df.drop_duplicates("Name").sort_values("Reviews", ascending=False).head(5)
pie_1 = (
df.drop_duplicates("Name")
.sort_values("Reviews", ascending=False)["Genre"]
.head(10)
.value_counts()
)
sns.set_palette("viridis_r")
plt.figure(figsize=(8, 8))
plt.pie(
pie_1,
explode=[0, 0.15],
labels=["Fiction", "Non Fiction"],
autopct="%.1f%%",
shadow=True,
startangle=20,
)
plt.title(
"Genre Pie Chart for the top 10 Bestselling Books on Amazon (2009-2019)",
fontdict={"size": 14},
y=0,
)
|
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
warnings.filterwarnings("ignore")
tdcsfog_path = "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/tdcsfog"
## https://www.kaggle.com/code/arjanso/reducing-dataframe-memory-size-by-65
def reduce_memory_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
col_type = df[col].dtype.name
if (col_type != "datetime64[ns]") & (col_type != "category"):
if col_type != "object":
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif (
c_min > np.iinfo(np.int16).min
and c_max < np.iinfo(np.int16).max
):
df[col] = df[col].astype(np.int16)
elif (
c_min > np.iinfo(np.int32).min
and c_max < np.iinfo(np.int32).max
):
df[col] = df[col].astype(np.int32)
elif (
c_min > np.iinfo(np.int64).min
and c_max < np.iinfo(np.int64).max
):
df[col] = df[col].astype(np.int64)
else:
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
pass
else:
df[col] = df[col].astype("category")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage became: ", mem_usg, " MB")
return df
tdcsfog_list = []
for file_name in os.listdir(tdcsfog_path):
if file_name.endswith(".csv"):
file_path = os.path.join(tdcsfog_path, file_name)
file = pd.read_csv(file_path)
tdcsfog_list.append(file)
tdcsfog = pd.concat(tdcsfog_list, axis=0)
tdcsfog = reduce_memory_usage(tdcsfog)
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
return summ
summary("tdcsfog", tdcsfog)
g = sns.PairGrid(tdcsfog[["AccV", "AccML", "AccAP"]])
g.map_lower(plt.scatter, alpha=0.6)
g.map_diag(plt.hist, alpha=0.7)
X = tdcsfog.iloc[:, 1:4]
y1 = tdcsfog["StartHesitation"]
y2 = tdcsfog["Turn"]
y3 = tdcsfog["Walking"]
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, mean_squared_log_error
X_train, X_val, y1_train, y1_val = train_test_split(
X, y1, test_size=0.2, random_state=52
)
X_train, X_val, y2_train, y2_val = train_test_split(
X, y2, test_size=0.2, random_state=52
)
X_train, X_val, y3_train, y3_val = train_test_split(
X, y3, test_size=0.2, random_state=52
)
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
model_dict = {
#'Light GBM': LGBMRegressor(),
#'XG Boost' : XGBRegressor(),
"Cat Boost": CatBoostRegressor()
}
def evaluation(model_str, y_pred, y_pred_train, y_train):
results = {
"model": model_str
# 'rmlse': mean_squared_log_error(y_train,y_pred_train)
}
return results
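# The 'rmlse' metric above is left commented out; note that mean_squared_log_error raises
# an error on negative predictions, which these regressors can produce. As a hedged
# alternative (illustrative only, not used below), a plain MSE could be reported instead.
from sklearn.metrics import mean_squared_error
def evaluation_with_mse(model_str, y_pred_train, y_train):
    # hypothetical helper: returns the model name and its train MSE
    return {"model": model_str, "mse": mean_squared_error(y_train, y_pred_train)}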
result_list = []
for model in model_dict:
model_dict[model].fit(X_train, y1_train)
y1_pred = model_dict[model].predict(X_val)
y1_pred_train = model_dict[model].predict(X_train)
result = evaluation(model, y1_pred, y1_pred_train, y1_train)
result_list.append(result)
df_eval = pd.DataFrame(result_list)
df_eval
result_list = []
for model in model_dict:
model_dict[model].fit(X_train, y2_train)
y2_pred = model_dict[model].predict(X_val).round()
y2_pred_train = model_dict[model].predict(X_train).round()
result = evaluation(model, y2_pred, y2_pred_train, y2_train)
result_list.append(result)
df_eval = pd.DataFrame(result_list)
df_eval
result_list = []
for model in model_dict:
model_dict[model].fit(X_train, y3_train)
y3_pred = model_dict[model].predict(X_val)
y3_pred_train = model_dict[model].predict(X_train)
result = evaluation(model, y3_pred, y3_pred_train, y3_train)
result_list.append(result)
df_eval = pd.DataFrame(result_list)
df_eval
tdcsfog_test_path = (
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/test/tdcsfog"
)
tdcsfog_test_list = []
for file_name in os.listdir(tdcsfog_test_path):
if file_name.endswith(".csv"):
file_path = os.path.join(tdcsfog_test_path, file_name)
file = pd.read_csv(file_path)
file["Id"] = file_name[:-4] + "_" + file["Time"].apply(str)
tdcsfog_test_list.append(file)
tdcsfog_test = pd.concat(tdcsfog_test_list, axis=0)
tdcsfog_test
defog_test_path = "/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/test/defog"
defog_test_list = []
for file_name in os.listdir(defog_test_path):
if file_name.endswith(".csv"):
file_path = os.path.join(defog_test_path, file_name)
file = pd.read_csv(file_path)
file["Id"] = file_name[:-4] + "_" + file["Time"].apply(str)
defog_test_list.append(file)
defog_test = pd.concat(defog_test_list, axis=0)
defog_test
tdcsfog_test = reduce_memory_usage(tdcsfog_test)
defog_test = reduce_memory_usage(defog_test)
test = pd.concat([tdcsfog_test, defog_test], axis=0).reset_index(drop=True)
test
test_X = test.iloc[:, 1:4]
# refit one CatBoost regressor per target (the loops above reuse a single estimator
# instance, so only the last fit survives) and predict each target on the test features
pred_y1 = CatBoostRegressor().fit(X_train, y1_train).predict(test_X)
pred_y2 = CatBoostRegressor().fit(X_train, y2_train).predict(test_X)
pred_y3 = CatBoostRegressor().fit(X_train, y3_train).predict(test_X)
test["StartHesitation"] = pred_y1 # target variable for StartHesitation
test["Turn"] = pred_y2 # target variable for Turn
test["Walking"] = pred_y3 # target variable for Walking
test
submission = test.iloc[:, 4:].fillna(0.0)
submission.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # START
# **importing libraries**
import seaborn as sns
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from imblearn.under_sampling import NearMiss
df = pd.read_csv("/kaggle/input/creditcardfraud/creditcard.csv")
# # Data visualization
df.head()
df.describe()
plt.figure(figsize=(10, 6))
sns.countplot(data=df, x="Class")
# From the above graph we can see that our data is not balanced; it is imbalanced data
# **Let's check what will happen if we use imbalanced data**
# # Continuing with imbalanced data
X = df.drop(["Time", "Class"], axis=1)
y = df["Class"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
svc = SVC()
svc.fit(X_train, y_train)
pr = svc.predict(X_test)
print(confusion_matrix(y_test, pr), "\n", classification_report(y_test, pr))
# As we can see, we got a very good result for class 0 but a poor one for class 1. To avoid this we have to balance our training data
# # Solving Imbalanced data problem
df["Class"].value_counts()
# There are two ways to tackle this problem:
# 1. over-sampling
# 2. under-sampling
# Here, to save time, I am using the under-sampling method (a quick sketch of the over-sampling alternative is shown below for reference)
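# For reference, a minimal sketch of the over-sampling alternative mentioned above, using
# imblearn's RandomOverSampler (SMOTE would work similarly); it is illustrative only and
# the notebook continues with NearMiss under-sampling below.
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=42)
X_over, y_over = ros.fit_resample(X, y)
print("after over sampling:\n", pd.Series(y_over).value_counts())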
from imblearn.under_sampling import NearMiss
nm = NearMiss()
print("before uder sampling:\n", y_train.value_counts())
X, y = nm.fit_resample(X, y)
print("after re sampling:\n", y_train.value_counts())
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
# ## Model building
svc = SVC()
# **To find the best parameters I am using the grid search method**
prm = {
"C": [0.01, 0.1, 1, 10, 100, 1000, 10000],
"gamma": [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001],
"kernel": ["linear", "poly", "rbf", "sigmoid"],
}
grd = GridSearchCV(svc, prm, verbose=3)
grd.fit(X_train, y_train)
grd.best_params_
predt = grd.predict(X_test)
y_test.shape
print(confusion_matrix(y_test, predt), "\n", classification_report(y_test, predt))
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import sklearn as sk
# Import pandas and numpy, the basics for data analysis
# Then import matplotlib and seaborn for visualization
# Finally, import pandas_profiling for automated profiling
sdt = pd.read_csv(
"/kaggle/input/stellar-classification-dataset-sdss17/star_classification.csv"
)
# Load the data
sdt.head()
# View the data
sdt.info()
# There are no null values, and every feature except class is numeric
sdt.isnull().sum()
# Check for null values
# Get the shape of the data
sdt.shape
sdt.describe()
# Distribution and range of the numeric columns
# # Preprocessing
sns.countplot(x="class", data=sdt)
sdt["class"].value_counts()
# Count each category in the class column
sdt["class"] = sdt["class"].replace(["GALAXY", "STAR", "QSO"], [0, 1, 2])
print(sdt["class"].value_counts())
from imblearn.over_sampling import SMOTE
from collections import Counter
# imblearn.over_sampling to handle the imbalanced data
# collections.Counter for data statistics after sampling
x = sdt.drop(["class"], axis=1)
# Remove the class label column from the dataset and use the remaining columns as feature columns
y = sdt.loc[:, "class"].values
# Store the class label column in the y variable
# Oversample the dataset using the SMOTE method
sm = SMOTE(random_state=42)
print("Original dataset shape %s" % Counter(y))
x, y = sm.fit_resample(x, y)
print("Resampled dataset shape %s" % Counter(y))
#
sns.countplot(x="class", data=sdt)
plt.show()
sm = SMOTE(random_state=42)
x_resampled, y_resampled = sm.fit_resample(x, y)
resampled_df = pd.DataFrame(x_resampled, columns=x.columns)
resampled_df["class"] = y_resampled
sns.countplot(x="class", data=resampled_df)
plt.show()
# # Splitting training and test data
resampled_df.shape
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=42, stratify=y
)
x_test, x_val, y_test, y_val = train_test_split(
x_test, y_test, test_size=0.5, random_state=42, stratify=y_test
)
print("训练集样本数: ", x_train.shape[0])
print("验证集样本数: ", x_val.shape[0])
print("测试集样本数: ", x_test.shape[0])
# # Simple model training
print("Negative values in X_train: ", (x_train < 0).any().any())
print("Negative values in X_test: ", (x_test < 0).any().any())
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB, BernoulliNB
# MultinomialNB cannot handle the negative values in the dataset, so GaussianNB and BernoulliNB are used instead
from sklearn.linear_model import (
SGDClassifier,
LinearRegression,
Ridge,
Lasso,
LogisticRegression,
)
# Integer-encoded class labels (0, 1, 2) are not suitable for some regression models (LinearRegression, Ridge, Lasso); one-hot encoded labels could be used instead. However, here we only compute accuracy for the classification models, so they are not used.
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
gnb = GaussianNB()
bnb = BernoulliNB()
sgd = SGDClassifier()
kn = KNeighborsClassifier()
lr = LogisticRegression()
et = ExtraTreesClassifier()
dt = DecisionTreeClassifier()
rf = RandomForestClassifier()
gb = GradientBoostingClassifier()
xgb = XGBClassifier()
lgbm = LGBMClassifier()
gnb.fit(x_train, y_train)
bnb.fit(x_train, y_train)
sgd.fit(x_train, y_train)
kn.fit(x_train, y_train)
lr.fit(x_train, y_train)
et.fit(x_train, y_train)
dt.fit(x_train, y_train)
rf.fit(x_train, y_train)
gb.fit(x_train, y_train)
xgb.fit(x_train, y_train)
lgbm.fit(x_train, y_train)
predGNB = gnb.predict(x)
predBNB = bnb.predict(x)
predSGD = sgd.predict(x)
predKN = kn.predict(x)
predLR = lr.predict(x)
predET = et.predict(x)
predDT = dt.predict(x)
predRF = rf.predict(x)
predGB = gb.predict(x)
predXGB = xgb.predict(x)
predLGBM = lgbm.predict(x)
print("Scores:") # print scores
print("GaussianNB: ", accuracy_score(predGNB, y))
print("BernoulliNB: ", accuracy_score(predBNB, y))
print("SGDClassifier: ", accuracy_score(predSGD, y))
print("KNeighbours: ", accuracy_score(predKN, y))
print("LogisticReg: ", accuracy_score(predLR, y))
print("ExtraTrees: ", accuracy_score(predET, y))
print("DecisionTree: ", accuracy_score(predDT, y))
print("RandomForest: ", accuracy_score(predRF, y))
print("GradientBoosting: ", accuracy_score(predGB, y))
print("XGB: ", accuracy_score(predXGB, y))
print("LGBM: ", accuracy_score(predLGBM, y))
# # The KNN score is only 0.74, so we tune it
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
param_grid = {
"n_neighbors": [5],
"weights": ["uniform", "distance"],
"metric": ["euclidean", "manhattan"],
}
grid_knn_model = GridSearchCV(
knn,
param_grid=param_grid,
cv=3,
refit=True,
return_train_score=True,
n_jobs=-1,
verbose=2,
)
grid_knn_model.fit(x_train, y_train)
print(f"模型的最高准确率: {grid_knn_model.best_score_}")
estimator = grid_knn_model.best_estimator_
pred_y = estimator.predict(x_test)
# # Decision Tree
dt = DecisionTreeClassifier(random_state=42)
dt.fit(x_train, y_train)
print(f"train정확률: {round(dt.score(x_train,y_train) * 100, 2)}%")
print(f"test정확률: {round(dt.score(x_test, y_test) * 100, 2)}%")
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
plt.figure(figsize=(10, 7))
plot_tree(dt)
plt.show()
plt.figure(figsize=(10, 7))
plot_tree(
dt,
max_depth=2,
filled=True,
feature_names=[
"obj_ID",
"alpha",
"delta",
"u",
"g",
"r",
"i",
"z",
"run_ID",
"rerun_ID",
"cam_col",
"field_ID",
"spec_obj_ID",
"redshift",
"plate",
"MJD",
"fiber_ID",
],
)
plt.show()
dt = DecisionTreeClassifier(max_depth=3, random_state=42)
# Set the tree depth to 3
dt.fit(x_train, y_train)
# Train dt
print(f"train정확률: {round(dt.score(x_train,y_train) * 100, 2)}%")
print(f"test정확률: {round(dt.score(x_test, y_test) * 100, 2)}%")
plt.figure(figsize=(20, 15))
plot_tree(
dt,
filled=True,
feature_names=[
"obj_ID",
"alpha",
"delta",
"u",
"g",
"r",
"i",
"z",
"run_ID",
"rerun_ID",
"cam_col",
"field_ID",
"spec_obj_ID",
"redshift",
"plate",
"MJD",
"fiber_ID",
],
)
plt.show()
# # Random Forest
from sklearn.model_selection import cross_validate
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_jobs=-1, random_state=42)
scores = cross_validate(
rf, x_train, y_train, cv=2, return_train_score=True, n_jobs=-1, verbose=2
)
print(np.mean(scores["train_score"]), np.mean(scores["test_score"]))
print(f'train accuracy: {round(np.mean(scores[r"train_score"]) * 100, 2)}%')
print(f'test accuracy: {round(np.mean(scores[r"test_score"]) * 100, 2)}%')
rf.fit(x_train, y_train)
# Train rf
print(rf.feature_importances_)
# Print the feature importances
rf = RandomForestClassifier(oob_score=True, n_jobs=-1, random_state=42)
# With oob_score=True, each decision tree is evaluated on the samples left out of its bootstrap sample
rf.fit(x_train, y_train)
# Train rf
print(rf.oob_score_)
# Print the average OOB score of the decision trees
# # Random Forest tuning
from sklearn.model_selection import GridSearchCV
param_grid = {
"n_estimators": [10, 50, 100, 200],
"max_depth": [10],
"min_samples_split": [2],
"min_samples_leaf": [2],
"bootstrap": [True],
}
rf = RandomForestClassifier()
grid_search = GridSearchCV(
estimator=rf, param_grid=param_grid, cv=2, n_jobs=-1, verbose=2
)
grid_search.fit(x_train, y_train)
best_params = grid_search.best_params_
print("Best parameters found: ", best_params)
best_rf = RandomForestClassifier(**best_params)
best_rf.fit(x_train, y_train)
y_pred = best_rf.predict(x_test)
best_rf_accuracy = accuracy_score(y_test, y_pred)
print("Best RandomForest accuracy: ", best_rf_accuracy)
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
# Define the parameter grid
param_grid = {
"n_estimators": [200],
"max_depth": [None],
"min_samples_split": [2],
"min_samples_leaf": [2],
"bootstrap": [True],
}
# Hyperparameter tuning
grid_search = GridSearchCV(
estimator=rf, param_grid=param_grid, cv=2, n_jobs=-1, verbose=2
)
grid_search.fit(x_train, y_train)
print("END")
print(f"최고의 모델 : ", grid_search.best_params_)
print(f"모델의 최고 정확도 : {grid_search.best_score_}")
|
# # IMDB movies analysis
# ## Table of Contents
# Introduction
# Data Wrangling
# Exploratory Data Analysis
# Conclusions
# ## The Questions:
# > 1- Which type of movie gains the most profit
# >
# > 2- Which movies have the highest revenue, profit and budget
# >
# > 3- Identify the movies with the highest and the lowest revenue
# >
# > 4- What is the year that has the highest profit
# >
# > 5- The highest and lowest runtime movies
# Import the needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#
# ## Data Wrangling
#
# Load the data and check the head of the data to ensure that it all loaded successfully
df = pd.read_csv("/kaggle/input/tmdb-movies-dataset/tmdb-movies.csv")
pd.set_option("display.max_columns", None)
df.head(2)
# check all the data
df.info()
# Show the continuous data
df.describe()
# show the categorical data
df.describe(include="O")
# ### Data Cleaning (Here we will clean the data after we preview it)
# drop all the columns that we don't need
df = df.drop(
columns=[
"id",
"imdb_id",
"homepage",
"tagline",
"keywords",
"overview",
"budget_adj",
"revenue_adj",
]
)
# convert the column(release_date) into a date
df["release_date"] = pd.to_datetime(df["release_date"])
# Filter the data by ignoring zero values for revenue, budget and runtime
df = df[(df["revenue"] > 0) & (df["budget"] > 0) & (df["runtime"] > 0)]
# Drop the duplicated values and any nan values
df = df.drop_duplicates()
df = df.dropna()
# Make another column named Profit
df["Profit"] = df["revenue"] - df["budget"]
# Check the data after cleaning
df.info()
# * Looks good. After cleaning the data, the next step is exploratory data analysis
# ## Exploratory Data Analysis
# ### Question 1 (Which type of movie gains the most profit)
# split the symbol "|" from the genres
df["genres"] = df["genres"].apply(lambda x: x.split("|")[0].strip())
# Check the sum of the profit, revenue and budget for each genre, sorted by the highest profit
genres_BRP = (
df.groupby("genres")
.agg({"genres": ["size"], "Profit": ["sum"], "revenue": ["sum"], "budget": ["sum"]})
.sort_values(by=("Profit", "sum"), ascending=False)
)
genres_BRP
# * As we can see, Action movies gain the most profit, with a total profit of 57 billion across 684 movies
# * Then come Adventure movies, with a total of 49 billion across 312 movies
# * The last are Documentary and TV movies, with total profits of 512 million and 37 million across 23 movies and 1 movie respectively
# Make a visualization of the genres with revenue, profit and budget and the count of each one
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
x = genres_BRP["Profit"]["sum"].index
y = genres_BRP["Profit"]["sum"].values
ax[0, 0].bar(list(x), list(y))
ax[0, 0].tick_params(axis="x", labelrotation=90)
ax[0, 0].set_title("Genres net profit")
ax[0, 0].set_ylabel("Amount")
fig.set_figheight(12) # figure height in inches
x = genres_BRP["revenue"]["sum"].index
y = genres_BRP["revenue"]["sum"].values
ax[0, 1].bar(list(x), list(y))
ax[0, 1].tick_params(axis="x", labelrotation=90)
ax[0, 1].set_title("Genres total revenue")
# fig.set_figheight(20) # figure height in inches
x = genres_BRP["budget"]["sum"].index
y = genres_BRP["budget"]["sum"].values
ax[1, 0].bar(list(x), list(y))
ax[1, 0].tick_params(axis="x", labelrotation=90)
ax[1, 0].set_title("Genres total cost")
# fig.set_figheight(20) # figure height in inches
x = genres_BRP["genres"]["size"].index
y = genres_BRP["genres"]["size"].values
ax[1, 1].bar(list(x), list(y))
ax[1, 1].tick_params(axis="x", labelrotation=90)
ax[1, 1].set_title("No. of movies")
# fig.set_figheight(20) # figure height in inches
# * Action movies have the highest number of movies and the highest profits
# ### Question 2 (Which movies have the highest revenue, profit and budget)
# Explore the movies that have the highest revenue, profit and budget
X = df[df["revenue"] == df["revenue"].max()][
["revenue", "budget", "original_title", "genres", "release_year", "Profit"]
].append(
df[df["Profit"] == df["Profit"].max()][
["revenue", "budget", "original_title", "genres", "release_year", "Profit"]
].append(
df[df["budget"] == df["budget"].max()][
["revenue", "budget", "original_title", "genres", "release_year", "Profit"]
]
)
)
X
# * The highest profit is Avatar
# * The highest revenue is Avatar
# * The highest budget is The Warrior's Way
# ### Question 3 (Identify the movies with the highest and the lowest revenue)
# Identify the movies with the highest and the lowest revenue
max_rev = df.loc[df["revenue"].idxmax()]
min_rev = df.loc[df["revenue"].idxmin()]
concat_df = (
pd.concat([max_rev, min_rev], axis=1)
.rename(columns={df["revenue"].idxmax(): "Highest revenue"})
.rename(columns={df["revenue"].idxmin(): "Lowest revenue"})
)
concat_df
# * The highest revenue goes to Avatar
# * The lowest revenue goes to Shattered Glass
# ### Question 4 (What is the year that has the highest profit)
# the year that has the highest profit
year_BRP = df.groupby("release_year").agg(
{"budget": ["sum"], "revenue": ["sum"], "Profit": ["sum"]}
) # .sort_values(("Profit","sum"),ascending=False)
year_BRP
# Visualize the years with the profit, revenue and budget
year_BRP.plot(ylabel="Amount", title="Budget, revenue and profit by year", legend=True)
# * The total revenue is increasing from 1960 to 2015 as shown above
# ### Question 5 (The highest and lowest runtime movies)
# Identify the movies with the highest and the lowest runtime
max_time = df.loc[df["runtime"].idxmax()]
min_time = df.loc[df["runtime"].idxmin()]
concat_df = (
pd.concat([max_time, min_time], axis=1)
.rename(columns={df["runtime"].idxmax(): "Highest runtime"})
.rename(columns={df["runtime"].idxmin(): "Lowest runtime"})
)
concat_df
|
#### The data consists of credit card transactions. Some of these are fraudulent.
#### The objective is to identify frauds as best as possible.
## importing commoon libraries and loading the data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
raw_data = pd.read_csv("../input/creditcardfraud/creditcard.csv")
raw_data.describe()
#### Analysis of data suggests
#### data is complete with no missing values.
#### data is unscaled
#### data is unbalanced (highly skewed) and the +ve class for the target is very small
#### therefore accuracy is not a good measure of model effectiveness
#### we will therefore use f1_score to measure effectiveness of model.
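# A quick hedged illustration of the point above: a trivial baseline that predicts
# "not fraud" for every transaction gets very high accuracy but zero f1 on the positive
# class (the names below are only for this illustration; it uses raw_data loaded above).
from sklearn.metrics import accuracy_score, f1_score
all_negative = np.zeros(len(raw_data), dtype=int)  # illustrative all-negative baseline
print("accuracy of all-negative baseline:", accuracy_score(raw_data["Class"], all_negative))
print("f1 of all-negative baseline:", f1_score(raw_data["Class"], all_negative, zero_division=0))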
#### Creating simple base models
x = raw_data.iloc[:, 0:-1]
y = raw_data.iloc[:, -1]
x.shape, y.shape
# Separating the train test data and standardization of the train data
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42, shuffle="True", stratify=y
)
scaler = StandardScaler()
x_scld_train = scaler.fit_transform(x_train)
# Fitting svm model
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
from sklearn.model_selection import cross_val_score, cross_val_predict
sgd1 = SGDClassifier(random_state=100, penalty="l2", loss="hinge")
skf = StratifiedKFold(n_splits=5)
cross_val_score(sgd1, x_scld_train, y_train, cv=skf, scoring="f1")
pd.DataFrame(
confusion_matrix(y_train, cross_val_predict(sgd1, x_scld_train, y_train, cv=skf))
)
# comparing result with test data
pd.DataFrame(
confusion_matrix(
y_test, cross_val_predict(sgd1, scaler.transform(x_test), y_test, cv=skf)
)
)
#### svm has performed quite poorly.
#### test results indicate significant difference from training results
# fitting random forest for comparison
from sklearn.ensemble import RandomForestClassifier
rf1 = RandomForestClassifier(random_state=100, n_estimators=100)
rf1.fit(x_train, y_train)
f1_score(y_train, rf1.predict(x_train))
pd.DataFrame(confusion_matrix(y_train, rf1.predict(x_train)))
# comparing result with test data
pd.DataFrame(confusion_matrix(y_test, rf1.predict(x_test)))
#### random forest model has clearly overfitted the train data
#### Let's simplify the data by reducing
#### a. the complexity of the data, b. the volume of non-fraud data
raw_data.columns
col = [
"Class",
"Time",
"V1",
"V2",
"V3",
"V4",
"V5",
"V6",
"V7",
"V8",
"V9",
"V10",
"V11",
"V12",
"V13",
"V14",
"V15",
"V16",
"V17",
"V18",
"V19",
"V20",
"V21",
"V22",
"V23",
"V24",
"V25",
"V26",
"V27",
"V28",
"Amount",
]
corr_data = raw_data[col]
corr_data.head()
# running correlation analysis to see if features are relevant
pd.DataFrame(corr_data.corr().round(3))
#### Let us drop columns whose correlation with the target is within the range of +-2% (a quick sketch below lists these candidates)
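# A minimal sketch (added for clarity) of how that +-2% rule could be applied
# programmatically; the manually identified column list further below stays authoritative.
target_corr = corr_data.corr()["Class"].drop("Class")
low_corr_candidates = target_corr[target_corr.abs() < 0.02].index.tolist()
print("low-correlation candidates:", low_corr_candidates)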
#### visual display of sample of 2 features in both categories to see difference
sns.scatterplot(x=raw_data.values[:, 2], y=raw_data.values[:, 4], hue=raw_data["Class"])
#### here we can clearly see concentration of data for the 2 values of the target
sns.scatterplot(x=raw_data.values[:, 23], y=raw_data.values[:, 24], hue=raw_data["Class"])
#### here we do not see any pattern. clearly we can consider eliminating these features
#### column numbers identified for removal 0,8,13,15,23,24,25,26,27,28,29
relevant_data = raw_data.drop(
raw_data.columns[[0, 8, 13, 15, 23, 24, 25, 26, 27, 28, 29]], axis=1
)
relevant_data.head()
relevant_data.reset_index()
# reducing imbalance of data while training model
# replicating the split
train_data2, test_data2 = train_test_split(
relevant_data,
test_size=0.2,
random_state=42,
shuffle=True,
stratify=relevant_data["Class"],
)
train_data2.shape, test_data2.shape
# creating a training sample which is less imbalanced
min_size = train_data2.groupby(by="Class").count().min()[-1]
min_size
max_extra = 10000  # renamed from "max" to avoid shadowing the builtin
train_sample = (
    train_data2.groupby("Class", as_index=False)
    .apply(
        lambda x: x.sample(min(len(x), min_size * 5 + np.random.randint(max_extra)))
    )
    .reset_index(drop=True)
)
train_sample.shape
#### we now have a small dataset which has the following properties
#### a. less imbalanced , b.fewer and more relevant features
# Creating comparative models
x_train2 = train_sample.drop(["Class"], axis=1)
y_train2 = train_sample["Class"]
x_test2 = test_data2.drop(["Class"], axis=1)
y_test2 = test_data2["Class"]
scaler2 = StandardScaler()
x_scld_train2 = scaler2.fit_transform(x_train2)
# svm model
sgd2 = SGDClassifier(random_state=100, penalty="l2", loss="hinge")
cross_val_score(sgd2, x_scld_train2, y_train2, cv=skf, scoring="f1")
pd.DataFrame(
confusion_matrix(y_train2, cross_val_predict(sgd2, x_scld_train2, y_train2, cv=skf))
)
# comparison with test data
pd.DataFrame(
confusion_matrix(
        y_test2, cross_val_predict(sgd2, scaler2.transform(x_test2), y_test2, cv=skf)
)
)
#### Test results of the revised SVM model are very similar to the original model.
# Random forest model
rf2 = RandomForestClassifier(random_state=100, n_estimators=20)
rf2.fit(x_train2, y_train2)
f1_score(y_train2, rf2.predict(x_train2))
pd.DataFrame(confusion_matrix(y_train2, rf2.predict(x_train2)))
# comparing result with test data
pd.DataFrame(confusion_matrix(y_test2, rf2.predict(x_test2)))
f1_score(y_test2, rf2.predict(x_test2))
#### test results show revised random forest model has responded negatively to changes
# tuning best model ie. rf1
from sklearn.model_selection import GridSearchCV
param_grid = {
"n_estimators": [10, 25, 100],
"min_samples_split": [3, 5, 10],
"class_weight": ["balanced", None],
"max_depth": [3, 5, None],
}
grid = GridSearchCV(rf1, param_grid, n_jobs=-1, verbose=1, cv=2, scoring="f1")
grid.fit(x_train, y_train)
grid.best_score_
grid.best_estimator_.get_params()
rf3 = RandomForestClassifier(
random_state=100, class_weight=None, min_samples_split=3, n_estimators=100
)
rf3.fit(x_train, y_train)
f1_score(y_train, rf3.predict(x_train))
pd.DataFrame(confusion_matrix(y_train, rf3.predict(x_train)))
# comparing result with test data
pd.DataFrame(confusion_matrix(y_test, rf3.predict(x_test)))
f1_score(y_test, rf3.predict(x_test))
# printing precision recall curve
from sklearn.metrics import precision_recall_curve
y_train_pred = rf3.predict(x_train)
precisions, recalls, thresholds = precision_recall_curve(y_train, y_train_pred)
plt.plot(thresholds, precisions[:-1], label="Precision", color="red")
plt.plot(thresholds, recalls[:-1], label="Recall", color="Green")
plt.xlabel("Threshold")
plt.show()
|
# # Feature Selection
# **Input**: Cleaned data from the "Data Cleaning" phase and a list of feature correlations from the "EDA" phase.
# **Output**: A list of selected features to use for modeling. We will use this list to filter the pivot dataframe and save it to PostgreSQL for further use.
# In this phase, we will select features that will be used to create the models.
# The selection consists of two main steps aimed at reducing the number of features used:
# 1. Remove features using the "feature_correlation_0.8" document created in the "EDA" phase.
# 2. Remove features using known methods such as univariate selection and recursive feature elimination.
# **Question**: Should each target have its own set of "main" features, or can we use a set of common features for every target we want to predict?
# To answer this question, we need to identify features that are useful for specific targets and then remove the ones that have no value for many of them.
# ## Removing Features with High Correlation
# Let's load the document containing feature correlations created during the "EDA" phase.
from pymongo import MongoClient
import pandas as pd
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
connection_string = user_secrets.get_secret("mongo_connection_string")
client = MongoClient(connection_string)
def get_document(collection_name, document_id):
database = client["portfolio"]
return database[collection_name].find({"_id": document_id}).next()
feature_correlation = get_document("feature_selection", "feature_correlation_0.8")
corr_df = pd.DataFrame(
feature_correlation["data"], columns=feature_correlation["data"][0].keys()
)
corr_df
# We create a dataframe called *corr_df* which contains the correlation data from the "feature_correlation_0.8" document stored on MongoDB. The columns in the dataframe are as follows:
# - col_1: ID of the first feature
# - col_2: ID of the second feature
# - corr: absolute correlation between col_1 and col_2
# - c_1: count of data points for col_1
# - c_2: count of data points for col_2
# - corr_1: mean absolute correlation of col_1 with the targets
# - corr_2: mean absolute correlation of col_2 with the targets
# We now iterate through the rows of this dataframe to populate a list containing the IDs of features to be removed.
# First, we load the features dataframe. In the below example, we show how the selection is performed.
# get_eda_df is the method described in the "EDA" phase; it retrieves data from PostgreSQL and returns two dataframes,
# one with values and one with names. Another helper function is get_indicator_name, which we use to retrieve feature titles.
from portfolio_optimization_helper import get_eda_df, get_indicator_name
df, name_df = get_eda_df()
df
import matplotlib.pyplot as plt
def feature_correlation(r, plot=False):
col_1 = r["col_1"]
col_2 = r["col_2"]
count_1 = r["c_1"]
count_2 = r["c_2"]
corr_1 = r["corr_1"]
corr_2 = r["corr_2"]
removed = False
# Choose to remove feature2 if feature1 has more datapoints and is more correlated with targets, else the opposite.
if count_1 >= count_2 and corr_1 >= corr_2:
to_remove = col_2
removed = True
elif count_2 >= count_1 and corr_2 >= corr_1:
to_remove = col_1
removed = True
# plot features in the same chart to see how much they are correlated
if removed and plot:
title_1 = get_indicator_name(name_df, col_1)
title_2 = get_indicator_name(name_df, col_2)
fig, ax = plt.subplots()
col_df = df[[col_1, col_2]].dropna()
ax.plot(col_df.index, col_df[col_1], label=f"{col_1} - {title_1}")
ax.set_ylabel(f"{col_1} - {title_1}")
ax2 = ax.twinx()
ax2.plot(col_df.index, col_df[col_2], label=f"{col_2} - {title_2}", color="red")
ax2.set_ylabel(f"{col_2} - {title_2}")
lines = ax.get_lines() + ax2.get_lines()
ax.legend(lines, [l.get_label() for l in lines], loc="upper center")
ax.set_ylim(ymin=min(col_df[col_1]))
ax2.set_ylim(ymin=min(col_df[col_2]))
return to_remove
to_remove = feature_correlation(corr_df.iloc[0], plot=True)
print(f"FEATURE TO REMOVE {to_remove}")
print(corr_df.iloc[0])
# In this example, we can see that feature100 and feature143 have a correlation of 0.920896. Both features have 750 data points.
# Since corr_1 is less than corr_2, meaning feature100 is less correlated than feature143 with the targets, we decide to remove feature100.
# We can now iterate through all feature_correlation rows to find the feature to remove.
#
remove_list = []
for i, r in corr_df.iterrows():
    # if either feature is already in remove_list, skip
if r["col_1"] in remove_list or r["col_2"] in remove_list:
continue
f = feature_correlation(r, plot=False)
if f is not None:
remove_list.append(f)
print(f"FEATURES TO REMOVE: {len(remove_list)}")
# With this step we identified 67 features to remove because of high correlation.
# Then we store this list on MongoDB.
def upsert_document(collection_name, document):
database = client["portfolio"]
return database[collection_name].replace_one(
{"_id": document["_id"]}, document, upsert=True
)
# create document
feature_to_remove = {"_id": "feature_to_remove_correlation", "data": remove_list}
# load document to MongoDB
upsert_document("feature_selection", feature_to_remove)
# We can now filter our dataframe with this feature_to_remove list. We also modify get_eda_df adding the following code before creating the pivot dataframe.
# if remove_correlation:
# to_remove_corr = get_document('feature_selection','feature_to_remove_correlation')['data']
# df = df[~df["column_name"].isin(to_remove_corr)]
df, name_df = get_eda_df(remove_correlation=True)
df
# Resulting features are less correlated. In total we have 132 remaining features.
# ## Removing features with univariate selection
# Univariate feature selection is a widely used technique in machine learning and data science to select the most relevant features in a dataset.
# The basic idea behind this technique is to evaluate each feature individually and then rank them according to their correlation with the target variable.
# In other words, univariate feature selection methods assess the usefulness of each feature independently of the other features in the dataset.
# There are several methods to perform univariate feature selection, including:
# 1. Pearson correlation: This method computes the linear correlation between each feature and the target variable.
# 2. ANOVA F-test: This method tests the difference between the means of each feature across different classes of the target variable.
# 3. Mutual information: This method measures the amount of information that each feature provides about the target variable.
# Once the features are ranked according to their relevance, we can choose the top k features to use in our model.
# This can help reduce the dimensionality of the dataset and improve the performance of the model, as irrelevant or redundant features can introduce noise and bias in the model.
# It is important to note that univariate feature selection only considers the relationship between each feature and the target variable, and not the interactions between the features themselves.
# That is why we removed correlated feature before.
# We are going to evaluate univariate selection with all three methods and then observe the ranks and their distribution to decide which common features are useful for target prediction.
# Scikit Learn has various implementations of univariate selection. We are going to use them in the following example.
from sklearn.feature_selection import (
SelectKBest,
f_regression,
r_regression,
mutual_info_regression,
)
target_columns = [col for col in df.columns if "target" in col]
feature_columns = [col for col in df.columns if "feature" in col]
# create a new dataframe for storing scores
score_df = pd.DataFrame([[x] for x in feature_columns], columns=["feature"])
# score function to use for evaluation
score_functions = {
"f_regr": f_regression,
"r_regr": r_regression,
"m_regr": mutual_info_regression,
}
for score_f in score_functions:
# We are going to use SelectKBest class to identify best features
kbest_model = SelectKBest(score_func=score_functions[score_f], k="all")
# for each target we evaluate best k feature and store scores
for t_col in target_columns:
r_df = df[feature_columns + [t_col]]
r_df = r_df.dropna()
X = r_df[feature_columns]
Y = r_df[t_col]
fit = kbest_model.fit(X, Y)
score_df[f"{t_col}_{score_f}"] = fit.scores_
score_df = score_df.set_index("feature")
score_df
# We used **SelectKBest** to select features based on the k highest scores. In this particular case, we kept all scores and calculated the rank among the score functions.
# We evaluated scores using 3 different score functions:
# - **f_regression**: Univariate linear regression tests returning F-statistic and p-values.
# - **r_regression**: Compute Pearson's r for each feature and the target.
# - **mutual_info_regression**: Estimate mutual information for a continuous target variable. Mutual information (MI) between two random variables is a non-negative value that measures the dependency between the variables. It is equal to zero if and only if two random variables are independent, and higher values mean higher dependency.
# In the resulting score_df, we store scores for each feature and each score function.
# We want to keep the features that perform better overall (across different targets and across different scoring functions).
# Because different scoring methods have different value ranges, we need to transform the scores into rankings.
for col in score_df.columns:
# Transform in absolute values
if "r_regr" in col:
score_df[col] = abs(score_df[col])
# Calculate rank (lower is better)
score_df[col] = score_df[col].rank()
# Using the describe functionality of a dataframe we can see the distribution of the rankings.
describe_df = score_df.apply(pd.DataFrame.describe, axis=1)
describe_df
# We define the threshold as 1/3 of the number of features.
# This is arbitrary and depends on how many features you want to keep.
# The more features you want to keep, the higher the threshold
threshold = int(len(feature_columns) / 3)
original_len = len(describe_df)
# we remove features whose first quartile is greater than the threshold.
# This means that 75% of the 36 scoring/target combinations (27) have a ranking higher than the threshold
univariate_feature_selection = describe_df[describe_df["25%"] >= threshold]
univariate_feature_selection = univariate_feature_selection.index
univariate_feature_selection = list(univariate_feature_selection)
print(f"# TARGET: {len(target_columns)}")
print(f"# FEATURES: {len(feature_columns)}")
print(f"FEATURE SELECTED FROM UNIVARIATE: {len(univariate_feature_selection)}")
# Univariate selection removes 64 features.
# ## Removing features with Recursive Feature Elimination (RFE)
# Recursive feature elimination (RFE) is a popular feature selection technique in machine learning and data science that aims to identify the most important features in a dataset by recursively eliminating the least relevant features. Unlike univariate feature selection methods that evaluate each feature independently, RFE takes into account the interactions between the features and their impact on the performance of the model.
# The basic idea behind RFE is to start with all the features in the dataset and train a model on them. The least important feature(s) are then removed from the dataset, and a new model is trained on the remaining features. This process is repeated recursively until a desired number of features is reached or until the performance of the model stops improving.
# There are several methods that can be used to rank the importance of the features in RFE, including:
# - Coefficient values: This method ranks the features according to the magnitude of their coefficients in a linear model.
# - Feature importances: This method ranks the features according to their importance scores in a tree-based model.
# - Recursive feature elimination with cross-validation (RFECV): This method uses cross-validation to evaluate the performance of the model at each iteration and select the optimal number of features.
# RFE has several advantages over other feature selection techniques. Firstly, it takes into account the interactions between the features, which can be important in datasets with complex relationships between the variables. Secondly, it can be used with a wide range of models, including linear models, tree-based models, and support vector machines (SVMs). Finally, RFE provides a ranking of the features, which can be useful in understanding the underlying patterns in the data.
# However, RFE can be computationally expensive, especially for large datasets and complex models. Moreover, the optimal number of features to select may depend on the specific problem and may require tuning.
# In summary, recursive feature elimination is a powerful feature selection technique that can help us identify the most important features in a dataset. By recursively eliminating the least relevant features, we can improve the performance of our models and gain better insights into the underlying patterns in the data. However, it is important to be aware of the computational cost and to carefully tune the parameters of the method.
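# The RFECV variant mentioned above can be sketched as follows (a minimal illustration, not part of
# this notebook's pipeline; the estimator, CV scheme and scoring below are assumptions, not the
# notebook's actual configuration):
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
def rfecv_sketch(X, y):
    # Let cross-validation choose how many features to keep, eliminating one feature per iteration
    selector = RFECV(
        estimator=LinearRegression(),
        step=1,
        cv=KFold(n_splits=5),
        scoring="neg_mean_squared_error",
    )
    selector.fit(X, y)
    # support_ is a boolean mask of the kept features; ranking_ is 1 for selected features
    return selector.support_, selector.ranking_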
# We selected different models to use with RFE: LinearRegression, DecisionTreeRegressor, SGDRegressor, and BayesianRidge.
# Again we use Scikit Learn implementations in the following examples.
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression, SGDRegressor, BayesianRidge
from sklearn.tree import DecisionTreeRegressor
models = {
"lin_regr": LinearRegression,
"tree_regr": DecisionTreeRegressor,
"sgd_regr": SGDRegressor,
"ridge_regr": BayesianRidge,
}
score_df = pd.DataFrame([[x] for x in feature_columns], columns=["feature"])
for m in models:
for t_col in target_columns:
r_df = df[feature_columns + [t_col]]
r_df = r_df.dropna()
X = r_df[feature_columns]
Y = r_df[t_col]
model = models[m]()
rfe = RFE(model, n_features_to_select=1)
fit = rfe.fit(X, Y)
score_df[f"{t_col}_{m}"] = fit.ranking_
score_df = score_df.set_index("feature")
score_df
# We used **RFE**, a Scikit-Learn class for feature ranking with recursive feature elimination.
# We have evaluated scores with 4 different models:
# - **LinearRegression**: Ordinary least squares Linear Regression. It fits a linear model with coefficients w = (w1, ..., wp) to minimize the residual sum of squares between the observed targets in the dataset, and the targets predicted by the linear approximation.
# - **DecisionTreeRegressor**: A decision tree regressor. The goal of a decision tree is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
# - **SGDRegressor**: Linear model fitted by minimizing a regularized empirical loss with SGD. SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate).
# - **BayesianRidge**: Fit a Bayesian ridge model. Bayesian regression techniques can be used to include regularization parameters in the estimation procedure: the regularization parameter is not set in a hard sense but tuned to the data at hand.
# Here we have already the rankings, so no need to transform values.
# We can directly call describe to see the rankings distribution.
describe_df = score_df.apply(pd.DataFrame.describe, axis=1)
describe_df
# remove features whose first quartile is greater than or equal to the threshold.
rfe_selection = describe_df[describe_df["25%"] >= threshold]
rfe_selection = rfe_selection.index
rfe_selection = list(rfe_selection)
print(f"# TARGET: {len(target_columns)}")
print(f"# FEATURES: {len(feature_columns)}")
print(f"FEATURE SELECTED FROM UNIVARIATE: {len(univariate_feature_selection)}")
print(f"FEATURE SELECTED FROM UNIVARIATE: {len(rfe_selection)}")
# To remove duplicates
total_feature_to_remove = list(set(univariate_feature_selection + rfe_selection))
print(f"TOTAL FEATURES TO REMOVE: {len(total_feature_to_remove)}")
# We merged the two lists of features we identified with univariate feature selection and with recursive feature elimination to find the set of features to remove from the dataframe.
# ## Conclusion - storing data on PostgreSQL
# To conclude this "Feature Selection" phase we are going to upload data into our PostgreSQL database in a new table called pivot.
# This table will store the pivoted dataframe filtered down to the selected features.
# First we upload the list of features to remove to MongoDB, and then we drop them from the dataframe as we did previously.
# load document to MongoDB
upsert_document(
"feature_selection", {"_id": "selected_features", "data": total_feature_to_remove}
)
# Now we can update get_eda_df with the following code, before creating pivot dataframe:
# if remove_correlation:
# to_remove_corr = get_document('feature_selection','feature_to_remove_correlation')['data']
# df = df[~df["column_name"].isin(to_remove_corr)]
# if remove_selected:
# to_remove_selected = get_document('feature_selection','selected_features')['data']
# df = df[~df["column_name"].isin(to_remove_selected)]
df, name_df = get_eda_df(remove_correlation=True, remove_selected=True)
df
# In this dataframe we have the remaining selected features; let's insert it into PostgreSQL.
# First we delete any existing pivot table, to make this code re-executable.
drop_statement = f"DROP TABLE IF EXISTS pivot"
create_statement = "CREATE TABLE pivot (date date"
df["date"] = df.index
df.reset_index(drop=True)
for col in df.columns:
if col == "date":
continue
create_statement += "," + col + " numeric"
create_statement += ")"
# We use our helper functions to execute SQL commands and insert data into postgreSQL.
from portfolio_optimization_helper import execute_db_commands, insert_df_into_table
execute_db_commands([drop_statement, create_statement])
insert_df_into_table(df, "pivot")
# Let's see our data stored in the pivot table.
from portfolio_optimization_helper import get_df_from_table
df = get_df_from_table("pivot")
df["date"] = pd.to_datetime(df["date"])
df = df.sort_values(by="date").set_index("date")
df = df.apply(pd.to_numeric)
df = df.asfreq("MS")
df
|
# ## The forecasting model: Facebook’s Prophet
# The most commonly used models for time-series forecasting are autoregressive models. Briefly, an autoregressive model specifies that the output variable depends linearly on its own previous values and on a stochastic term (an imperfectly predictable term).
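# For example, a first-order autoregressive model AR(1) takes the form **y(t) = c + φ·y(t-1) + ε(t)**, where φ is the autoregressive coefficient and ε(t) is the stochastic (noise) term.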
# Recently, in an attempt to develop a model that could capture seasonality in time-series data, Facebook developed the Prophet model, which is publicly available. We will use this model here. Prophet is able to capture daily, weekly and yearly seasonality along with holiday effects by implementing an additive regression model.
# The mathematical equation behind the Prophet model is defined as:
# **y(t) = g(t) + s(t) + h(t) + e(t)**
# where g(t) represents the trend. Prophet uses a piecewise linear model for trend forecasting.
# s(t) represents periodic changes (weekly, monthly, yearly).
# h(t) represents the effects of holidays (recall: Holidays impact businesses).
# e(t) is the error term.
# The Prophet model fitting procedure is usually very fast (even for thousands of observations) and it does not require any data pre-processing. It also handles missing data and outliers.
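# The four sections below (TESLA, TCS, S&P Global, Bitcoin) repeat the same load/fit/plot steps.
# As a minimal sketch (the helper name and its defaults are illustrative, not part of the original
# notebook), they could be wrapped in a single function:
def prophet_close_forecast(csv_path, title, periods=365):
    # imports kept local so the sketch is self-contained
    import pandas as pd
    import matplotlib.pyplot as plt
    from fbprophet import Prophet
    raw = pd.read_csv(csv_path)
    ts = raw[["Date", "Close"]].rename(columns={"Date": "ds", "Close": "y"})
    model = Prophet(daily_seasonality=True)
    model.fit(ts)
    future = model.make_future_dataframe(periods=periods)
    forecast = model.predict(future)
    model.plot(forecast)
    plt.title(title)
    plt.xlabel("Date")
    plt.ylabel("Close Price")
    plt.show()
    return forecast
# Example usage (mirrors the TESLA cell below):
# prophet_close_forecast("../input/tesla-share/TSLA (1).csv", "Prediction of the Tesla Stock Price using Prophet")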
# # TESLA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the dataset using pandas
data = pd.read_csv("../input/tesla-share/TSLA (1).csv")
data.head()
data.describe()
# Select only the important features i.e. the date and price
data = data[["Date", "Close"]] # select Date and Price
# Rename the features: These names are NEEDED for the model fitting
data = data.rename(
columns={"Date": "ds", "Close": "y"}
) # renaming the columns of the dataset
data.head()
from fbprophet import Prophet
m = Prophet(daily_seasonality=True) # the Prophet class (model)
m.fit(data) # fit the model using all data
future = m.make_future_dataframe(
periods=365
) # we need to specify the number of days in future
prediction = m.predict(future)
m.plot(prediction)
plt.title("Prediction of the Tesla Stock Price using the Prophet")
plt.xlabel("Date")
plt.ylabel("Close Stock Price")
plt.show()
m.plot_components(prediction)
plt.show()
# # TCS
# Load the dataset using pandas
data = pd.read_csv("../input/tcs-share/TCS.NS (1).csv")
data.head()
data.describe()
# Select only the important features i.e. the date and price
data = data[["Date", "Close"]] # select Date and Price
# Rename the features: These names are NEEDED for the model fitting
data = data.rename(
columns={"Date": "ds", "Close": "y"}
) # renaming the columns of the dataset
data.head()
from fbprophet import Prophet
m = Prophet(daily_seasonality=True) # the Prophet class (model)
m.fit(data) # fit the model using all data
future = m.make_future_dataframe(
periods=365
) # we need to specify the number of days in future
prediction = m.predict(future)
m.plot(prediction)
plt.title("Prediction of the TCS Stock Price using the Prophet")
plt.xlabel("Date")
plt.ylabel("Close Stock Price")
plt.show()
m.plot_components(prediction)
plt.show()
# # S&P Global
# Load the dataset using pandas
data = pd.read_csv("../input/sp-global/GSPC.csv")
data.head()
data.describe()
# Select only the important features i.e. the date and price
data = data[["Date", "Close"]] # select Date and Price
# Rename the features: These names are NEEDED for the model fitting
data = data.rename(
columns={"Date": "ds", "Close": "y"}
) # renaming the columns of the dataset
data.head()
from fbprophet import Prophet
m = Prophet(daily_seasonality=True) # the Prophet class (model)
m.fit(data) # fit the model using all data
future = m.make_future_dataframe(
periods=365
) # we need to specify the number of days in future
prediction = m.predict(future)
m.plot(prediction)
plt.title("Prediction of the S&P Global Stock Price using the Prophet")
plt.xlabel("Date")
plt.ylabel("Close Stock Price")
plt.show()
m.plot_components(prediction)
plt.show()
# # BitCoin
# Load the dataset using pandas
data = pd.read_csv("../input/bitcoin/BTC-USD.csv")
data.head()
data.describe()
# Select only the important features i.e. the date and price
data = data[["Date", "Close"]] # select Date and Price
# Rename the features: These names are NEEDED for the model fitting
data = data.rename(
columns={"Date": "ds", "Close": "y"}
) # renaming the columns of the dataset
data.head()
from fbprophet import Prophet
m = Prophet(daily_seasonality=True) # the Prophet class (model)
m.fit(data) # fit the model using all data
future = m.make_future_dataframe(
periods=365
) # we need to specify the number of days in future
prediction = m.predict(future)
m.plot(prediction)
plt.title("Prediction of the Bitcoin Stock Price using the Prophet")
plt.xlabel("Date")
plt.ylabel("Close Stock Price")
plt.show()
m.plot_components(prediction)
plt.show()
|
# ## Exploratory Data Analysis | Reinforcing Skills
# Let's import the libraries we will use below, according to our needs.
import numpy as np
import seaborn as sns
import pandas as pd
# **NumPy: NumPy is a library used for numerical computations in the Python programming language. It was developed to perform fast and efficient operations on large, multi-dimensional arrays and matrices. NumPy is frequently used in mathematical, scientific and data analytics applications.**
# **Seaborn: Seaborn is a statistical graphics library used for data visualization in Python. It is built on top of the matplotlib library and offers a higher-level interface that makes data visualization easier and faster. Seaborn provides themes, color palettes and plot types tailored to statistical graphics.**
# **Pandas: Pandas is a library used for data analytics and data processing in Python. It provides high-performance, user-friendly data structures (Series and DataFrames) and simplifies data manipulation, cleaning, transformation and analysis. Pandas is commonly used in data analytics, data science and machine learning projects.**
# Let's load our data from the current directory, turn it into a data frame and assign it to the variable df. (pd.read_csv(...csv))
df = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
# Let's display the first 5 observations of the data frame.
df.head()
# Let's display how many attributes and how many observations the data frame consists of.
# The output is of the form (number of rows, number of columns).
# Each row represents an observation, while each column represents an attribute.
df.shape
# Let's display the types of the variables in the data frame and the memory usage.
df.info()
# You can access detailed information about the DataFrame.
# The output includes, for each column, its name, data type and the number of non-null values, as well as memory usage and the total number of rows.
# Shows the data types of the columns.
df.dtypes
# Let's display the basic statistics of the numerical variables in the data frame.
# Reasoning from the standard deviation and mean values, let's form an idea of how much variance each variable has.
df.describe()
# Let's observe how many missing values each attribute has in the data frame.
df.isnull().sum()
# Total number of missing values across all attributes of the entire data frame.
df.isnull().sum().sum()
# Let's draw a correlation matrix to show whether there is correlation between the numerical variables, and reason about the correlation coefficients.
# Between which two variables is the strongest positive relationship?
df.corr()
# There is a strong positive relationship between petal_length and petal_width; the closer the coefficient is to 1, the stronger the relationship.
# There is a moderate negative relationship between petal_width and sepal_width.
# Let's draw a heatmap to read the correlation coefficients more easily.
# sns.heatmap: used to visualize the correlation matrix.
# annot=True writes the correlation coefficients inside the squares.
# cmap='coolwarm': the parameter used to set the color map. The 'coolwarm' map transitions between cool and warm colors.
kolerasyon_matrisi = df.corr()
sns.heatmap(kolerasyon_matrisi, annot=True, cmap="coolwarm")
"""
viridis: Yeşil ve mavi tonlarında bir renk skalası
plasma: Mor ve kırmızı tonlarında bir renk skalası
magma: Siyah ve sarı tonlarında bir renk skalası
inferno: Siyah ve turuncu tonlarında bir renk skalası
cividis: Sarı ve siyah tonlarında bir renk skalası
RdYlBu: Kırmızıdan sarıya, maviden turkuaza giden bir renk skalası
BuPu: Maviden mora giden bir renk skalası
"""
# Veri çerçevemizin hedef değişkeninin "variety" benzersiz değerlerini görüntüleyelim.
df.species.unique()
# Veri çerçevemizin hedef değişkeninin "variety" benzersiz kaç adet değer içerdiğini görüntüleyelim.
df.species.nunique()
# Veri çerçevesindeki sepal.width ve sepal.length değişkenlerinin sürekli olduğunu görüyoruz. Bu iki sürekli veriyi görselleştirmek için önce scatterplot kullanalım.
sns.scatterplot(data=df, x="sepal_width", y="sepal_length")
# Aynı iki veriyi daha farklı bir açıdan frekanslarıyla incelemek için jointplot kullanarak görselleştirelim.
sns.jointplot(data=df, x="sepal_width", y="sepal_length")
# Aynı iki veriyi scatterplot ile tekrardan görselleştirelim fakat bu sefer "variety" parametresi ile hedef değişkenine göre kırdıralım.
# 3 farklı renk arasında sepal değişkenleriyle bir kümeleme yapılabilir mi? Ne kadar ayırt edilebilir bunun üzerine düşünelim.
sns.scatterplot(data=df, x="sepal_width", y="sepal_length", hue="species")
# value_counts() fonksiyonu ile veri çerçevemizin ne kadar dengeli dağıldığını sorgulayalım.
df.species.value_counts()
# Keman grafiği çizdirerek sepal.width değişkeninin dağılımını inceleyin.
# Söz konusu dağılım bizim için ne ifade ediyor, normal bir dağılım olduğunu söyleyebilir miyiz?
sns.violinplot(data=df, x="sepal_width")
# The arithmetic mean, mode and median are equal to each other.
# The maximum point of the curve is the arithmetic mean (and therefore also the mode and the median).
# The curve is symmetric around the arithmetic mean.
# It is symmetric, so we can say it is (approximately) a normal distribution.
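# A quick numerical check of this claim (a minimal sketch, assuming scipy is available in the environment):
# the Shapiro-Wilk test's null hypothesis is that the sample comes from a normal distribution.
from scipy import stats
shapiro_stat, shapiro_p = stats.shapiro(df["sepal_width"])
print(f"Shapiro-Wilk statistic = {shapiro_stat:.3f}, p-value = {shapiro_p:.3f}")
# If the p-value is above 0.05, we cannot reject normality for sepal_width.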
# To understand this better, let's draw a distplot of sepal.width.
# sns.distplot(df['sepal_width'])
# distplot was removed from the current version of the seaborn library, so we use histplot instead.
sns.histplot(df["sepal_width"])
# Let's visualize, in a single line, three separate violin plots of the sepal.length distribution, one for each flower species.
sns.violinplot(data=df, x="species", y="sepal_length")
# How many observations of each flower species does our data frame contain?
# We already saw with value_counts that it is 50 x 3 and balanced, but to express this visually let's pass the species column to sns.countplot().
sns.countplot(data=df, x="species")
# Let's visualize sepal.length and sepal.width with sns.jointplot, and examine the distribution and the regions where its frequency is high.
sns.jointplot(data=df, x="sepal_width", y="sepal_length")
# Let's add the kind = "kde" parameter to the visualization from the previous cell. This turns the dotted view of the distribution into a density-oriented visualization.
sns.jointplot(data=df, x="sepal_width", y="sepal_length", kind="kde")
# Let's plot the distributions of the petal.length and petal.width variables with a scatterplot.
sns.scatterplot(data=df, x="petal_length", y="petal_width")
# Let's add a 3rd dimension to the same visualization with the hue = "species" parameter.
sns.scatterplot(data=df, x="petal_length", y="petal_width", hue="species")
# Let's implement the sns.lmplot() visualization with the petal.length and petal.width variables, and answer the question: what kind of relationship is there between petal length and petal width, and is it strong?
sns.lmplot(data=df, x="petal_length", y="petal_width")
# The closer the points are to the line, the stronger the relationship.
# To reinforce the answer to this question, let's print the correlation coefficient between the two variables.
df["petal_length"].corr(df["petal_width"])
# Let's create a new total length attribute by adding the Petal Length and Sepal Length values.
df["total_length"] = df["petal_length"] + df["sepal_length"]
# Let's print the mean of total.length.
df["total_length"].mean()
# Let's print the standard deviation of total.length.
df["total_length"].std()
# Let's print the maximum value of sepal.length.
df["sepal_length"].max()
# Let's print the observations whose sepal.length is greater than 5.5 and whose species is setosa.
sonuc = df[(df["species"] == "Iris-setosa") & (df["sepal_length"] > 5.5)]
print(sonuc)
# Let's print only the sepal.length and sepal.width variables (and their values) of the observations whose petal.length is less than 5 and whose species is virginica.
sonuc = df[(df["species"] == "Iris-virginica") & (df["petal_length"] < 5)]
sonuc[["sepal_length", "sepal_width"]]
# Let's group by our target variable and display the means of our variable values.
df.groupby("species").mean()
# Let's group by our target variable and print the standard deviation values of only the petal.length variable.
std = df.groupby("species")["petal_length"].std()
print(std)
|
# April 8, 2023
name = input(
"what's your name?"
) # input is a function; name is a variable; = is an assignment operator
print(
"hello,", name
) # print is a function; name is a variable, so it does not need double quotes.
# there are two arguments in the brackets and they are separated by comma, the comma automatically leaves one space
# between the two arguments.
print(name) # name is a variable, so it does not need double quotes
print("hello,", name) # single and double quotes are both correct here. why?
print("hello, " + name) # comma is replaced with +; one space is added after hello,
## docs.python.org contains all the functions in python
## print(*objects, sep=' ', end='\n', ...); *objects means the print function can take any number of objects
print("hello,", end="") # end="" connects the before and after lines.
print(name)
print(
"hello,", name, sep="???"
)  # sep="???" places the given string between the two arguments instead of the default space.
####### str
name = name.strip()  # removes leading and trailing white space from the string
name = name.lstrip()  # removes white space at the left
name = name.rstrip()  # removes white space at the right
name = name.capitalize()  # capitalizes only the very first letter of the input
name = name.title()  # capitalizes the first letter of each word
# chain functions together; remove white space and capitalize user name
name = name.strip().title()
# a simpler way to chain functions
input(
"what's your name?"
).strip().title() # is this one better than the above separated functions?
print("hello, {name}") # this line only prints out hello, {name}
print(f"hello, {name}") # f is used to tell python to recognize and format the string,
# split user's name into first name and last name
first, last = name.split(
    " "
)  # split the name variable at the space; the part before the space is assigned to the first variable, and the part after the space goes to the last variable
print(
f"hello, {first}"
) # if users type full name, this allows python to extract the first name only
# int; integer does not have decimals
x = input("what's x?")
y = input("what's y?")
z = x + y
print(
z
) # the result will be just xy, not x+y, because x and y here are strings, not integers.
# convert strings to integers using int(); we can nest functions
x = int(input("what's x?"))
y = int(input("what's y?"))
print(x + y)
# convert strings to floats; so users can type both integers and decimals, but the result will be decimals
x = float(input("what's x?"))
y = float(input("what's y?"))
z = x + y
print(round(z, 2)) # round z with 2 decimal digits;
# add comma in the output
x = float(input("what's x?"))
y = float(input("what's y?"))
z = round(x + y)  # round z to the nearest integer
print(f"{z:,}")  # the :, format specifier inserts thousands separators (e.g. 1,234)
# def; make a function that prints out hello by itself
def hello(): #
print("hello") # everything that's indented is part of the function
return
name = input("what's your name?")
hello()
print(name)
# give parameters to the hello function, so it can print variables, for instance name;
# the parameter can be anything
def hello(to): #
print("hello,", to) # everything that's indented is part of the function
return
name = input("what's your name?")
hello(name)
# give parameters to the hello function, so it can print variables, for instance name;
# the parameter can be anything
def hello(to="world"):  # give the parameter "to" a default value of "world"
print("hello,", to) # everything that's indented is part of the function
return
hello()
name = input("what's your name?")
hello(name)
#
def main():
x = int(input("What's x?"))
print("x squared is", square(x))
def square(n):
return n * n
main()
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn
# **VALUES OF X**
X = np.arange(10)
X
# **LETS DEFINE Y WHICH IS THE FUNCTION OF X**
Y = (X - 5) ** 2
Y
# **GIVEN A FUNCTION f(X),FINDING VALUE OF X THAT MINIMIZES F**
# # VISUALISATION
plt.plot(X, Y)
plt.style.use("seaborn")
plt.ylabel("F(X)")
plt.xlabel("X")
# **DEFINING INITIAL VALUE AND STEP SIZE**
x = 0  # initial value of x
lr = 0.1  # learning rate (step size)
error = []  # values of f(x) collected at each iteration
plt.plot(X, Y)
for i in range(50):
    grad = 2 * (x - 5)  # derivative of f(x) = (x - 5)**2
    x = x - lr * grad  # gradient descent update step
    y = (x - 5) ** 2  # current value of the function
    error.append(y)  # track the error at each iteration
    print(x)
    plt.scatter(x, y)  # mark the current position on the curve
# **PLOTTING ERROR**
plt.plot(error)
plt.style.use("seaborn")
plt.xlabel("ERROR")
|
# # Customer Segmentation with RFM
# RFM analysis is a data driven customer behavior segmentation technique.
# RFM stands for recency, frequency, and monetary value.
# The idea is to segment customers based on when their last purchase was, how often they’ve purchased in the past, and how much they’ve spent overall. All three of these measures have proven to be effective predictors of a customer's willingness to engage in marketing messages and offers. [[1]](https://www.barilliance.com/rfm-analysis/)
# RFM analysis helps marketers find answers to the following questions:
# * Who are your best customers?
# * Which of your customers could contribute to your churn rate?
# * Who has the potential to become valuable customers?
# * Which of your customers can be retained?
# * Which of your customers are most likely to respond to engagement campaigns? [[2]](https://clevertap.com/blog/rfm-analysis/)
# Importing libraries
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Getting Dataset
# **Dataset Story**
# https://archive.ics.uci.edu/ml/datasets/Online+Retail+II
# The data set named Online Retail - II includes the sales of an online store between 01/12/2009 - 09/12/2011.
# **Variables Description:**
# * **InvoiceNo** : The number of the invoice, unique per each purchase. Refund invoice numbers contain "C"
# * **StockCode** : Unique code per each item
# * **Description** : Name of the item
# * **Quantity** : The number of items within the invoice
# * **InvoiceDate** : Date and time of the purchase
# * **UnitPrice** : Price of a single item, in Sterling
# * **CustomerID** : Unique id number per each customer
# * **Country** : The country where the customer is living
#
online_retail_df = pd.read_excel(
"../input/online-retail-ii-data-set-from-ml-repository/online_retail_II.xlsx",
sheet_name="Year 2010-2011",
)
df = online_retail_df.copy()
df.head()
# # Exploring and Preparing Data
# Dimensionality of df
df.shape
# Column labels
df.columns
# Descriptive statistics of the df
df.describe()
# Summary of the df (columns, variable types, non-null values, memory usage)
df.info()
# Calculating missing values
df.isnull().sum()
# Dropping rows that have missing values
df.dropna(inplace=True)
df.shape
# Count of unique products
df["Description"].nunique()
# Counts of products
df["Description"].value_counts().head()
# Most ordered products
df.groupby("Description").agg({"Quantity": "sum"}).sort_values(
"Quantity", ascending=False
).head()
# Count of unique orders
df["Invoice"].nunique()
# Dropping cancelled orders
df = df[~df["Invoice"].str.contains("C", na=False)]
# Total price of products in orders
df["TotalPrice"] = df["Quantity"] * df["Price"]
# Most profitable orders
df.groupby("Invoice").agg({"TotalPrice": "sum"}).sort_values(
"TotalPrice", ascending=False
).head()
df.head()
df.describe()
# # RFM Score Calculation
# Last day of order
df["InvoiceDate"].max()
# Date for recency (the max date of df)
today = dt.datetime(2011, 12, 9)
# ## Calculation Steps
# * For **Recency** calculation:
# 1. Group by Customer ID.
# 2. Find how many days passed since last order by subtracting the date of the last purchase from today's date.
#
#
# * For **Frequency** calculation:
# 1. Group by Customer ID.
# 2. Find the number of orders.
#
#
# * For **Monetary** calculation:
# 1. Group by Customer ID.
# 2. Find the total price of orders.
rfm = df.groupby("Customer ID").agg(
{
"InvoiceDate": lambda date: (today - date.max()).days,
"Invoice": lambda num: num.nunique(),
"TotalPrice": lambda TotalPrice: TotalPrice.sum(),
}
)
rfm.columns = ["Recency", "Frequency", "Monetary"]
# Dropping rows that have non-positive Monetary or Frequency values
rfm = rfm[(rfm["Monetary"] > 0) & (rfm["Frequency"] > 0)]
rfm.head()
rfm.describe()
# ## Computing Quantile of RFM values
# Customers with the lowest recency and the highest frequency and monetary amounts are considered top customers.
r_labels, f_labels, m_labels = range(5, 0, -1), range(1, 6), range(1, 6)
rfm["Recency Score"] = pd.qcut(rfm["Recency"], 5, labels=r_labels)
rfm["Frequency Score"] = pd.qcut(
rfm["Frequency"].rank(method="first"), 5, labels=f_labels
)
rfm["Monetary Score"] = pd.qcut(rfm["Monetary"], 5, labels=m_labels)
# RFM Score
rfm["RFM"] = (
rfm["Recency Score"].astype(str)
+ rfm["Frequency Score"].astype(str)
+ rfm["Monetary Score"].astype(str)
)
rfm.head()
# # Segmenting Customers
# Segments based on set of rules applied to RFM scores
segments = {
r"[1-2][1-2]": "Hibernating",
r"[1-2][3-4]": "At_Risk",
r"[1-2]5": "Cant_Loose",
r"3[1-2]": "About_to_Sleep",
r"33": "Need_Attention",
r"[3-4][4-5]": "Loyal_Customers",
r"41": "Promising",
r"51": "New_Customers",
r"[4-5][2-3]": "Potential_Loyalists",
r"5[4-5]": "Champions",
}
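# For example, a customer with a Recency Score of 5 and a Frequency Score of 4 produces the string "54", which matches the pattern r"5[4-5]" and is therefore labeled Champions in the next step.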
# Assigning segments
rfm["Segment"] = rfm["Recency Score"].astype(str) + rfm["Frequency Score"].astype(str)
rfm["Segment"] = rfm["Segment"].replace(segments, regex=True)
rfm.head()
# Count of each segment
rfm["Segment"].value_counts()
rfm[["Segment", "Recency", "Frequency", "Monetary"]].groupby("Segment").agg(
["mean", "count", "max"]
).round()
# # RFM Analysis
# ## Distribution of Recency, Frequency and Monetary
columns = ["Recency", "Frequency", "Monetary"]
for column in columns:
fig, ax = plt.subplots(figsize=(12, 3))
# sns.distplot(rfm[column])
sns.histplot(rfm[column], kde=True, stat="density", linewidth=0)
ax.set_title("Distribution of %s" % column)
plt.show()
segments = ["Loyal_Customers", "Hibernating", "Potential_Loyalists"]
colors = ["m", "r", "c"]
for column in columns:
fig, ax = plt.subplots(figsize=(12, 3))
for segment, color in zip(segments, colors):
sns.histplot(
x=column,
kde=True,
label=segment,
data=rfm[rfm["Segment"] == segment],
color=color,
)
ax.set_title("Distribution of %s" % column)
plt.legend()
plt.show()
# ## Analysis of Segments
palette = sns.color_palette("Blues_r", n_colors=13)
for rfm_type in ["RFM", "Segment"]:
    fig, ax = plt.subplots(figsize=(30, 5))
    sns.countplot(x=rfm_type, data=rfm, palette=palette)
    ax.set_title("Number of customers in each RFM cluster (%s)" % rfm_type)
    if rfm_type == "Segment":
        plt.xticks(rotation=90)
plt.show()
|
# # **Just a simple example**
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
import itertools
sample = pd.read_csv("/kaggle/input/cifar-10/sampleSubmission.csv")
sample.head()
trainLabel = pd.read_csv("/kaggle/input/cifar-10/trainLabels.csv")
trainLabel
# # **1、 7z file**
import py7zlib
import time
fp = open("/kaggle/input/cifar-10/train.7z", "rb")
# Create an Archive7z object from the file handle
archive = py7zlib.Archive7z(fp)
# Read all of the file names contained in the archive
names = archive.getnames()
# search
startTime = time.time()
# Return the ArchiveFile object corresponding to the given file name
member = archive.getmember(names[0])
end_1_time = time.time()
print("search time is {}".format(end_1_time - startTime))
# read data
# Read all of the file's data
data = member.read()
end_2_time = time.time()
print("read time is {}".format(end_2_time - end_1_time))
names
# # **2、Replacement method**
# The above method is not very easy to control
# **2.1 Introduction**
# The CIFAR-10 dataset contains 60,000 color images of 32 x 32 pixels in 3 channels, divided into 10 classes. Each class contains 6,000 images. The training set contains 50,000 images, while the test set provides 10,000 images. The description comes from the CIFAR repository ( https://www.cs.toronto.edu/~kriz/cifar.html ). This is a classification problem with 10 classes (multi-class classification). Looking at a few sample images gives a better feel for the dataset.
# The challenge is to recognize previously unseen images and assign them to one of the 10 classes.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print("x_train shape:", x_train.shape)
print("y_train shape:", y_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
y_train
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
sns.countplot(y_train.ravel(), ax=axs[0])
axs[0].set_title("Training data")
axs[0].set_xlabel("Classes")
sns.countplot(y_test.ravel(), ax=axs[1])
axs[1].set_title("Testing data")
axs[1].set_xlabel("Classes")
plt.show()
# As we can see, each class contains exactly 6,000 examples (5,000 for training and 1,000 for testing).
# The graph above is important for training: if, for example, we had only 1,000 samples of label 1, the model would have difficulty detecting label 1 (lower accuracy). That is not the case here, so everything looks fine. It is important to know the distribution of the dataset across the different classes because the quality of our model depends on it.
# Now let's do some preprocessing.
# The output variable has 10 possible values. This is a multi-class classification problem. We need to encode these labels as one-hot vectors (e.g. "bird" -> [0,0,1,0,0,0,0,0,0,0]).
# # **3、Normalize**
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
x_train
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train.shape[:]
# # **4、Defining the model architecture Using ConVnets**
# In the first stage, our net learns 32 convolutional filters, each of size 3 x 3. With "same" padding the output dimension matches the input shape, so it stays 32 x 32, and the activation is relu, a simple way of introducing non-linearity; this is followed by another 32 convolutional filters of size 3 x 3, also with relu activation. After that we have a max-pooling operation (3 x 3 pool, stride 2) and a dropout at 25%.
# In the next stage of the deep pipeline, the net learns 64 convolutional filters of size 3 x 3 with relu activation, followed by another 64 filters of the same size, again with max-pooling (3 x 3 pool, stride 2) and a dropout at 25%. A third convolutional stage repeats the same pattern with 128 filters.
# The final stage in the deep pipeline is a dense layer with 512 units and relu activation, followed by a dropout at 50% and a softmax layer with 10 classes as output, one for each category.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding="same", input_shape=x_train.shape[1:]))
model.add(Activation("relu"))
model.add(Conv2D(32, (3, 3)))
model.add(MaxPooling2D(3, strides=2))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(64, (3, 3)))
model.add(MaxPooling2D(3, strides=2))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(128, (3, 3)))
model.add(MaxPooling2D(3, strides=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation("softmax"))
model.summary()
# # **5、Model training**
# Before making the network ready for training we have to specify:
# A loss function: to measure how good the network is
# An optimizer: to update the network as it sees more data and to reduce the loss value
# Metrics: to monitor the performance of the network
# Also note, regarding data augmentation:
# One of the most common techniques to avoid overfitting is data augmentation. Overfitting generally occurs when we don't have enough data to train the model. To avoid this problem we can artificially expand our dataset by applying small transformations to the training images, reproducing the variations that occur naturally in the data.
# Different data augmentation techniques are: cropping, rotating, scaling, translating, flipping, adding Gaussian noise to the input images, etc. (a sketch is shown right after this note).
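# A minimal sketch of how the ImageDataGenerator imported above could be wired in
# (this notebook trains without augmentation; the transformation values below are
# illustrative assumptions, not tuned settings):
def fit_with_augmentation(model, x_train, y_train, x_test, y_test, epochs=5, batch_size=128):
    datagen = ImageDataGenerator(
        rotation_range=15,  # random rotations up to 15 degrees
        width_shift_range=0.1,  # horizontal shifts up to 10% of the width
        height_shift_range=0.1,  # vertical shifts up to 10% of the height
        horizontal_flip=True,  # random horizontal flips
    )
    datagen.fit(x_train)  # compute any statistics the generator needs
    # On older standalone Keras versions, model.fit_generator may be required instead of model.fit
    return model.fit(
        datagen.flow(x_train, y_train, batch_size=batch_size),
        epochs=epochs,
        validation_data=(x_test, y_test),
    )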
opt = keras.optimizers.Adam(
learning_rate=0.001, decay=1e-6, epsilon=1e-08, beta_1=0.9, beta_2=0.999
)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
history = None
print("Not using data argumentation.")
history = model.fit(
x_train,
y_train,
batch_size=128,
epochs=5,
validation_data=(x_test, y_test),
shuffle=True,
)
# # **6、Evaluate the model**
# 6.1 Training and validation curves
# Let's look at the training and validation process by visualizing the fitting history. This allows us to quickly see how our model fits the data (overfitting, underfitting, model convergence, etc.).
def eva(history):
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
axs[0].plot(history.history["accuracy"])
axs[0].plot(history.history["val_accuracy"])
axs[0].set_title("Model Accuracy")
axs[0].set_ylabel("Accuracy")
axs[0].set_xlabel("Epoch")
axs[0].legend(["train", "validate"], loc="upper left")
axs[1].plot(history.history["loss"])
axs[1].plot(history.history["val_loss"])
axs[1].set_title("Model Loss")
axs[1].set_ylabel("Loss")
axs[1].set_xlabel("Epoch")
axs[1].legend(["train", "validate"], loc="upper left")
plt.show()
print(history.history.keys())
eva(history)
# # **7、Score trained model and prediction**
scores = model.evaluate(x_test, y_test)
print("Test loss:", scores[0])
print("Test accuracy:", scores[1])
pred = model.predict(x_test)
# # **8、Confusion matrix**
# A confusion matrix can be very helpful for spotting your model's weaknesses. We plot the confusion matrix of the test-set predictions. For a readable visualization we first define the class labels below and then draw the matrix (see the sketch after the labels list).
labels = [
"Airplane",
"Automobile",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
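# A minimal sketch of the confusion matrix described above, reusing the
# confusion_matrix import from sklearn and the predictions computed in step 7.
y_pred_classes = np.argmax(pred, axis=1)  # predicted class index per test image
y_true_classes = np.argmax(y_test, axis=1)  # true class index (one-hot -> index)
cm = confusion_matrix(y_true_classes, y_pred_classes)
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", xticklabels=labels, yticklabels=labels)
plt.xlabel("Predicted label")
plt.ylabel("True label")
plt.title("Confusion matrix on the test set")
plt.show()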
# # **9、Check the predictions**
def show_test(number):
fig = plt.figure(figsize=(3, 3))
test_image = np.expand_dims(x_test[number], axis=0)
    # predict_classes was removed in newer Keras versions; taking the argmax of the softmax output is equivalent
    test_result = np.argmax(model.predict(test_image), axis=1)
plt.imshow(x_test[number])
dict_key = test_result[0]
plt.title("Predicted: {} ".format(labels[dict_key]))
show_test(10)
# # **10、Save**
path = os.path.join(os.getcwd(), "save_models")
model_name = "keras_cifar10_trained_model.h5"
if not os.path.isdir(path):
os.mkdir(path)
model_path = os.path.join(path, model_name)
model.save(model_path)
print("Saved trained model at %s " % model_path)
scores = model.evaluate(x_test, y_test, verbose=1)
print("Test loss:", scores[0])
print("Test accuracy:", scores[1])
train_images_path = "/kaggle/working/train"
test_images_path = "/kaggle/working/test"
test_images_path
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
df = pd.read_html("https://en.wikipedia.org/wiki/List_of_governors-general_of_India")
df
type(df)
df[0]
df[1]
df[2]
df[0].describe()
df[0].head()
df[0].tail()
df[0].info()
df[0].columns
df[1].columns
df[2].columns
df[0].isnull().sum()
df[1].isnull().sum()
df[2].isnull().sum()
df[1].info()
df[0].isna()
df[0].isna().sum()
df[1].columns
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
file = "/kaggle/input/hbo-max-movies-and-tv-shows/titles.csv"
df = pd.read_csv(file)
df.head()
df.isnull().sum()
df.info()
df.describe()
df.type.value_counts()
# importing libraries
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style="darkgrid")
# declaring data (hard-coded counts of Movies and Shows taken from df.type.value_counts() above)
data = [2408, 622]
keys = ["Movie", "Show"]
# define Seaborn color palette to use
palette_color = sns.color_palette("bright")
# plotting data on chart
plt.pie(data, labels=keys, colors=palette_color, autopct="%.0f%%")
plt.title("Composition between Shows and Movies")
plt.show()
sns.lineplot(df["release_year"], color="violet")
plt.show()
list(df["age_certification"].value_counts()), list(df["age_certification"].unique())
df["age_certification"].value_counts().plot(kind="bar")
df_mo = df[df["type"] == "MOVIE"]
df_sh = df[df["type"] == "SHOW"]
sns.scatterplot(df_mo["runtime"])
sns.scatterplot(df_sh["runtime"])
plt.title("Runtime Show vs Movie")
plt.show()
temp = df[df["imdb_score"] > 9].copy()
sns.barplot(x=temp["imdb_score"], y=temp["title"])
plt.show()
sns.histplot(temp)
temp = df[df["tmdb_score"] > 9].copy()
sns.barplot(x=temp["tmdb_score"], y=temp["title"])
plt.show()
sns.relplot(
data=df_mo,
x="runtime",
y="imdb_score",
hue="age_certification",
style="age_certification",
)
plt.show()
sns.relplot(
data=df_sh,
x="tmdb_score",
y="runtime",
hue="age_certification",
)
sns.catplot(data=df, x="type", y="imdb_votes", kind="box")
sns.relplot(
data=df,
kind="line",
x="release_year",
y="runtime",
errorbar="sd",
)
plt.show()
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = "/kaggle/input/benetech-making-graphs-accessible/train/images"
# sub_class = os.listdir(src_path)
labels = "/kaggle/input/benetech-making-graphs-accessible/train/annotations"
test = "/kaggle/input/benetech-making-graphs-accessible/test/images"
im = Image.open(
"/kaggle/input/benetech-making-graphs-accessible/train/images/0000ae6cbdb1.jpg"
)
im
# path = os.path.join(src_path,sub_class[0])
# img = plt.imread(os.path.join(path)
# lets look at the first digit
# def plot_digit(image_data):
# image = image_data.reshape(28, 28)
# plt.imshow(image, cmap = 'binary')
# plt.axis('off')
# plot_digit(src_path_train[0])
# plt.imshow(img, cmap=plt.get_cmap('gray'))
from PIL import Image
|
# # Missing Data in Pandas
import pandas as pd
import numpy as np
s = pd.Series(["Sam", np.nan, "Tim", "Kim"])
s
s.isnull()
s.notnull()
s[3] = None
s.isnull()
s.dropna()
from numpy import nan as NA
df = pd.DataFrame([[1, 2, 3], [4, NA, 5], [NA, NA, NA]])
df
df.dropna()
df.dropna(how="all")
df
df[1] = NA
df
df.dropna(axis=1, how="all")
df
df.dropna(thresh=3)  # keep only rows with at least 3 non-NA values
df
df.fillna(0)
df.fillna({0: 15, 1: 25, 2: 35})  # fill each column with a different value (column label -> fill value)
df
df.fillna(0, inplace=True)
df
df = pd.DataFrame([[1, 2, 3], [4, NA, 5], [NA, NA, NA]])
df
df.fillna(method="ffill")
df.fillna(method="ffill", limit=1)
data = pd.Series([1, 0, NA, 5])
data
data.fillna(data.mean())
df
df.fillna(df.mean())
|
# # Semantic Processing
# Semantic processing is the next step in understanding natural language text. It involves analyzing the meaning of a sentence beyond the individual words and their grammatical structure. In semantic processing, we focus on the relationships between words, phrases, and clauses in a sentence and how they relate to the overall meaning of the sentence. Semantic processing can be used for a variety of tasks such as information retrieval, question answering, text summarization, and many others.
# Semantic study is about interpreting the intent behind a statement, comprehending ambiguous words, recognizing synonyms, detecting sarcasm, etc.
# At high level Semantic Processing is done by using
# 1. Knowledge graphs
# 2. Distributional semantics
# 3. Topic modeling
# In this notebook will look into understading Knowledge graphs using WordNet.
# # Knowledge graphs
# A knowledge graph is a type of graph data structure consisting of a defined set of vertices (also known as nodes or points) that are connected by edges. Let’s learn about Knowledge graphs in the upcoming video.
# 
# [source](https://en.wikipedia.org/wiki/Knowledge_graph)
# The different types of knowledge graphs include:
# **WordNet**: A lexical database of semantic relations between words developed by Princeton University
# **ConceptNet**: A freely available semantic network created by MIT to help computers understand human word usage.
# **UMLS**: The Unified Medical Language System, a set of files and software bringing together various health and biomedical vocabularies and standards to enable computer system interoperability.
# **In this notebook we will look into WordNet**
# # What is [WordNet](https://wordnet.princeton.edu/) ?
# WordNet can be used to resolve word sense ambiguity. Developed by Princeton University, WordNet is a comprehensive English word database and is available through NLTK (Natural Language ToolKit).
# ### wordnet [search](http://wordnetweb.princeton.edu/perl/webwn)
# 
# The screenshot from WordNet's website shows each sense of a word grouped into noun and verb senses, each referred to as a synset.
# Each sense has a gloss or definition and an example sentence, such as the first verb sense of the word "bank" with the meaning "tip laterally" and the example sentence "the pilot had to bank the aircraft." Unlike a dictionary with definitions, WordNet is unique in terms of the relationships between the different senses of a word.
# The types of relationships between different words can be grouped as follows:
# **Is a relation:**
# **Synonym**: A relation between two similar concepts
# **Example**: “large” is a synonym of “big.”
# **Antonym**: A relation between two opposite concepts
# **Example**: “large” is an antonym of “small.”
#
# **Hypernym**: A relation between a concept and its superordinate. A superordinate is all-encompassing.
# **Example**: “fruits” is the hypernym of “mango.”
#
# **Hyponym**: A relation between a concept and its subordinate
# **Example**: “apple” is the hyponym of “fruits.”
# You can refer to the diagram given below to understand hyponyms and hypernyms. Any word connected with its hypernyms has an “is a” relationship.
# 
# **has part relation**:
# **Holonym**: A relation between a whole and its parts
# **Example**: “face” is the holonym of “eyes.”
# **Meronym**: A relation between a part and its whole
# **Example**: “eyes” is the meronym of the “human body.”
# “has part” relationship.
# 
# # WordNet Graph that will be used to explain the concepts.
# 
# [source](https://www.researchgate.net/figure/An-excerpt-of-the-WordNet-semantic-network_fig2_220566219)
# # code demo
# Install and load NLTK
#!pip install nltk
import nltk
nltk.download("omw-1.4")
from nltk import download
nltk.download("wordnet")
nltk.download("wordnet2022")
from nltk.corpus import wordnet
# ## Synonym (synsets)
# Synsets (synonyms sets)
tractor = wordnet.synsets("tractor")
tractor
# The word tractor has two senses, both of them nouns.
# Definitions of senses
[syn.definition() for syn in tractor]
# ## Antonyms
syn = list()
ant = list()
for synset in wordnet.synsets("good"):
for lemma in synset.lemmas():
syn.append(lemma.name()) # add the synonyms
if lemma.antonyms(): # When antonyms are available, add them into the list
ant.append(lemma.antonyms()[0].name())
print("Synonyms: " + str(syn))
print("Antonyms: " + str(ant))
# ## Hypernyms
# Hypernyms: Relation between a concept and its superordinate
# in this case we are checking the synset of the 1st definition of tractor (tractor has 2 definitions)
tractor = wordnet.synset("tractor.n.01")
tractor.hypernyms()
# from the graph, a tractor is a self-propelled_vehicle (this is an "is a" relationship)
#
self_propelled_vehicle = wordnet.synset("self-propelled_vehicle.n.01")
self_propelled_vehicle.hypernyms()
# ## meronyms
# Meronyms: Relation between a part and its whole
wheeled_vehicle = wordnet.synset("wheeled_vehicle.n.01")
wheeled_vehicle.part_meronyms()
# ## hyponyms
# Hyponyms: Relation between a concept and its subordinate
wheeled_vehicle.hyponyms()
# ## holonyms
# Holonyms: Relation between whole and its parts (check the image for relationship)
axle = wordnet.synset("axle.n.01")
axle.part_holonyms()
# hyponyms (check the image for relationship)
self_propelled_vehicle.hyponyms()
# hyponyms (check the image for relationship)
motor_vehicle = wordnet.synset("motor_vehicle.n.01")
motor_vehicle.hyponyms()
# meronyms (check the image for relationship)
car = wordnet.synset("car.n.01")
car.part_meronyms()
|
import sqlite3
# Connect to the database file
conn = sqlite3.connect("database.sqlite")
# Create a cursor object to execute SQL queries
cur = conn.cursor()
# # Which player has the highest overall rating in the database?
# Execute a query to select the player with the highest overall rating
query = """
SELECT player_name, overall_rating
FROM Player_Attributes
JOIN Player ON Player.player_api_id = Player_Attributes.player_api_id
ORDER BY overall_rating DESC
LIMIT 1;
"""
cur.execute(query)
# Fetch the result of the query
result = cur.fetchone()
# Print the result
print(
f"The player with the highest overall rating is {result[0]} with a rating of {result[1]}"
)
# Close the cursor and database connection
cur.close()
conn.close()
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Specify the table name
table_name = "Country"
# Execute a query to retrieve column names from the specified table
query = f"PRAGMA table_info('{table_name}')"
cur.execute(query)
# Fetch all the column names
results = cur.fetchall()
# Print the column names
for row in results:
print(row[1])
# Close the cursor and database connection
cur.close()
conn.close()
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Specify the table name
table_name = "League"
# Execute a query to retrieve column names from the specified table
query = f"PRAGMA table_info('{table_name}')"
cur.execute(query)
# Fetch all the column names
results = cur.fetchall()
# Print the column names
for row in results:
print(row[1])
# Close the cursor and database connection
cur.close()
conn.close()
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Specify the table name
table_name = "Match"
# Execute a query to retrieve column names from the specified table
query = f"PRAGMA table_info('{table_name}')"
cur.execute(query)
# Fetch all the column names
results = cur.fetchall()
# Print the column names
for row in results:
print(row[1])
# Close the cursor and database connection
cur.close()
conn.close()
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Specify the table name
table_name = "Player"
# Execute a query to retrieve column names from the specified table
query = f"PRAGMA table_info('{table_name}')"
cur.execute(query)
# Fetch all the column names
results = cur.fetchall()
# Print the column names
for row in results:
print(row[1])
# Close the cursor and database connection
cur.close()
conn.close()
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Specify the table name
table_name = "Player_Attributes"
# Execute a query to retrieve column names from the specified table
query = f"PRAGMA table_info('{table_name}')"
cur.execute(query)
# Fetch all the column names
results = cur.fetchall()
# Print the column names
for row in results:
print(row[1])
# Close the cursor and database connection
cur.close()
conn.close()
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Specify the table name
table_name = "Team_Attributes"
# Execute a query to retrieve column names from the specified table
query = f"PRAGMA table_info('{table_name}')"
cur.execute(query)
# Fetch all the column names
results = cur.fetchall()
# Print the column names
for row in results:
print(row[1])
# Close the cursor and database connection
cur.close()
conn.close()
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Specify the table name
table_name = "Team"
# Execute a query to retrieve column names from the specified table
query = f"PRAGMA table_info('{table_name}')"
cur.execute(query)
# Fetch all the column names
results = cur.fetchall()
# Print the column names
for row in results:
print(row[1])
# Close the cursor and database connection
cur.close()
conn.close()
# # Which teams have the highest win percentage in a given season, and how does that vary by league and country?
import sqlite3
# Connect to the database file
conn = sqlite3.connect("database.sqlite")
# Create a cursor object to execute SQL queries
cur = conn.cursor()
query = """
SELECT c.name AS country, l.name AS league, t.team_long_name AS team,
COUNT(*) AS total_matches,
SUM(CASE WHEN m.home_team_goal > m.away_team_goal AND m.home_team_api_id = t.team_api_id THEN 1
WHEN m.home_team_goal < m.away_team_goal AND m.away_team_api_id = t.team_api_id THEN 1
ELSE 0 END) AS total_wins,
ROUND(SUM(CASE WHEN m.home_team_goal > m.away_team_goal AND m.home_team_api_id = t.team_api_id THEN 1
WHEN m.home_team_goal < m.away_team_goal AND m.away_team_api_id = t.team_api_id THEN 1
ELSE 0 END) * 100.0 / COUNT(*), 2) AS win_percentage
FROM Match m
JOIN Country c ON c.id = m.country_id
JOIN League l ON l.id = m.league_id
JOIN Team t ON (t.team_api_id = m.home_team_api_id OR t.team_api_id = m.away_team_api_id)
WHERE m.season = '2015/2016'
GROUP BY country, league, team
ORDER BY win_percentage DESC
"""
cur.execute(query)
# Fetch all the results of the query
results = cur.fetchall()
# Print the header
print(
"{:<18} {:<23} {:<26} {:<15} {:<12} {}".format(
"Country", "League", "Team", "Total Matches", "Total Wins", "Win Percentage"
)
)
print("-" * 105)
# Print the results
for row in results:
print(
"{:<18} {:<23} {:<26} {:<15} {:<12} {}".format(
row[0], row[1], row[2], row[3], row[4], row[5]
)
)
# # What is the total number of goals scored by each team in the entire dataset?
import sqlite3
# Connect to the database file
conn = sqlite3.connect("database.sqlite")
# Create a cursor object to execute SQL queries
cur = conn.cursor()
query = """
SELECT team.team_long_name, SUM(goals) AS total_goals
FROM (
SELECT home_team_api_id AS team_id, home_team_goal AS goals
FROM Match
UNION ALL
SELECT away_team_api_id AS team_id, away_team_goal AS goals
FROM Match
) AS goals_table
JOIN Team AS team
ON team.team_api_id = goals_table.team_id
GROUP BY team.team_api_id
ORDER BY total_goals DESC;
"""
cur.execute(query)
# Fetch all the results of the query
results = cur.fetchall()
# Print the results
for row in results:
print(row)
# # What is the average age of all the players in the dataset?
import sqlite3
# Connect to the database file
conn = sqlite3.connect("database.sqlite")
# Create a cursor object to execute SQL queries
cur = conn.cursor()
query = """
SELECT AVG(strftime('%Y', 'now') - strftime('%Y', birthday)) AS avg_age FROM Player;
"""
cur.execute(query)
# Fetch all the results of the query
results = cur.fetchall()
# Print the results
for row in results:
print(row)
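# A minimal sketch of a more precise variant: SQLite's julianday() accounts for
# month and day instead of subtracting only the year parts.
conn = sqlite3.connect("database.sqlite")
cur = conn.cursor()
precise_query = """
SELECT AVG((julianday('now') - julianday(birthday)) / 365.25) AS avg_age FROM Player;
"""
cur.execute(precise_query)
print(cur.fetchone())
cur.close()
conn.close()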
# # top 10 players with the highest overall rating in the dataset?
import sqlite3
# Connect to the database file
conn = sqlite3.connect("database.sqlite")
# Create a cursor object to execute SQL queries
cur = conn.cursor()
query = """
SELECT p.player_name, MAX(pa.overall_rating) AS max_rating
FROM Player_Attributes pa
JOIN Player p ON pa.player_api_id = p.player_api_id
GROUP BY p.player_name
ORDER BY max_rating DESC
LIMIT 10;
"""
cur.execute(query)
# Fetch all the results of the query
results = cur.fetchall()
# Print the results
for row in results:
print(row)
# # top 10 players with the highest potential rating in the dataset?
import sqlite3
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Write the SQL query
query = """
SELECT p.player_name, MAX(pa.potential) as max_potential
FROM Player_Attributes pa
JOIN Player p ON pa.player_api_id = p.player_api_id
GROUP BY p.player_name
ORDER BY max_potential DESC
LIMIT 10
"""
# Execute the query
cur.execute(query)
# Fetch all the results of the query
results = cur.fetchall()
# Print the results
print("{:<20} {}".format("Player Name", "Max Potential"))
print("-" * 40)
for row in results:
print("{:<20} {}".format(row[0], row[1]))
# # team with the most wins in the "2015/2016" season of the English Premier League?
import sqlite3
# Connect to the SQLite database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Execute a query to get the team with the most home wins in the 2015/2016 season of the English Premier League (a home-plus-away version is sketched further below)
query = """
SELECT team_long_name, COUNT(*) AS wins
FROM Match m
JOIN Team t ON m.home_team_api_id = t.team_api_id
WHERE m.season = '2015/2016' AND m.league_id = 1729 AND m.home_team_goal > m.away_team_goal
GROUP BY m.home_team_api_id
ORDER BY wins DESC
LIMIT 1
"""
cur.execute(query)
# Fetch the result of the query
result = cur.fetchone()
# Print the result
print(
f"The team with the most wins in the 2015/2016 season of the English Premier League is {result[0]} with {result[1]} wins."
)
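# The query above only counts home wins. A minimal alternative sketch (assuming
# the same schema and league_id 1729 for the English Premier League) that adds
# home and away wins together before ranking:
import sqlite3

conn_epl = sqlite3.connect("database.sqlite")
cur_epl = conn_epl.cursor()
query_epl = """
SELECT t.team_long_name, COUNT(*) AS wins
FROM (
    SELECT home_team_api_id AS team_id
    FROM Match
    WHERE season = '2015/2016' AND league_id = 1729 AND home_team_goal > away_team_goal
    UNION ALL
    SELECT away_team_api_id AS team_id
    FROM Match
    WHERE season = '2015/2016' AND league_id = 1729 AND away_team_goal > home_team_goal
) AS wins_table
JOIN Team t ON t.team_api_id = wins_table.team_id
GROUP BY t.team_api_id
ORDER BY wins DESC
LIMIT 1
"""
cur_epl.execute(query_epl)
print(cur_epl.fetchone())
cur_epl.close()
conn_epl.close()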
# # Which player has won the most penalties in the entire dataset?
import sqlite3
# Connect to the database file
conn = sqlite3.connect("database.sqlite")
# Create a cursor object to execute SQL queries
cur = conn.cursor()
query = """
SELECT p.player_name, COUNT(*) as total_penalties
FROM Player_Attributes pa
JOIN Player p ON pa.player_api_id = p.player_api_id
WHERE pa.penalties > 0
GROUP BY pa.player_api_id
ORDER BY total_penalties DESC
LIMIT 1
"""
cur.execute(query)
# Fetch all the results of the query
results = cur.fetchall()
# Print the results
for row in results:
print(row)
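# Note: Player_Attributes.penalties is a penalty-taking skill rating, so the
# query above effectively counts how many attribute snapshots a player has with
# a positive rating rather than penalties actually won. A minimal alternative
# sketch (same tables assumed) ranking players by their highest recorded
# penalties rating instead:
import sqlite3

conn_pen = sqlite3.connect("database.sqlite")
cur_pen = conn_pen.cursor()
cur_pen.execute(
    """
    SELECT p.player_name, MAX(pa.penalties) AS best_penalties_rating
    FROM Player_Attributes pa
    JOIN Player p ON pa.player_api_id = p.player_api_id
    GROUP BY pa.player_api_id
    ORDER BY best_penalties_rating DESC
    LIMIT 5
    """
)
for row in cur_pen.fetchall():
    print(row)
cur_pen.close()
conn_pen.close()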
# # Which team had the most home wins in the 2015/2016 season of the Spanish La Liga?
# Connect to the database
conn = sqlite3.connect("database.sqlite")
# Create a cursor object
cur = conn.cursor()
# Define the query to find the team with the most home wins in the 2015/2016 season of the Spanish La Liga (league_id 21518)
query = """
SELECT team_long_name, COUNT(*) AS wins
FROM Match m
JOIN Team t ON m.home_team_api_id = t.team_api_id
WHERE m.season = '2015/2016' AND m.league_id = 21518 AND m.home_team_goal > m.away_team_goal
GROUP BY m.home_team_api_id
ORDER BY wins DESC
LIMIT 1
"""
# Execute the query
cur.execute(query)
# Fetch the result
result = cur.fetchone()
# Print the result
print("Team with the most home wins in the 2015/2016 season of the Spanish La Liga:")
print(result[0], "with", result[1], "home wins")
|
# List all the files present in the directory, to verify that all 3 files - train.csv, test.csv and gender_submission.csv - are present
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib as mpl
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load Training Data
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
# Load Testing Data
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
# # Let's analyse features.
# We start with the gender-based survival ratio. Women clearly show a higher survival rate.
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
# # Fill Null Values
# The Age feature has null values. We want to use this feature, so we replace all the missing values with the median age.
print(train_data["Age"].isnull().sum())
age_median = train_data["Age"].median()
train_data["Age"].fillna(age_median, inplace=True)
print(train_data["Age"].isnull().sum())
print(test_data["Age"].isnull().sum())
age_median = test_data["Age"].median()
test_data["Age"].fillna(age_median, inplace=True)
print(test_data["Age"].isnull().sum())
# # Categorical Age
# Divide the Age feature into 5 distinct categories and calculate the survival rate for each.
# These categories help us understand which age groups have a higher chance of survival.
#
train_data["CategoricalAge"] = pd.cut(train_data["Age"], 5)
print(
train_data[["CategoricalAge", "Survived"]]
.groupby(["CategoricalAge"], as_index=False)
.mean()
)
# Assign a numeric value to people belonging to different age groups.
train_data.loc[train_data["Age"] <= 16, "Age"] = 0
train_data.loc[(train_data["Age"] > 16) & (train_data["Age"] <= 32), "Age"] = 1
train_data.loc[(train_data["Age"] > 32) & (train_data["Age"] <= 48), "Age"] = 2
train_data.loc[(train_data["Age"] > 48) & (train_data["Age"] <= 64), "Age"] = 3
train_data.loc[train_data["Age"] > 64, "Age"] = 4
test_data.loc[test_data["Age"] <= 16, "Age"] = 0
test_data.loc[(test_data["Age"] > 16) & (test_data["Age"] <= 32), "Age"] = 1
test_data.loc[(test_data["Age"] > 32) & (test_data["Age"] <= 48), "Age"] = 2
test_data.loc[(test_data["Age"] > 48) & (test_data["Age"] <= 64), "Age"] = 3
test_data.loc[test_data["Age"] > 64, "Age"] = 4
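# A minimal equivalent sketch of the same binning using pd.cut with the bin
# edges above. It is shown on a small made-up Series for illustration only and
# is not applied to the dataframes used below.
example_bins = [-np.inf, 16, 32, 48, 64, np.inf]
example_age_groups = pd.cut(
    pd.Series([5, 22, 40, 70]), bins=example_bins, labels=[0, 1, 2, 3, 4]
)
print(example_age_groups.tolist())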
# # Gender
# Convert the text values in the 'Sex' column into a numeric column 'Gender_Numeric'.
# This makes the histograms easier to plot and is cleaner to use for further processing of the data.
sex_dummy = pd.get_dummies(train_data["Sex"], drop_first=True)
train_data = pd.concat([train_data, sex_dummy], axis=1)
# train_data.drop(['Sex'],axis=1,inplace =True)
train_data.rename(columns={"male": "Gender_Numeric"}, inplace=True)
train_data.head()
sex_dummy = pd.get_dummies(test_data["Sex"], drop_first=True)
test_data = pd.concat([test_data, sex_dummy], axis=1)
# test_data.drop(['Sex'],axis=1,inplace =True)
test_data.rename(columns={"male": "Gender_Numeric"}, inplace=True)
test_data.head()
# # **Class**
# Passengers in First Class have a higher chance of survival than those in Second and Third Class, respectively.
sns.barplot(x="Pclass", y="Survived", data=train_data)
plt.rc("ytick", labelsize=14)
plt.figure()
fig = train_data.groupby("Survived")["Pclass"].plot.hist(histtype="bar", alpha=0.8)
plt.legend(("Died", "Survived"), fontsize=12)
plt.xlabel("Pclass", fontsize=18)
plt.show()
# > # Female to Male Survival
# > Plot a histogram to show that Females have a higher chance of survival than males
plt.rc("ytick", labelsize=14)
plt.figure()
fig = train_data.groupby("Survived")["Gender_Numeric"].plot.hist(
histtype="bar", alpha=0.8
)
plt.legend(("Died", "Survived"), fontsize=12)
plt.xlabel("Gender_Numeric", fontsize=18)
plt.show()
# # Age
# Now we know that people belong to 5 different age groups, and we have assigned a unique numeric value to each group:
# * Age <= 16 → 0
# * 16 < Age <= 32 → 1
# * 32 < Age <= 48 → 2
# * 48 < Age <= 64 → 3
# * Age > 64 → 4
# This histogram shows that people in the 16-32 age group (and younger) have a higher chance of survival than the other groups.
plt.rc("ytick", labelsize=14)
plt.figure()
fig = train_data.groupby("Survived")["Age"].plot.hist(histtype="bar", alpha=0.8)
plt.legend(("Died", "Survived"), fontsize=12)
plt.xlabel("Age", fontsize=18)
plt.show()
# # Using Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
y = train_data["Survived"]
features = ["Pclass", "Gender_Numeric", "Parch", "SibSp", "Age"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
DATA_PATH = "/kaggle/input/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
columnNames = [
"Sample code number",
"Clump Thickness",
"Uniformity of Cell Size",
"Uniformity of Cell Shape",
"Marginal Adhesion",
"Single Epithelial Cell Size",
"Bare Nuclei",
"Bland Chromatin",
"Normal Nucleoli",
"Mitoses",
"Class",
]
data = pd.read_csv(DATA_PATH, names=columnNames)
data.head()
data = data.replace(to_replace="?", value=np.nan)
data.head()
data = data.dropna(how="any")
data.head()
from sklearn.model_selection import train_test_split
(
X_train,
X_test,
y_train,
y_test,
) = train_test_split(  # X_train/X_test hold the features; y_train/y_test hold the labels (classes)
    data[columnNames[1:10]],  # feature columns
    data[columnNames[10]],  # label column
    test_size=0.25,  # test_size sets the fraction of the data held out for testing
    random_state=43,  # random_state fixes the random seed
)
# print(data.dtypes)
X_train.head()
from sklearn.preprocessing import StandardScaler  # import StandardScaler to standardize the features
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train)
lr_y = lr.predict(X_test)
from sklearn.svm import LinearSVC
lsvc = LinearSVC()
lsvc.fit(X_train, y_train)
svm_y = lsvc.predict(X_test)
from sklearn.metrics import classification_report
print("Accuracy of the LogesticRegression: ", lr.score(X_test, y_test))
print(classification_report(y_test, lr_y, target_names=["Benign", "Malignant"]))
print("Accuracy of the SVM: ", lsvc.score(X_test, y_test))
print(classification_report(y_test, svm_y, target_names=["Benign", "Malignant"]))
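# A minimal follow-up sketch: confusion matrices for both classifiers (using the
# predictions computed above) to see where each model misclassifies.
from sklearn.metrics import confusion_matrix

print("Confusion matrix (LogisticRegression):\n", confusion_matrix(y_test, lr_y))
print("Confusion matrix (LinearSVC):\n", confusion_matrix(y_test, svm_y))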
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
df = pd.read_csv("/kaggle/input/amazon-products-dataset/Health and Personal Care.csv")
df.head()
df.columns
df["main_category"].value_counts()
df["sub_category"].value_counts()
# >>>>> After analysis, I found that the 'main_category' and 'sub_category' columns do not add anything to our analysis,
# as each contains only a single value, so the best thing to do is to delete them (along with the unused 'image' column)
df = df.drop(["main_category", "sub_category", "image"], axis=1).copy()
df.info()
df.describe()
df.head()
df.shape
df.isnull().sum()
# "Import the numpy library to convert null values to zero in the Discount_price column."
import numpy as np
# Analyzing the data types of each column
df.dtypes
from urllib.parse import urlparse
# Checking if the links in the table are valid
valid_links = True
for link in df["link"]:
result = urlparse(link)
if result.scheme and result.netloc:
continue
else:
valid_links = False
print("Invalid link: " + link)
if valid_links:
print("All links are valid!")
# Treating the discount_price column
# Values such as '1.732.50' and '1.250.25' were causing errors when converted to float, so I decided to remove everything
# after the second dot, so they became '1.732' and '1.250'
# Conversion function
def convert_value(value):
# remove the "₹" symbol
value = value.replace("₹", "")
# Replace comma with dot
value = value.replace(",", ".")
# Check if there are at least two dots
if value.count(".") >= 2:
# Remove everything after the second dot
value = value.split(".", 2)
value = f"{value[0]}.{value[1]}"
# Convert to float
return float(value)
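# A quick illustrative check of convert_value on a few made-up price strings
# (these sample values are hypothetical, just to show the behaviour described above).
for sample_price in ["₹1,732.50", "₹499", "₹89.99"]:
    print(sample_price, "->", convert_value(sample_price))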
# Calling the function for each value in the column
df["discount_price"] = df["discount_price"].astype(str).apply(convert_value)
# The discount_price column contains many null values, which means that not all products have a discount
# So the best way to handle this is to replace null values with zero, rather than deleting the rows
# Treating nulls in the discount_price column by replacing the null value with 0
df["discount_price"] = df["discount_price"].fillna(0)
df["discount_price"].describe()
df.head()
# Treating the actual_price column with the same function used for discount_price, with the difference
# that rows with a null actual_price are not filled with zero; they are dropped together with the other nulls below.
df["actual_price"] = df["actual_price"].astype(str).apply(convert_value)
df["actual_price"].describe()
# Treating the nulls in the ratings and no_of_ratings
df["ratings"] = df["ratings"].replace("Get", np.nan)
df["ratings"] = df["ratings"].replace("", np.nan).astype("float")
df["no_of_ratings"] = df["no_of_ratings"].replace("FREE Delivery by Amazon", np.nan)
df["no_of_ratings"] = df["no_of_ratings"].astype(str).apply(convert_value)
# dropping products that don't have a review
df = df.dropna()
df.isnull().sum()
# checking for duplicates
df.duplicated().sum()
# # Now with the data treated, let's get to the point:
# ### Let's answer some simple questions that can help us get to some bigger analysis.
# What are the most expensive products?
# What are the cheapest products?
# What are the products with the best rating?
# What are the products with the worst rating?
# # Most expensive products.
# Most expensive products
# Gets the top 5 most expensive products
top5 = df.nlargest(5, "actual_price")
# Prints the formatted results
print("Most expensive products:")
for index, row in top5.iterrows():
print(f'Product: {row["name"]}')
print(f'Discounted price: ₹{row["discount_price"]:.2f}')
print(f'Original price: ₹{row["actual_price"]:.2f}')
print(f'Rating: {row["ratings"]} ({row["no_of_ratings"]} ratings)')
print(f'Link: {row["link"]}')
print("------------------")
# # Products with the lowest prices.
# Obtain the 5 products with the lowest prices
bottom5 = df.nsmallest(5, "actual_price")
# Print the formatted results
print("Products with the lowest prices:")
for index, row in bottom5.iterrows():
print(f'Product: {row["name"]}')
print(f'Discounted price: ₹{row["discount_price"]:.2f}')
print(f'Original price: ₹{row["actual_price"]:.2f}')
print(f'Rating: {row["ratings"]} ({row["no_of_ratings"]} ratings)')
print(f'Link: {row["link"]}')
print("------------------")
# # Top-rated products.
# Products with the best ratings
# to be fair, I set a minimum of 50 ratings
popular_products = df[df["no_of_ratings"] > 50]
top5 = popular_products.nlargest(5, "ratings")
# Prints formatted results
print("Products with the best ratings:")
for index, row in top5.iterrows():
print(f'Product: {row["name"]}')
print(f'Discounted price: ₹{row["discount_price"]:.2f}')
print(f'Original price: ₹{row["actual_price"]:.2f}')
print(f'Rating: {row["ratings"]} ({row["no_of_ratings"]} ratings)')
print(f'Link: {row["link"]}')
print("------------------")
# # Worst rated products
worst_popular = df[df["no_of_ratings"] > 50]
worst5 = worst_popular.nsmallest(5, "ratings")
# Prints formatted results
print("Products with worst ratings:")
for index, row in worst5.iterrows():
print(f'Product: {row["name"]}')
print(f'Discounted price: ₹{row["discount_price"]:.2f}')
print(f'Original price: ₹{row["actual_price"]:.2f}')
print(f'Rating: {row["ratings"]} ({row["no_of_ratings"]} ratings)')
print(f'Link: {row["link"]}')
print("------------------")
# # Scatter plot of no_of_ratings x ratings.
import matplotlib.pyplot as plt
ax = df.plot.scatter(x="no_of_ratings", y="ratings")
# Setting the interval of x-axis to be 100-100
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(0, end, 100))
plt.show()
# # Scatter plot of actual_price x ratings.
ax = df.plot.scatter(x="actual_price", y="ratings")
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(0, end, 100))
plt.show()
# # Scatter plot of actual_price x no_of_ratings.
ax = df.plot.scatter(x="actual_price", y="no_of_ratings")
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(0, end, 100))
plt.show()
# # Statistical summary about prices.
print("Statistical summary of actual prices:")
print(df["actual_price"].describe())
print("")
print("Statistical summary of discount prices:")
print(df["discount_price"].describe())
# # Histogram of actual price and discount price.
# Histogram of actual prices
plt.hist(df["actual_price"], bins=20)
plt.title("Distribution of prices")
plt.xlabel("Price")
plt.ylabel("Frequency")
plt.show()
# Histogram of discount prices
plt.hist(df["discount_price"], bins=20)
plt.title("Distribution of discounts")
plt.xlabel("Discount")
plt.ylabel("Frequency")
plt.show()
|
# # The Relationship Between GDP and Life Expectancy
# ### Table of Contents
# * [Goals](#goals)
# * [Scoping](#scoping)
# * [Data](#data)
# * [EDA](#eda)
# * [Time Series Analysis: Life Expectancy](#tsa:le)
# * [Time Series Analysis: GDP](#tsa:gdp)
# * [Explorative Data Analysis - Time Series Summary](#eda-tss)
# * [Time Series Multivariate Analysis](#ts-ma)
# * [Exploratory Data Analysis: Correlation](#eda:corr)
# * [Discussion](#discussion)
# ## Project Goals
# For this project, I will analyze data on GDP and life expectancy from the World Health Organization and the World Bank to try and identify the relationship between the GDP and life expectancy of six countries.
# During this project, I will analyze, prepare, and plot data in order to answer questions in a meaningful way. In doing so, I will both demonstrate and develop my Data Analysis capabilities.
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
import numpy as np
import csv
import statsmodels.api as sm
import math
import statsmodels
#
# ## Project Scoping
# - Project Goal: Identify the relationship between a country's GDP and its population's life expectancy.
# - Analytical Steps required:
# 1. Univariate Analysis of each country's life expectancy (different parts of the population will have different expectancies) and GDP.
# 2. Bivariate Analysis of the relationship between each country's GDP and life expectancy - How do the two change? What is their correlation?
# 3. Multivariate Analysis of the 6 countries' GDP and life expectancy.
# ### Hypothesis for the EDA:
# 1. Countries with a higher GDP will tend to have a higher life expectancy.
# 2. Life expectancy will tend to grow over time in all countries
# 3. GDP will tend to grow over time for some countries, whereas others will have stagnated. I don't expect any country to see a significant reduction in their GDP.
# # Data
data = pd.read_csv("/kaggle/input/codecademy-portfolio-2-data/all_data.csv")
print(data.head(-20))
print(data.info())
print(data.Country.unique())
# We can see that the countries we are working with are:
# 'Chile' 'China' 'Germany' 'Mexico' 'United States of America' 'Zimbabwe'
# There are no Null values in the data and we have a total of 96 entries.
# However, in terms of data cleaning, I will need to change the name of "Life expectancy at birth (years)" because a name like that will make it significantly harder to use that feature. I will also change all "United States of America" to simply "USA" in order to make it easier to title graphs.
# In terms of data exploration, I will start with a time-series analysis of each country's Life Expectancy.
# ## Exploratory Data Analysis
# ### Time Series Analysis: Life Expectancy
data_copy = data.rename(columns={"Life expectancy at birth (years)": "Life_expectancy"})
data_copy.replace("United States of America", "USA", inplace=True)
years = data_copy.Year[0:16]
for i, country in enumerate(data_copy.Country.unique()):
    # One subplot per country's life expectancy
ax = plt.subplot(3, 2, i + 1)
plt.plot(years, data_copy.Life_expectancy[data_copy.Country == country], marker="s")
plt.title("Life Expectancy in " + country)
plt.xlabel("Years (2000 - 2016)")
plt.ylabel("Expected Age")
plt.show()
# From these we can see how life expectancy has been growing across all 6 countries.
# Now I'll do the same, but for GDP this time.
# ### Time-Series Analysis: GDP
for i, country in enumerate(data_copy.Country.unique()):
    # One subplot per country's GDP
ax = plt.subplot(3, 2, i + 1)
plt.plot(years, data_copy.GDP[data_copy.Country == country], marker="s")
plt.title(country + "'s GDP")
plt.xlabel("Years (2000 - 2016)")
if country == "Zimbabwe":
plt.ylabel("Billions")
elif (country == "USA") or (country == "China"):
plt.ylabel("Trilions")
elif country == "Chile":
plt.ylabel("(10s of) Billions")
else:
plt.ylabel("(100s of) Billions")
plt.show()
# Just by looking at these we can see that some GDPs have grown continuously over the years, whereas others have stagnated, and Chile's even fell in recent years.
# Now that we have seen how each of the countries has changed over the years, we can also look at how the average life expectancy and GDP have changed across the 6 countries.
# ### EDA - Time Series Summary statistics analysis
# obtaining Series of the average of two measures accross all years
average_GDP = data_copy.GDP.groupby(data_copy.Year).mean()
average_life_span = data_copy.Life_expectancy.groupby(data_copy.Year).mean()
# plotting both in a scatter plot
sns.scatterplot(x=average_life_span, y=average_GDP)
# This relationship looks roughly linear! So the answer to our question "What is the relationship between life expectancy and GDP?" is starting to look like a positive one.
# Next up, in order to visualise this relationship even better, we will see how the two measures map onto each other for each country. This might provide us with some more insight, not only into the relationship between the two measures overall, but also into the relationship that exists between the two for each individual case (or country).
# ### Time Series - Multivariate Analysis
sns.scatterplot(data=data_copy, x="Life_expectancy", y="GDP", hue="Country")
plt.ylim()
plt.show()
# This plot is a little too crowded. We can see that Zimbabwe, having a GDP much lower than the USA and China and a much lower life expectancy than the rest of the countries, does not map well onto the scatter plot. I am going to plot it by itself, so that we can see how this relationship works in that case specifically.
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Zimbabwe"],
y=data_copy.GDP[data_copy.Country == "Zimbabwe"],
)
plt.show()
# Here we can see that, although it is not linear in the earlier years of the dataset (Zimbabwe's Life Expectancy actually went down in the early to mid 2000s, whilst their GDP was somewhat stable), it becomes very linear in the late 2000s and early/mid 2010s.
# But this got me thinking: maybe I also need to separate the USA and China from the rest of the countries. That is, because the USA and China have GDPs in the order of trillions, while Germany, Chile and Mexico are in the order of hundreds of billions, I need to separate the two groups.
# Let's start with the latter.
# plotting Germany, Chile and Mexico
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Germany"],
y=data_copy.GDP[data_copy.Country == "Germany"],
legend="auto",
)
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Chile"],
y=data_copy.GDP[data_copy.Country == "Chile"],
legend="auto",
)
sns.scatterplot(
x=data_copy.Life_expectancy[data_copy.Country == "Mexico"],
y=data_copy.GDP[data_copy.Country == "Mexico"],
legend="auto",
)
plt.show()
# By plotting it this way, we can now see that each of these relationships is indeed somewhat linear in nature!
# I am not going to do the same for the USA and China, because we can clearly see that this relationship was very linear for the former and somewhat exponential for the latter, so there is no need for further plotting like this.
# But essentially, through this visualisation we can see that, although a country's GDP does seem to have *some* influence on its population's Life Expectancy, it might be the case that one of the two increasing does not necessarily mean that the other will as well.
# To finalise this exploration of the data, let's see what the Pearson correlation between the two is.
# ### EDA: Correlation
from scipy.stats import pearsonr
corr, p = pearsonr(data_copy.GDP, data_copy.Life_expectancy)
print(corr, p)
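# A minimal follow-up sketch: the same Pearson correlation computed per country,
# to see how the overall relationship holds within each country (uses the
# data_copy frame prepared above).
for country in data_copy.Country.unique():
    subset = data_copy[data_copy.Country == country]
    corr_c, p_c = pearsonr(subset.GDP, subset.Life_expectancy)
    print(f"{country}: correlation = {corr_c:.2f}, p-value = {p_c:.4f}")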
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# importing the libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# read the data frame
df = pd.read_csv(
"/kaggle/input/online-shop-customer-sales-data/Online Shop Customer Sales Data.csv"
)
df.head()
# Data preprocessing
# Data cleaning
df.info()
df.isnull().sum() # to find the null values
df.shape
df.describe()
df.describe(include="object")
df1 = df.describe()
for i in df1.columns:
sns.boxplot(df[i])
plt.show()
df1.boxplot()
# Define quartiles
Q1 = df["Purchase_VALUE"].quantile(0.25)
Q3 = df["Purchase_VALUE"].quantile(0.75)
# Calculate IQR
IQR = Q3 - Q1
# Calculate lower and upper bounds
lower_bound = Q1 - (1.5 * IQR)
upper_bound = Q3 + (1.5 * IQR)
# Determine outliers in Purchase_VALUE using the IQR bounds
outliers = df[(df["Purchase_VALUE"] < lower_bound) | (df["Purchase_VALUE"] > upper_bound)]
# Display how many outliers were found
outliers.shape
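# A minimal sketch that wraps the IQR rule in a small helper, so the same
# outlier check can be reused for any numeric column of df.
def iqr_outliers(series):
    q1, q3 = series.quantile(0.25), series.quantile(0.75)
    iqr_value = q3 - q1
    lower, upper = q1 - 1.5 * iqr_value, q3 + 1.5 * iqr_value
    return series[(series < lower) | (series > upper)]

print("Outliers in Purchase_VALUE:", iqr_outliers(df["Purchase_VALUE"]).shape[0])
print("Outliers in Time_Spent:", iqr_outliers(df["Time_Spent"]).shape[0])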
# Who is purchasing more?
df["Gender"]
df.groupby("Gender").sum().plot(
kind="pie", y="N_Purchases"
)  # Here 0 indicates Male and 1 indicates Female
# This clearly shows that the number of purchases (N_Purchases) is higher for females, which suggests women are more interested in shopping.
df2 = df[["Gender", "Revenue_Total"]]
df2.groupby("Gender")
male_reven = df2[df2["Gender"] == 0]
female_reven = df2[df2["Gender"] == 1]
print(male_reven["Revenue_Total"].sum())
print(female_reven["Revenue_Total"].sum())
group = df.groupby(["Gender", "Revenue_Total"])
group.first()
df["Gender"].value_counts()
df["Gender"].replace({1: "Female", 0: "Male"}, inplace=True)
df.groupby("Gender").sum().plot(
kind="pie",
y="Revenue_Total",
autopct="%1.0f%%",
colors=["Green", "Red"],
shadow=True,
)
# Here the store's revenue is split between male and female customers, with females contributing the larger share
# Which age group is purchasing more?
f = plt.figure()
f.set_figwidth(10)
f.set_figheight(9)
df.groupby("Age").sum().plot(kind="pie", y="N_Purchases")
# This shows that all age groups purchase fairly evenly, so age is not a barrier to purchasing
df["Pay_Method"].replace(
{0: "Wallet", 1: "card", 2: "paypal", 3: "others"}, inplace=True
)
df
sns.countplot(x=df["Pay_Method"], hue=df["Gender"])
# This shows that both male and female customers mostly use card and wallet for their purchases
# Who spends more time in the marketplace?
df.groupby("Gender").sum().plot(
kind="pie", y="Time_Spent", autopct="%1.0f%%", shadow=True
)
# This clearly shows that females spend more time shopping in the store
# Which browsers are customers using?
df["Browser"].replace({0: "Chrome", 1: "Safari", 2: "Edge", 3: "Other"}, inplace=True)
df
df.groupby("Browser").mean()
plt.figure(figsize=(10, 7))
df.groupby("Browser").sum().plot(
kind="pie", y="Time_Spent", autopct="%1.0f%%", shadow=True
)
df["Newsletter"].replace({0: "Not Subcribed", 1: "Subcrcibed"}, inplace=True)
df.groupby("Newsletter").sum().plot(
kind="pie", y="Time_Spent", autopct="%1.0f%%", shadow=True
)
sns.countplot(
x=df["Newsletter"], hue=df["Gender"]
)  # Newsletter subscription counts broken down by gender.
df.corr()
plt.figure(figsize=(10, 8))
sns.heatmap(df.corr(), annot=True)
new = pd.read_csv(
"/kaggle/input/online-shop-customer-sales-data/Online Shop Customer Sales Data.csv"
)
new
# Here the dataset is loaded again into a new variable called new
new.skew()
# The skewness values are small, lying between 0 and 5, which suggests the data are roughly normally distributed
new = new.drop("Purchase_DATE", axis=1)
for i in new.columns:
sns.distplot(new[i])
plt.show()
# But the plots show the distributions are not on a comparable scale, so let's apply standardization (StandardScaler)
from sklearn.preprocessing import StandardScaler
sl = StandardScaler()
full = sl.fit_transform(new)
full
print(full.std())
print(full.mean())
new_data = pd.DataFrame(full[:], columns=new.columns)
new_data
# After scaling, the values are centred: each column has mean 0 and standard deviation 1
X = new.drop("Purchase_VALUE", axis=1)
y = new["Purchase_VALUE"]
# Here we split the data into features (X) and target (y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=5)
print("X_train : ")
print(X_train.head())
print("")
print("X_test : ")
print(X_test.head())
print("")
print("y_train : ")
print(y_train.head())
print("")
print("y_test : ")
print(y_test.head())
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
# Here the train and test datasets are split in a 70:30 ratio
# Now we are going to fit a linear regression model
# For that we need to import statsmodels.api from the statsmodels package
import statsmodels.api as sm
# Now we add a constant term to the regression, because the formula is y = B0 + (B1*X1) + (B2*X2) + ... + (Bn*Xn), where B0 is the intercept
X_train_sm = sm.add_constant(X_train)
X_train_sm  # Here const is B0; the constant column has been added to the dataset
# Fitting the linear regression model
lr = sm.OLS(y_train, X_train_sm).fit()
lr  # Check that the regression model was fitted
lr.params  # Print the fitted coefficients
lr.summary()  # which gives the model summary
# Here the R-squared score is about 47%, which is not great, but it could improve with more data or additional features
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train_sm, y_train)
X_test_sm = sm.add_constant(X_test)
y_pred = regressor.predict(X_test_sm)
y_pred
from sklearn.metrics import r2_score
r_square = r2_score(y_test, y_pred)
r_square
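# A minimal follow-up sketch: alongside R², report RMSE and MAE on the test set
# (uses the same y_test and y_pred as above) for a fuller picture of the error.
from sklearn.metrics import mean_squared_error, mean_absolute_error

rmse = np.sqrt(mean_squared_error(y_test, y_pred))
mae = mean_absolute_error(y_test, y_pred)
print(f"RMSE: {rmse:.2f}  MAE: {mae:.2f}")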
|
# ### © Copyright 2020 [George Mihaila](https://github.com/gmihaila).
# @title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # **Pretrain Transformers Models in PyTorch using Hugging Face Transformers**
# ## **Pretrain 67 transformers models on your custom dataset.**
# [](https://colab.research.google.com/github/gmihaila/ml_things/blob/master/notebooks/pytorch/pretrain_transformers_pytorch.ipynb)
# [](https://github.com/gmihaila/ml_things/blob/master/notebooks/pytorch/pretrain_transformers_pytorch.ipynb)
# [](https://medium.com/@gmihaila/fine-tune-transformers-in-pytorch-using-transformers-57b40450635)
# [](https://opensource.org/licenses/Apache-2.0)
# **Disclaimer:** *The format of this tutorial notebook is very similar to my other tutorial notebooks. This is done intentionally in order to keep readers familiar with my format.*
# This notebook is used to pretrain transformers models using [Huggingface](https://huggingface.co/transformers/) on your own custom dataset.
# With the AutoClasses functionality we can reuse the code on a large number of transformers models!
# This notebook is designed to:
# * **Use an already pretrained transformers model and fine-tune it on your custom dataset.**
# * **Train a transformer model from scratch on a custom dataset.** This requires an already trained tokenizer. This notebook will use the pretrained tokenizer by default if an already trained tokenizer is not provided.
# This notebook is **very heavily inspired** from the Hugging Face script used for training language models: [transformers/tree/master/examples/language-modeling](https://github.com/huggingface/transformers/tree/master/examples/language-modeling). I basically adapted that script to work nicely in a notebook with a lot more comments.
# **Notes from [transformers/tree/master/examples/language-modeling](https://github.com/huggingface/transformers/tree/master/examples/language-modeling):** *Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
# GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
# using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.*
# ## **What should I know for this notebook?**
# Since I am using PyTorch to fine-tune our transformers models any knowledge on PyTorch is very useful.
# Knowing a little bit about the [transformers](https://github.com/huggingface/transformers) library helps too.
# In this notebook **I am using raw text data to train / fine-tune transformers models** (if I use a pretrained model I like to call this *extended pretraining* since I 'continue' the original training of the model on a custom dataset). There is no need for labeled data since we are not doing classification. The Transformers library handles the text files in the same way as the original implementation of each model.
# ## **How to use this notebook?**
# Like with every project, I built this notebook with reusability in mind. This notebook pulls the custom dataset from `.txt` files. Since the dataset does not come in a single `.txt` file I created a custom function `movie_reviews_to_file` that reads the dataset and creates the `text` file. The way I load the `.txt` files can be easily reused for any other dataset.
# The only modifications needed to use your own dataset will be in the paths provided to the train `.txt` file and evaluation `.txt` file.
# All parameters that can be changed are under the **Parameters Setup** section. Each parameter is nicely commented and structured to be as intuitive as possible.
# ## **What transformers models work with this notebook?**
# A lot of people will probably use it for Bert. When there is a need to run a different transformer model architecture, which one would work with this code?
# Since the name of the notebook is **pretrain_transformers**, it should work with more than one type of transformer.
# I ran this notebook across all the pretrained models found on Hugging Face Transformer. This way you know ahead of time if the model you plan to use works with this code without any modifications.
# The list of pretrained transformers models that work with this notebook can be found [here](https://github.com/gmihaila/ml_things/blob/master/notebooks/pytorch/pretrain_transformers_pytorch_status_models.md). There are **67 models that worked** 😄 and 39 models that failed to work 😢 with this notebook. *Remember these are pretrained models and fine-tuned on custom dataset.*
# ## **Dataset**
# This notebook will cover pretraining transformers on a custom dataset. I will use the well known movies reviews positive - negative labeled [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
# The description provided on the Stanford website:
# *This is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. There is additional unlabeled data for use as well. Raw text and already processed bag of words formats are provided. See the README file contained in the release for more details.*
# **Why this dataset?** I believe it is an easy dataset to understand and use for classification. I think sentiment data is always fun to work with.
# ## **Coding**
# Now let's do some coding! We will go through each coding cell in the notebook and describe what it does, what's the code, and when is relevant - show the output.
# I made this format to be easy to follow if you decide to run each code cell in your own python notebook.
# When I learn from a tutorial I always try to replicate the results. I believe it's easy to follow along if you have the code next to the explanations.
#
# ## **Downloads**
# Download the *Large Movie Review Dataset* and unzip it locally.
#
# # Download the dataset.
# !wget -q -nc http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# # Unzip the dataset.
# !tar -p -zxf /kaggle/working/aclImdb_v1.tar.gz
# ## **Installs**
# * **[transformers](https://github.com/huggingface/transformers)** library needs to be installed to use all the awesome code from Hugging Face. To get the latest version I will install it straight from GitHub.
# * **[ml_things](https://github.com/gmihaila/ml_things)** library used for various machine learning related tasks. I created this library to reduce the amount of code I need to write for each machine learning project.
#
# Install transformers library.
# Install helper functions.
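# A minimal sketch of the install commands assumed here (left commented out,
# matching the style of the download commands above; pinning versions may be
# preferable for reproducibility):
# !pip install -q git+https://github.com/huggingface/transformers
# !pip install -q git+https://github.com/gmihaila/ml_things.git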
# ## **Imports**
# Import all needed libraries for this notebook.
# Declare basic parameters used for this notebook:
# * `set_seed(123)` - Always good to set a fixed seed for reproducibility.
# * `device` - Look for gpu to use. I will use cpu by default if no gpu found.
import io
import os
import math
import torch
import warnings
from tqdm.notebook import tqdm
from ml_things import plot_dict, fix_text
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
PreTrainedTokenizer,
TrainingArguments,
AutoConfig,
AutoTokenizer,
AutoModelWithLMHead,
AutoModelForCausalLM,
AutoModelForMaskedLM,
LineByLineTextDataset,
TextDataset,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
DataCollatorForPermutationLanguageModeling,
PretrainedConfig,
Trainer,
set_seed,
)
# Set seed for reproducibility,
set_seed(123)
# Look for gpu to use. Will use `cpu` by default if no gpu found.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ## **Helper Functions**
# I like to keep all Classes and functions that will be used in this notebook under this section to help maintain a clean look of the notebook:
# **movie_reviews_to_file(path_data: str, path_texts_file: str)**
# As I mentioned before, we will need `.txt` files to run this notebook. Since the Large Movie Review Dataset comes in multiple files with different labels I created this function to put together all data in a single `.txt` file. Examples are saved on each line of the file. The `path_data` points to the path where data files are present and `path_texts_file` will be the `.txt` file containing all data.
# **ModelDataArguments**
# This class follows a similar format as the [transformers](https://github.com/huggingface/transformers) library. The main difference is the way I combined multiple types of arguments into one and used rules to make sure the arguments used are correctly set. Here is each argument detailed (they are also mentioned in the class documentation):
# * `train_data_file`:
# *Path to your `.txt` file dataset. If you have an example on each line of the file make sure to use `line_by_line=True`. If the data file contains all text data without any special grouping use `line_by_line=False` to move a `block_size` window across the text file.*
# * `eval_data_file`:
# *Path to evaluation `.txt` file. It has the same format as `train_data_file`.*
# * `line_by_line`:
# *If the `train_data_file` and `eval_data_file` contains separate examples
# on each line set `line_by_line=True`.
# If there is no separation between examples and `train_data_file` and
# `eval_data_file` contains continuous text then `line_by_line=False` and a
# window of `block_size` will be moved across the files to acquire examples.*
# * `mlm`:
# *Is a flag that changes loss function depending on model architecture.
# This variable needs to be set to `True` when working with masked language
# models like bert or roberta and set to `False` otherwise. There are functions that will raise ValueError if this argument is not set accordingly.*
# * `whole_word_mask`:
# *Used as flag to determine if we decide to use whole word masking or not. Whole word masking means that whole words will be masked during training instead of tokens which can be chunks of words.*
# * `mlm_probability`:
# *Used when training masked language models. Needs to have `mlm=True`.
# It represents the probability of masking tokens when training model.*
# * `plm_probability`:
# *Flag to define the ratio of length of a span of masked
# tokens to surrounding context length for permutation language modeling. Used for XLNet.*
# * `max_span_length`:
# *Flag may also be used to limit the length of a span of
# masked tokens used for permutation language modeling.
# Used for XLNet.*
# * `block_size`:
# *It refers to the windows size that is moved across the text file. Set to -1 to use maximum allowed length.*
# * `overwrite_cache`:
# *If there are any cached files, overwrite them.*
# * `model_type`:
# *Type of model used: bert, roberta, gpt2.
# More details [here](https://huggingface.co/transformers/pretrained_models.html).*
# * `model_config_name`:
# *Config of model used: bert, roberta, gpt2.
# More details [here](https://huggingface.co/transformers/pretrained_models.html).*
# * `tokenizer_name`:
# *Tokenizer used to process data for training the model.
# It usually has same name as `model_name_or_path`: bert-base-cased,
# roberta-base, gpt2 etc.*
# * `model_name_or_path`:
# *Path to existing transformers model or name of
# transformer model to be used: bert-base-cased, roberta-base, gpt2 etc.
# More details [here](https://huggingface.co/transformers/pretrained_models.html).*
# * `model_cache_dir`:
# *Path to cache files. It helps to save time when re-running code.*
# **get_model_config(args: ModelDataArguments)**
# Get the model configuration. Using the ModelDataArguments, return the model configuration. Here is each argument detailed:
# * `args`: *Model and data configuration arguments needed to perform pretraining.*
# * Returns: *Model transformers configuration.*
# * Raises: *ValueError: If `mlm=True` and `model_type` is NOT in ["bert", "roberta", "distilbert", "camembert"]. We need to use a masked language model in order to set `mlm=True`.*
# **get_tokenizer(args: ModelDataArguments)**
# Get the model tokenizer. Using the ModelDataArguments, return the model tokenizer and change `block_size` from `args` if needed. Here is each argument detailed:
# * `args`: *Model and data configuration arguments needed to perform pretraining.*
# * Returns: *Model transformers tokenizer.*
# **get_model(args: ModelDataArguments, model_config)**
# Get the model. Using the ModelDataArguments, return the actual model. Here is each argument detailed:
# * `args`: *Model and data configuration arguments needed to perform pretraining.*
# * `model_config`: *Model transformers configuration.*
# * Returns: *PyTorch model.*
# **get_dataset(args: ModelDataArguments, tokenizer: PreTrainedTokenizer, evaluate: bool=False)**
# Process the dataset file into a PyTorch Dataset. Using the ModelDataArguments, return the processed dataset. Here is each argument detailed:
# * `args`: *Model and data configuration arguments needed to perform pretraining.*
# * `tokenizer`: *Model transformers tokenizer.*
# * `evaluate`: *If set to `True` the test / validation file is being handled. If set to `False` the train file is being handled.*
# * Returns: *PyTorch Dataset that contains file's data.*
# **get_collator(args: ModelDataArguments, model_config: PretrainedConfig, tokenizer: PreTrainedTokenizer)**
# Get appropriate collator function.
# The collator function will be used to collate a PyTorch Dataset object. Here is each argument detailed:
# * `args`: *Model and data configuration arguments needed to perform pretraining.*
# * `model_config`: *Model transformers configuration.*
# * `tokenizer`: *Model transformers tokenizer.*
# * Returns: *Transformers specific data collator.*
#
def movie_reviews_to_file(path_data: str, path_texts_file: str):
r"""Reading in all data from path and saving it into a single `.txt` file.
In the pretraining process of our transformers model we require a text file.
This function is designed to work for the Movie Reviews Dataset.
    You will have to create your own function to move all examples into a text
file if you don't already have a text file with all your unlabeled data.
Arguments:
path_data (:obj:`str`):
        Path to the Movie Review Dataset partition. We only have `train` and
`test` partitions.
path_texts_file (:obj:`str`):
File path of the generated `.txt` file that contains one example / line.
"""
# Check if path exists.
if not os.path.isdir(path_data):
# Raise error if path is invalid.
raise ValueError("Invalid `path` variable! Needs to be a directory")
    # Collect all texts in a list.
texts = []
print("Reading `%s` partition..." % (os.path.basename(path_data)))
# Since the labels are defined by folders with data we loop
# through each label.
for label in ["neg", "pos"]:
sentiment_path = os.path.join(path_data, label)
# Get all files from path.
files_names = os.listdir(sentiment_path) # [:30] # SAMPLE FOR DEBUGGING.
# Go through each file and read its content.
for file_name in tqdm(files_names, desc=label, unit="files"):
file_path = os.path.join(sentiment_path, file_name)
# Read content.
content = io.open(file_path, mode="r", encoding="utf-8").read()
# Fix any unicode issues.
content = fix_text(content)
# Save content.
texts.append(content)
# Move list to single string.
all_texts = "\n".join(texts)
# Send all texts string to single file.
io.open(file=path_texts_file, mode="w", encoding="utf-8").write(all_texts)
# Print when done.
print("`.txt` file saved in `%s`\n" % path_texts_file)
return
class ModelDataArguments(object):
r"""Define model and data configuration needed to perform pretraining.
    Even though all arguments are optional, there still needs to be a certain
    number of arguments with values attributed.
Arguments:
train_data_file (:obj:`str`, `optional`):
Path to your .txt file dataset. If you have an example on each line of
the file make sure to use line_by_line=True. If the data file contains
all text data without any special grouping use line_by_line=False to move
a block_size window across the text file.
This argument is optional and it will have a `None` value attributed
inside the function.
eval_data_file (:obj:`str`, `optional`):
Path to evaluation .txt file. It has the same format as train_data_file.
This argument is optional and it will have a `None` value attributed
inside the function.
line_by_line (:obj:`bool`, `optional`, defaults to :obj:`False`):
If the train_data_file and eval_data_file contains separate examples on
each line then line_by_line=True. If there is no separation between
examples and train_data_file and eval_data_file contains continuous text
then line_by_line=False and a window of block_size will be moved across
the files to acquire examples.
This argument is optional and it has a default value.
mlm (:obj:`bool`, `optional`, defaults to :obj:`False`):
Is a flag that changes loss function depending on model architecture.
This variable needs to be set to True when working with masked language
models like bert or roberta and set to False otherwise. There are
functions that will raise ValueError if this argument is
not set accordingly.
This argument is optional and it has a default value.
whole_word_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Used as flag to determine if we decide to use whole word masking or not.
Whole word masking means that whole words will be masked during training
instead of tokens which can be chunks of words.
This argument is optional and it has a default value.
mlm_probability(:obj:`float`, `optional`, defaults to :obj:`0.15`):
Used when training masked language models. Needs to have mlm set to True.
It represents the probability of masking tokens when training model.
This argument is optional and it has a default value.
plm_probability (:obj:`float`, `optional`, defaults to :obj:`float(1/6)`):
Flag to define the ratio of length of a span of masked tokens to
surrounding context length for permutation language modeling.
Used for XLNet.
This argument is optional and it has a default value.
max_span_length (:obj:`int`, `optional`, defaults to :obj:`5`):
Flag may also be used to limit the length of a span of masked tokens used
for permutation language modeling. Used for XLNet.
This argument is optional and it has a default value.
block_size (:obj:`int`, `optional`, defaults to :obj:`-1`):
It refers to the windows size that is moved across the text file.
Set to -1 to use maximum allowed length.
This argument is optional and it has a default value.
overwrite_cache (:obj:`bool`, `optional`, defaults to :obj:`False`):
If there are any cached files, overwrite them.
This argument is optional and it has a default value.
model_type (:obj:`str`, `optional`):
Type of model used: bert, roberta, gpt2.
More details: https://huggingface.co/transformers/pretrained_models.html
This argument is optional and it will have a `None` value attributed
inside the function.
model_config_name (:obj:`str`, `optional`):
Config of model used: bert, roberta, gpt2.
More details: https://huggingface.co/transformers/pretrained_models.html
This argument is optional and it will have a `None` value attributed
inside the function.
tokenizer_name: (:obj:`str`, `optional`)
Tokenizer used to process data for training the model.
It usually has same name as model_name_or_path: bert-base-cased,
roberta-base, gpt2 etc.
This argument is optional and it will have a `None` value attributed
inside the function.
model_name_or_path (:obj:`str`, `optional`):
Path to existing transformers model or name of
transformer model to be used: bert-base-cased, roberta-base, gpt2 etc.
More details: https://huggingface.co/transformers/pretrained_models.html
This argument is optional and it will have a `None` value attributed
inside the function.
model_cache_dir (:obj:`str`, `optional`):
Path to cache files to save time when re-running code.
This argument is optional and it will have a `None` value attributed
inside the function.
Raises:
ValueError: If `CONFIG_MAPPING` is not loaded in global variables.
ValueError: If `model_type` is not present in `CONFIG_MAPPING.keys()`.
ValueError: If `model_type`, `model_config_name` and
`model_name_or_path` variables are all `None`. At least one of them
needs to be set.
warnings: If `model_config_name` and `model_name_or_path` are both
`None`, the model will be trained from scratch.
ValueError: If `tokenizer_name` and `model_name_or_path` are both
`None`. We need at least one of them set to load tokenizer.
"""
def __init__(
self,
train_data_file=None,
eval_data_file=None,
line_by_line=False,
mlm=False,
mlm_probability=0.15,
whole_word_mask=False,
plm_probability=float(1 / 6),
max_span_length=5,
block_size=-1,
overwrite_cache=False,
model_type=None,
model_config_name=None,
tokenizer_name=None,
model_name_or_path=None,
model_cache_dir=None,
):
# Make sure CONFIG_MAPPING is imported from transformers module.
if "CONFIG_MAPPING" not in globals():
raise ValueError(
"Could not find `CONFIG_MAPPING` imported! Make sure"
" to import it from `transformers` module!"
)
# Make sure model_type is valid.
if (model_type is not None) and (model_type not in CONFIG_MAPPING.keys()):
raise ValueError(
"Invalid `model_type`! Use one of the following: %s"
% (str(list(CONFIG_MAPPING.keys())))
)
# Make sure that model_type, model_config_name and model_name_or_path
# variables are not all `None`.
if not any([model_type, model_config_name, model_name_or_path]):
raise ValueError(
"You can`t have all `model_type`, `model_config_name`,"
" `model_name_or_path` be `None`! You need to have"
"at least one of them set!"
)
# Check if a new model will be loaded from scratch.
if not any([model_config_name, model_name_or_path]):
            # Format warnings to display nicely. This is a bit of an overkill.
warnings.formatwarning = (
lambda message, category, *args, **kwargs: "%s: %s\n"
% (category.__name__, message)
)
# Display warning.
warnings.warn("You are planning to train a model from scratch! 🙀")
# Check if a new tokenizer wants to be loaded.
# This feature is not supported!
if not any([tokenizer_name, model_name_or_path]):
# Can't train tokenizer from scratch here! Raise error.
raise ValueError(
"You want to train tokenizer from scratch! "
"That is not possible yet! You can train your own "
"tokenizer separately and use path here to load it!"
)
# Set all data related arguments.
self.train_data_file = train_data_file
self.eval_data_file = eval_data_file
self.line_by_line = line_by_line
self.mlm = mlm
self.whole_word_mask = whole_word_mask
self.mlm_probability = mlm_probability
self.plm_probability = plm_probability
self.max_span_length = max_span_length
self.block_size = block_size
self.overwrite_cache = overwrite_cache
# Set all model and tokenizer arguments.
self.model_type = model_type
self.model_config_name = model_config_name
self.tokenizer_name = tokenizer_name
self.model_name_or_path = model_name_or_path
self.model_cache_dir = model_cache_dir
return
def get_model_config(args: ModelDataArguments):
r"""
Get model configuration.
Using the ModelDataArguments return the model configuration.
Arguments:
args (:obj:`ModelDataArguments`):
Model and data configuration arguments needed to perform pretraining.
Returns:
:obj:`PretrainedConfig`: Model transformers configuration.
Raises:
ValueError: If `mlm=True` and `model_type` is NOT in ["bert",
"roberta", "distilbert", "camembert"]. We need to use a masked
language model in order to set `mlm=True`.
"""
# Check model configuration.
if args.model_config_name is not None:
# Use model configure name if defined.
model_config = AutoConfig.from_pretrained(
args.model_config_name, cache_dir=args.model_cache_dir
)
elif args.model_name_or_path is not None:
# Use model name or path if defined.
model_config = AutoConfig.from_pretrained(
args.model_name_or_path, cache_dir=args.model_cache_dir
)
else:
# Use config mapping if building model from scratch.
model_config = CONFIG_MAPPING[args.model_type]()
# Make sure `mlm` flag is set for Masked Language Models (MLM).
if (model_config.model_type in ["bert", "roberta", "distilbert", "camembert"]) and (
args.mlm is False
):
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads "
"butmasked LM heads. They must be run setting `mlm=True`"
)
# Adjust block size for xlnet.
if model_config.model_type == "xlnet":
# xlnet used 512 tokens when training.
args.block_size = 512
# setup memory length
model_config.mem_len = 1024
return model_config
def get_tokenizer(args: ModelDataArguments):
r"""
Get model tokenizer.
Using the ModelDataArguments return the model tokenizer and change
`block_size` from `args` if needed.
Arguments:
args (:obj:`ModelDataArguments`):
Model and data configuration arguments needed to perform pretraining.
Returns:
:obj:`PreTrainedTokenizer`: Model transformers tokenizer.
"""
# Check tokenizer configuration.
if args.tokenizer_name:
# Use tokenizer name if defined.
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, cache_dir=args.model_cache_dir
)
elif args.model_name_or_path:
# Otherwise fall back to the model name or path.
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, cache_dir=args.model_cache_dir
)
# Set up the data block size.
if args.block_size <= 0:
# Set block size to maximum length of tokenizer.
# Input block size will be the max possible for the model.
# Some max lengths are very large and may cause memory issues.
args.block_size = tokenizer.model_max_length
else:
# Never go beyond tokenizer maximum length.
args.block_size = min(args.block_size, tokenizer.model_max_length)
return tokenizer
def get_model(args: ModelDataArguments, model_config):
r"""
Get model.
Using the ModelDataArguments return the actual model.
Arguments:
args (:obj:`ModelDataArguments`):
Model and data configuration arguments needed to perform pretraining.
model_config (:obj:`PretrainedConfig`):
Model transformers configuration.
Returns:
:obj:`torch.nn.Module`: PyTorch model.
"""
# Make sure MODEL_FOR_MASKED_LM_MAPPING and MODEL_FOR_CAUSAL_LM_MAPPING are
# imported from transformers module.
if ("MODEL_FOR_MASKED_LM_MAPPING" not in globals()) and (
"MODEL_FOR_CAUSAL_LM_MAPPING" not in globals()
):
raise ValueError(
    "Could not find `MODEL_FOR_MASKED_LM_MAPPING` and"
    " `MODEL_FOR_CAUSAL_LM_MAPPING` imported! Make sure to"
    " import them from `transformers` module!"
)
# Check if using pre-trained model or train from scratch.
if args.model_name_or_path:
# Use pre-trained model.
if type(model_config) in MODEL_FOR_MASKED_LM_MAPPING.keys():
# Masked language modeling head.
return AutoModelForMaskedLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=model_config,
cache_dir=args.model_cache_dir,
)
elif type(model_config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys():
# Causal language modeling head.
return AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=model_config,
cache_dir=args.model_cache_dir,
)
else:
raise ValueError(
"Invalid `model_name_or_path`! It should be in %s or %s!"
% (
str(MODEL_FOR_MASKED_LM_MAPPING.keys()),
str(MODEL_FOR_CAUSAL_LM_MAPPING.keys()),
)
)
else:
# Use model from configuration - train from scratch.
print("Training new model from scratch!")
return AutoModelWithLMHead.from_config(model_config)
def get_dataset(
args: ModelDataArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False
):
r"""
Process dataset file into PyTorch Dataset.
Using the ModelDataArguments return the PyTorch Dataset built from the train or evaluation file.
Arguments:
args (:obj:`ModelDataArguments`):
Model and data configuration arguments needed to perform pretraining.
tokenizer (:obj:`PreTrainedTokenizer`):
Model transformers tokenizer.
evaluate (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to `True` the test / validation file is being handled.
If set to `False` the train file is being handled.
Returns:
:obj:`Dataset`: PyTorch Dataset that contains file's data.
"""
# Get file path for either train or evaluate.
file_path = args.eval_data_file if evaluate else args.train_data_file
# Check if `line_by_line` flag is set to `True`.
if args.line_by_line:
# Each example in data file is on each line.
return LineByLineTextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size
)
else:
# All data in file is put together without any separation.
return TextDataset(
tokenizer=tokenizer,
file_path=file_path,
block_size=args.block_size,
overwrite_cache=args.overwrite_cache,
)
def get_collator(
args: ModelDataArguments,
model_config: PretrainedConfig,
tokenizer: PreTrainedTokenizer,
):
r"""
Get appropriate collator function.
Collator function will be used to collate a PyTorch Dataset object.
Arguments:
args (:obj:`ModelDataArguments`):
Model and data configuration arguments needed to perform pretraining.
model_config (:obj:`PretrainedConfig`):
Model transformers configuration.
tokenizer (:obj:`PreTrainedTokenizer`):
Model transformers tokenizer.
Returns:
:obj:`data_collator`: Transformers specific data collator.
"""
# Special dataset handle depending on model type.
if model_config.model_type == "xlnet":
# Configure collator for XLNET.
return DataCollatorForPermutationLanguageModeling(
tokenizer=tokenizer,
plm_probability=args.plm_probability,
max_span_length=args.max_span_length,
)
else:
# Configure data for rest of model types.
if args.mlm and args.whole_word_mask:
# Use whole word masking.
return DataCollatorForWholeWordMask(
tokenizer=tokenizer,
mlm_probability=args.mlm_probability,
)
else:
# Regular language modeling.
return DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=args.mlm,
mlm_probability=args.mlm_probability,
)
# ## **Parameters Setup**
# Declare the rest of the parameters used for this notebook:
# * `model_data_args` contains all arguments needed to setup dataset, model configuration, model tokenizer and the actual model. This is created using the `ModelDataArguments` class.
# * `training_args` contains all arguments needed to use the [Trainer](https://huggingface.co/transformers/main_classes/trainer.html) functionality from Transformers that allows us to train transformers models in PyTorch very easily. You can find the complete documentation [here](https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments). There are a lot of parameters that can be set to enable multiple functionalities. I only used the following parameters (the comments are inspired by the HuggingFace documentation of [TrainingArguments](https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments)):
#
# * `output_dir`: *The output directory where the model predictions and checkpoints will be written. I set it to `pretrain_bert`, where the model and tokenizer will be saved.*
# * `overwrite_output_dir`: *Overwrite the content of the output directory. I set it to `True` in case I run the notebook multiple times I only care about the last run.*
# * `do_train`: *Whether to run training or not. I set this parameter to `True` because I want to train the model on my custom dataset.*
# * `do_eval`: *Whether to run evaluation on the evaluation files or not.
# I set it to `True` since I have a test data file and I want to evaluate how well the model trains.*
# * `per_device_train_batch_size`: *Batch size per GPU/TPU core/CPU for training. I set it to `10` for this example. I recommend setting it as high as your GPU memory allows.*
# * `per_device_eval_batch_size`: *Batch size per GPU/TPU core/CPU for evaluation. I set this value to `100` since it's not dealing with gradients.*
# * `evaluation_strategy`: *Evaluation strategy to adopt during training: `no`: no evaluation during training; `steps`: evaluate every `eval_steps`; `epoch`: evaluate at the end of every epoch. I set it to `steps` since I want to evaluate the model more often.*
# * `logging_steps`: *How often to show logs. I will use this to plot history loss and calculate perplexity. I set this to `300` here. If your evaluation data is large you might not want to run it that often because it will significantly slow down training time.*
# * `eval_steps`: *Number of update steps between two evaluations if `evaluation_strategy="steps"`. Will default to the same value as `logging_steps` if not set. Since I want to evaluate the model every `logging_steps` I leave this as `None` so it inherits the same value as `logging_steps`.*
# * `prediction_loss_only`: *Set this to `True` in order to return the loss for perplexity calculation. I want to monitor both loss and perplexity (which is exp(loss)).*
# * `learning_rate`: *The initial learning rate for Adam. Defaults is set to `5e-5`.*
# * `weight_decay`: *The weight decay to apply (if not zero). Defaults to `0`.*
# * `adam_epsilon`: *Epsilon for the Adam optimizer. Defaults to `1e-8`.*
# * `max_grad_norm`: *Maximum gradient norm (for gradient clipping). Defaults to `1.0`.*
# * `num_train_epochs`: *Total number of training epochs to perform (if not an integer, the decimal part is the fraction of the last epoch performed before stopping). I set it to `25` for this run; since the custom dataset is a lot smaller than the original dataset the model was trained on, keep an eye on the validation loss to avoid overfitting.*
# * `save_steps`: *Number of update steps between two checkpoint saves. Defaults to `500`.*
# Define arguments for data, tokenizer and model arguments.
# See comments in `ModelDataArguments` class.
model_data_args = ModelDataArguments(
train_data_file="/kaggle/working/train.txt",
eval_data_file="/kaggle/working/test.txt",
line_by_line=True,
mlm=True,
whole_word_mask=True,
mlm_probability=0.15,
plm_probability=float(1 / 6),
max_span_length=5,
block_size=50,
overwrite_cache=False,
model_type="bert",
model_config_name="indobenchmark/indobert-base-p1",
tokenizer_name="indobenchmark/indobert-base-p1",
model_name_or_path="indobenchmark/indobert-base-p1",
model_cache_dir=None,
)
# Define arguments for training
# Note: I only used the arguments I care about. `TrainingArguments` contains
# a lot more arguments. For more details check the awesome documentation:
# https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments
training_args = TrainingArguments(
# The output directory where the model predictions
# and checkpoints will be written.
output_dir="pretrain_bert",
# Overwrite the content of the output directory.
overwrite_output_dir=True,
# Whether to run training or not.
do_train=True,
# Whether to run evaluation on the dev or not.
do_eval=True,
# Batch size GPU/TPU core/CPU training.
per_device_train_batch_size=10,
# Batch size GPU/TPU core/CPU for evaluation.
per_device_eval_batch_size=100,
# evaluation strategy to adopt during training
# `no`: No evaluation during training.
# `steps`: Evaluate every `eval_steps`.
# `epoch`: Evaluate every end of epoch.
evaluation_strategy="steps",
# How often to show logs. I will use this to
# plot history loss and calculate perplexity.
logging_steps=300,
# Number of update steps between two
# evaluations if evaluation_strategy="steps".
# Will default to the same value as
# logging_steps if not set.
eval_steps=None,
# Set prediction loss to `True` in order to
# return loss for perplexity calculation.
prediction_loss_only=True,
# The initial learning rate for Adam.
# Defaults to 5e-5.
learning_rate=5e-5,
# The weight decay to apply (if not zero).
weight_decay=0,
# Epsilon for the Adam optimizer.
# Defaults to 1e-8
adam_epsilon=1e-8,
# Maximum gradient norm (for gradient
# clipping). Defaults to 1.0.
max_grad_norm=1.0,
# Total number of training epochs to perform
# (if not an integer, will perform the
# decimal part percents of
# the last epoch before stopping training).
num_train_epochs=25,
# Number of updates steps before two checkpoint saves.
# Defaults to 500
save_steps=-1,
)
# ## **Load Configuration, Tokenizer and Model**
# Loading the three essential parts of the pretrained transformers: configuration, tokenizer and model.
# Since I use the AutoClass functionality from Hugging Face I only need to worry about the model's name as input and the rest is handled by the transformers library.
# I will be calling each three functions created in the **Helper Functions** tab that help return `config` of the model, `tokenizer` of the model and the actual PyTorch `model`.
# After the `model` is loaded it is always good practice to resize the model's embeddings to the `tokenizer` size. This aligns the tokenizer's vocabulary with the model's embedding layer, which is very useful when we use a different tokenizer than the pretrained one or when we train a transformer model from scratch.
#
# Load model configuration.
print("Loading model configuration...")
config = get_model_config(model_data_args)
# Load model tokenizer.
print("Loading model's tokenizer...")
tokenizer = get_tokenizer(model_data_args)
# Loading model.
print("Loading actual model...")
model = get_model(model_data_args, config)
# Resize model to fit all tokens in tokenizer.
model.resize_token_embeddings(len(tokenizer))
# ## **Dataset and Collator**
# This is where I create the PyTorch Dataset and data collator objects that will be used to feed data into our model.
# This is where I use the MovieReviewsDataset text files created with the `movie_reviews_to_file` function. Since data is partitioned for both train and test I will create two text files: one used for train and one used for evaluation.
# I strongly recommend using a validation text file to determine how much training is needed and to avoid overfitting. Once you figure out which parameters yield the best results, the validation file can be folded back into the training data for a final run on the whole dataset.
# The data collator is used to format the PyTorch Dataset outputs to match what our specific transformers model expects: i.e. for BERT it will create the masked tokens needed for training.
# TODO: Load dataset
from datasets import load_dataset
dataset = load_dataset("csv", data_files="/kaggle/input/22-rows-dataset/dataset2.csv")
print(dataset)
print("\n")
print(dataset.column_names)
# Select n random rows from the dataset
n = 15000
dataset = dataset.shuffle()
dataset["train"] = dataset["train"].select(range(n))
# Print the sample
print(dataset)
# 90% train, 10% test + validation
dataset["train"], dataset["test"] = dataset["train"].train_test_split(0.1).values()
# # gather everyone if you want to have a single DatasetDict
# train_test_valid_dataset = DatasetDict({
# 'train': train_testvalid['train'],
# 'valid': train_testvalid['test']})
print(dataset)
print("\n")
print(dataset.column_names)
dataset = dataset.remove_columns(
["id", "amar", "klasifikasi", "sub_klasifikasi", "riwayat_tuntutan", "amar_putusan"]
)
print(dataset)
print("\n")
print(dataset.column_names)
dataset["train"]["fakta"][5]
# Write the column values to a text file
with open("train-0.txt", "w") as f:
for value in dataset["train"]["fakta"][0]:
f.write(str(value) + "\n")
column_data_train = dataset["train"]["fakta"]
column_data_test = dataset["test"]["fakta"]
# Write the column values to a text file
with open("train.txt", "w") as f:
for value in column_data_train:
f.write(str(value) + "\n")
# Write the column values to a text file
with open("test.txt", "w") as f:
for value in column_data_test:
f.write(str(value) + "\n")
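# As recommended above, once the best hyperparameters are found the validation
# text can be folded back into the training data for one final run. A minimal
# sketch, assuming the `train.txt` and `test.txt` files written in this cell
# (the `train_full.txt` name is only an illustration):
with open("train_full.txt", "w") as f_out:
    for path in ["train.txt", "test.txt"]:
        with open(path, "r") as f_in:
            # Append each file's lines to the combined training file.
            f_out.write(f_in.read())
# `model_data_args.train_data_file` could then be pointed at "train_full.txt".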
print(column_data_train[0])
# print(column_data_train.size())
# # Create texts file from train data.
# movie_reviews_to_file(path_data='/kaggle/working/aclImdb/train', path_texts_file='/kaggle/working/train.txt')
# # Create texts file from test data.
# movie_reviews_to_file(path_data='/kaggle/working/aclImdb/test', path_texts_file='/kaggle/working/test.txt')
# TODO: pre-process dataset
# Setup train dataset if `do_train` is set.
print("Creating train dataset...")
train_dataset = (
get_dataset(model_data_args, tokenizer=tokenizer, evaluate=False)
if training_args.do_train
else None
)
# Setup evaluation dataset if `do_eval` is set.
print("Creating evaluate dataset...")
eval_dataset = (
get_dataset(model_data_args, tokenizer=tokenizer, evaluate=True)
if training_args.do_eval
else None
)
# Get data collator to modify data format depending on type of model used.
data_collator = get_collator(model_data_args, config, tokenizer)
# Check how many logging prints you'll have. This is to avoid overflowing the
# notebook with a lot of prints. Display warning to user if the logging steps
# that will be displayed is larger than 100.
if (
len(train_dataset)
// training_args.per_device_train_batch_size
// training_args.logging_steps
* training_args.num_train_epochs
) > 100:
# Display warning.
warnings.warn(
    "Your `logging_steps` value will do a lot of printing!"
" Consider increasing `logging_steps` to avoid overflowing"
" the notebook with a lot of prints!"
)
# ## **Train**
# Hugging Face was very nice to us for creating the `Trainer` class. It makes PyTorch training of transformers models very easy! We just need to make sure we load the proper parameters and everything else is taken care of!
# At the end of training the tokenizer is saved along with the model so you can easily re-use it later or even upload it to Hugging Face Models.
# I configured the arguments to display both train and validation loss at every `logging_steps`. This gives us a sense of how well the model is training.
# Initialize Trainer.
print("Loading `trainer`...")
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
# Check model path to save.
if training_args.do_train:
print("Start training...")
# Setup model path if the model to train loaded from a local path.
model_path = (
model_data_args.model_name_or_path
if model_data_args.model_name_or_path is not None
and os.path.isdir(model_data_args.model_name_or_path)
else None
)
# Run training.
trainer.train(model_path=model_path)
# Save model.
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =).
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir)
# ## **Plot Train**
# The `Trainer` class is so useful that it will record the log history for us. I use this to access the train and validation losses recorded at each `logging_steps` during training.
# Since we are training / fine-tuning / extended training or pretraining (depending what terminology you use) a language model, we want to compute the perplexity.
# This is what [Wikipedia](https://en.wikipedia.org/wiki/Perplexity) says about perplexity: *In information theory, perplexity is a measurement of how well a probability distribution or probability model predicts a sample. It may be used to compare probability models. A low perplexity indicates the probability distribution is good at predicting the sample.*
# We can look at the perplexity plot in the same way we look at the loss plot: the lower the better and if the validation perplexity starts to increase we are starting to overfit the model.
# **Note:** It looks from the plots that the train loss is higher than the validation loss. That means that our validation data is too easy for the model and we should use a different validation dataset. Since the purpose of this notebook is to show how to train transformers models and provide tools to evaluate such a process, I will leave the results *as is*.
#
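# A quick numeric illustration of the loss-to-perplexity relationship used
# below (perplexity is simply the exponential of the cross-entropy loss):
import math
for example_loss in [1.0, 2.0, 3.0]:
    # exp(1.0) ~ 2.72, exp(2.0) ~ 7.39, exp(3.0) ~ 20.09
    print(f"loss={example_loss:.1f} -> perplexity={math.exp(example_loss):.2f}")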
# Keep track of train and evaluate loss.
loss_history = {"train_loss": [], "eval_loss": []}
# Keep track of train and evaluate perplexity.
# This is a metric useful to track for language models.
perplexity_history = {"train_perplexity": [], "eval_perplexity": []}
# Loop through each log history.
for log_history in trainer.state.log_history:
if "loss" in log_history.keys():
# Deal with training loss.
loss_history["train_loss"].append(log_history["loss"])
perplexity_history["train_perplexity"].append(math.exp(log_history["loss"]))
elif "eval_loss" in log_history.keys():
# Deal with eval loss.
loss_history["eval_loss"].append(log_history["eval_loss"])
perplexity_history["eval_perplexity"].append(math.exp(log_history["eval_loss"]))
# Plot Losses.
plot_dict(
loss_history,
start_step=training_args.logging_steps,
step_size=training_args.logging_steps,
use_title="Loss",
use_xlabel="Train Steps",
use_ylabel="Values",
magnify=2,
)
print()
# Plot Perplexities.
plot_dict(
perplexity_history,
start_step=training_args.logging_steps,
step_size=training_args.logging_steps,
use_title="Perplexity",
use_xlabel="Train Steps",
use_ylabel="Values",
magnify=2,
)
# ## **Evaluate**
# For the final evaluation we can have a separate test set that we use to do our final perplexity evaluation. For simplicity I used the same validation text file for the final evaluation. That is the reason I get the same results as the last validation perplexity plot value.
# check if `do_eval` flag is set.
if training_args.do_eval:
# capture output if trainer evaluate.
eval_output = trainer.evaluate()
# compute perplexity from model loss.
perplexity = math.exp(eval_output["eval_loss"])
print("\nEvaluate Perplexity: {:10,.2f}".format(perplexity))
else:
print("No evaluation needed. No evaluation data provided, `do_eval=False`!")
model.push_to_hub(
"legal-indobert-pytorch-v4",
use_auth_token="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",
organization="kapanjagocoding",
)
tokenizer.push_to_hub(
"legal-indobert-pytorch-v4",
use_auth_token="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",
organization="kapanjagocoding",
)
|
import numpy as np
import pandas as pd
# data=pd.read_csv('/kaggle/input/kannada/Kannada.csv')
data = pd.read_csv("/kaggle/input/malayalam/Malayalam.csv")
import re
def remove_hash_symbols(text):
return re.sub(r"#", "", text)
def remove_urls(text):
return re.sub(r"http\S+", "", text)
data["comment"] = data["comment"].apply(remove_hash_symbols)
data["comment"] = data["comment"].apply(remove_urls)
data["comment"] = data["comment"].apply(lambda x: x.strip())
data = data[data["comment"].astype(bool)]
import emoji
def replace_emojis(text):
return emoji.demojize(text)
data["comment"] = data["comment"].apply(replace_emojis)
data.head()
df = data
import pandas as pd
import random
import torch
from transformers import MBartForConditionalGeneration, MBart50Tokenizer
import langid
# Calculate the CMI of each comment using langid
cmi_values = {}
for comment in df["comment"]:
lang, prob = langid.classify(comment)
cmi_value = 1 - prob
cmi_values[comment] = cmi_value
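# Note: `langid.classify` returns an unnormalized (log-space) confidence rather
# than a probability in [0, 1], so `1 - prob` above is only a rough proxy for a
# code-mixing score. A minimal sketch of the documented way to obtain normalized
# probabilities instead (an alternative, not what the loop above uses):
from langid.langid import LanguageIdentifier, model as langid_model
identifier = LanguageIdentifier.from_modelstring(langid_model, norm_probs=True)
lang_norm, prob_norm = identifier.classify(df["comment"].iloc[0])
print(lang_norm, prob_norm)  # prob_norm is a proper probability in [0, 1]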
# Define function to randomly select tokens from the comments based on their CMI probabilities
def select_tokens(comment):
# Split the comment into tokens
tokens = comment.split()
# Every token inherits its comment's CMI value as its selection weight
# (so within a single comment the weights are effectively uniform)
cmi_probabilities = [cmi_values[comment] for token in tokens]
# Select a random subset of tokens based on their CMI probabilities
selected_tokens = random.choices(
tokens, weights=cmi_probabilities, k=int(len(tokens) * 0.5)
)
# Return the selected tokens as a single string
return " ".join(selected_tokens)
# Initialize mBART model and tokenizer
model_name = "facebook/mbart-large-50-many-to-many-mmt"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name).to(device)
# Define function to translate selected tokens using mBART
def translate_tokens(tokens):
    # `tokens` arrives as a single string from `select_tokens`,
    # so it is passed to the tokenizer directly
    encoded_tokens = tokenizer(tokens, return_tensors="pt", padding=True).to(device)
    # Generate the translation, forcing Malayalam as the target language
    generated_tokens = model.generate(
        **encoded_tokens,
        forced_bos_token_id=tokenizer.lang_code_to_id["ml_IN"],
    )
    # Decode the generated tokens using the mBART tokenizer
    translated_tokens = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
    return translated_tokens.split()
# Select and translate tokens in the dataframe and update the 'comment' column in place
df["comment"] = (
df["comment"].apply(select_tokens).apply(translate_tokens).apply(" ".join)
)
df.head()
df.to_csv("SCM.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# for data exploration
import pandas as pd
import numpy as np
import datetime as dt
import io
# remove warnings
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.filterwarnings("ignore")
# for text processing
import re
import nltk
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import string
string.punctuation
# from bs4 import BeautifulSoup
# from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
# from wordcloud import WordCloud,STOPWORDS, ImageColorGenerator
# # gensim for LDA
# import gensim
# import gensim.corpora as corpora
# from gensim.utils import simple_preprocess
# from gensim.models import CoherenceModel
# plotting tools
import matplotlib.pyplot as plt
import seaborn as sns
# import plotly.express as ex
# from plotly.subplots import make_subplots
# import pyLDAvis
# import pyLDAvis.gensim #dont skip this
# import pyLDAvis.gensim_models
import matplotlib.pyplot as plt
# to display all columns ana rows
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.min_rows", None)
pd.set_option("display.expand_frame_repr", True)
print("Libraries Imported!")
import nltk
nltk.download("stopwords")
nltk.download("punkt")
nltk.download("wordnet")
nltk.download("omw-1.4")
nltk.download("averaged_perceptron_tagger")
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
train.head()
# informations about the structure and content of the train data
train.info()
# #Get the number of null values for each column of train dataset
train.isnull().sum()
# we can drop location column as it contains info of location from where tweets were done
train = train.drop(columns=["location"], axis=1)
train.head()
train["target"].value_counts()
def text_cleaner(var):
    """
    Function for text preprocessing with Porter Stemming.
    """
    sw = set(stopwords.words("english"))
    ps = PorterStemmer()
    # keep only alphanumeric characters, lower-case and strip surrounding whitespace
    my_text = re.sub("[^A-Za-z0-9]+", " ", var).lower().strip()
# remove stop words
my_text = [word for word in my_text.split() if word not in sw]
# stemming
my_text = [ps.stem(word) for word in my_text]
# convert back to sentence
my_text = " ".join(my_text)
return my_text
# text = dataset['text']
train["text"] = train["text"].apply(text_cleaner)
train.head()
vectorizer = CountVectorizer(
stop_words="english", ngram_range=(1, 3), max_df=1.0, min_df=1, max_features=None
)
# vectorizer.vocabulary_
vectors = vectorizer.fit_transform(train["text"])
# it contains 7613 docs (sentences) and 118568 unique words
vectors.shape
# X = vectors.todense()
# or
X = vectors.toarray()
X
Y = train["target"].values
Y
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.30, random_state=42
)
clf = LogisticRegression()
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
clf.score(X_test, y_test)
acc_score = metrics.accuracy_score(y_test, pred)
f1_score = metrics.f1_score(y_test, pred, average="macro")
print("Total accuracy classification score: {}".format(acc_score))
print("Total F1 classification score: {}".format(f1_score))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, KFold, cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.metrics import explained_variance_score, r2_score
from sklearn.preprocessing import StandardScaler
from scipy.stats import normaltest
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/vodafone-age-20k/vodafone-subset_20K.csv")
df.head()
df = df[
[
"target",
"CALCULATION_METHOD_ID",
"calls_count_in_weekdays",
"calls_duration_in_weekdays",
"calls_count_out_weekdays",
"calls_duration_out_weekdays",
"calls_count_in_weekends",
"calls_duration_in_weekends",
"calls_count_out_weekends",
"calls_duration_out_weekends",
]
]
df
df.describe().T
df.target.value_counts()
df["target"].value_counts().plot(kind="bar", color="green")
# Most subscribers belong to age groups 4 and 5.
sns.boxplot(df["target"])
plt.show()
from scipy.stats import spearmanr
numeric_columns = [
"calls_count_in_weekdays",
"calls_duration_in_weekdays",
"calls_count_out_weekdays",
"calls_duration_out_weekdays",
"calls_count_in_weekends",
"calls_duration_in_weekends",
"calls_count_out_weekends",
"calls_duration_out_weekends",
]
for column in numeric_columns:
r = spearmanr(df[column], df["target"])
print(f"Spearmanr correlation for {column}: {r[0]}, p-value = {r[1]}")
X = df.drop("target", axis=1)
y = df["target"]
scaler = StandardScaler()
scaler.fit(X)
X_st = scaler.transform(X)
print(X_st[:5])
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(
X_st, y, test_size=0.25, random_state=19
)
# **K-Nearest Neighbors (KNN)**
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=7)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_valid)
print(y_pred)
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_valid, y_pred)
print(accuracy)
from sklearn.model_selection import KFold, cross_val_score
kf = KFold(n_splits=5, shuffle=True, random_state=42)
knn = KNeighborsClassifier(n_neighbors=7)
scores = cross_val_score(estimator=knn, X=X_st, y=y, cv=kf, scoring="accuracy")
print(f"An array of metric values: {scores}")
print(f"Average metric on cross validations: {scores.mean()}")
knn_params = {"n_neighbors": np.arange(1, 51)}
knn_grid = GridSearchCV(knn, knn_params, scoring="accuracy", cv=7)
knn_grid.fit(X_train, y_train)
knn_grid.best_estimator_
knn_grid.best_score_
results_df = pd.DataFrame(knn_grid.cv_results_)
grid_results = pd.DataFrame(knn_grid.cv_results_)
import matplotlib.pyplot as plt
plt.plot(grid_results["param_n_neighbors"], grid_results["mean_test_score"])
plt.xlabel("n_neighbors")
plt.ylabel("score")
plt.show()
pd.DataFrame(knn_grid.cv_results_).T
best_knn = KNeighborsClassifier(n_neighbors=40)
y_pred = best_knn.fit(X_train, y_train).predict(X_valid)
accuracy_score(y_valid, y_pred)
# **Linear Regression**
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
y_pred = lin_reg.predict(X_valid)
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
median_absolute_error,
r2_score,
)
print("MSE:", mean_squared_error(y_valid, y_pred))
print("MAE:", mean_absolute_error(y_valid, y_pred))
print("MedAE:", median_absolute_error(y_valid, y_pred))
print("R2:", r2_score(y_valid, y_pred))
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
sns.distplot(df["target"])
plt.show()
sns.boxplot(df["target"])
plt.show()
from sklearn.linear_model import Ridge
ridge = Ridge() # default alpha=1
ridge.fit(X_train, y_train)
y_pred = ridge.predict(X_valid)
print("MSE:", mean_squared_error(y_valid, y_pred))
print("MAE:", mean_absolute_error(y_valid, y_pred))
print("MedAE:", median_absolute_error(y_valid, y_pred))
print("R2:", r2_score(y_valid, y_pred))
# Ridge with alpha tuning
from sklearn.model_selection import GridSearchCV
alpha_grid = {"alpha": np.logspace(-4, 4, 20)} # 20 points from 10^(-4) to 10^4
ridge_grid = GridSearchCV(ridge, alpha_grid, cv=5, scoring="neg_mean_squared_error")
ridge_grid.fit(X_train, y_train)
print("Best alpha:", ridge_grid.best_params_)
print("Best score:", ridge_grid.best_score_)
ridge_best = ridge_grid.best_estimator_
y_pred = ridge_best.predict(X_valid)
print("R2:", r2_score(y_valid, y_pred))
# Validation curve
# x --- hyperparameter values (param_alpha)
# y --- metric values (mean_test_score)
import matplotlib.pyplot as plt
results_df = pd.DataFrame(ridge_grid.cv_results_)
plt.plot(results_df["param_alpha"], results_df["mean_test_score"])
# Label the axes and the plot
plt.xlabel("alpha")
plt.ylabel("Mean test score (negative MSE)")
plt.title("Validation curve")
plt.show()
# Lasso
from sklearn.linear_model import Lasso
lasso = Lasso() # default alpha=1
lasso.fit(X_train, y_train)
y_pred = lasso.predict(X_valid)
print("MSE:", mean_squared_error(y_valid, y_pred))
print("MAE:", mean_absolute_error(y_valid, y_pred))
print("MedAE:", median_absolute_error(y_valid, y_pred))
print("R2:", r2_score(y_valid, y_pred))
eps = 1e-6
lasso_coef = lasso.coef_
print("Zero coefficients:", sum(np.abs(lasso_coef) < eps))
print("Total coefficients:", lasso_coef.shape[0])
# Lasso with alpha tuning
alpha_grid = {"alpha": np.logspace(-3, 3, 10)} # 10 points from 10^(-3) to 10^3
lasso_grid = GridSearchCV(lasso, alpha_grid, cv=5, scoring="neg_mean_squared_error")
lasso_grid.fit(X_train, y_train)
# Look at the best results
print("Best alpha:", lasso_grid.best_params_)
print("Best score:", lasso_grid.best_score_)
lasso_best = lasso_grid.best_estimator_
y_pred = lasso_best.predict(X_valid)
print("R2:", r2_score(y_valid, y_pred))
# Validation curve
# x axis --- hyperparameter values (param_alpha)
# y axis --- metric values (mean_test_score)
results_df = pd.DataFrame(lasso_grid.cv_results_)
plt.plot(results_df["param_alpha"], results_df["mean_test_score"])
plt.xlabel("alpha")
plt.ylabel("Mean test score (negative MSE)")
plt.title("Validation curve")
plt.show()
eps = 1e-6
lasso_coef = lasso_best.coef_
print("Zero coefficients:", sum(np.abs(lasso_coef) < eps))
print("Total coefficients:", lasso_coef.shape[0])
# Ridge with alpha tuning for r2
from sklearn.model_selection import GridSearchCV
alpha_grid = {"alpha": np.logspace(-4, 4, 20)} # 20 points from 10^(-4) to 10^4
ridge_grid = GridSearchCV(ridge, alpha_grid, cv=5, scoring="r2")
ridge_grid.fit(X_train, y_train)
# Look at the best results for r2
print("Best alpha:", ridge_grid.best_params_)
print("Best score:", ridge_grid.best_score_)
ridge_best = ridge_grid.best_estimator_
y_pred = ridge_best.predict(X_valid)
print("MSE:", mean_squared_error(y_valid, y_pred))
print("R2:", r2_score(y_valid, y_pred))
import matplotlib.pyplot as plt
results_df = pd.DataFrame(ridge_grid.cv_results_)
plt.plot(results_df["param_alpha"], results_df["mean_test_score"])
plt.xlabel("alpha")
plt.ylabel("Mean test score (R2)")
plt.title("Validation curve")
plt.show()
# Lasso with alpha tuning for r2
alpha_grid = {"alpha": np.logspace(-3, 3, 10)} # 10 points from 10^(-3) to 10^3
lasso_grid = GridSearchCV(lasso, alpha_grid, cv=5, scoring="r2")
lasso_grid.fit(X_train, y_train)
print("Best alpha:", lasso_grid.best_params_)
print("Best score:", lasso_grid.best_score_)
lasso_best = lasso_grid.best_estimator_
y_pred = lasso_best.predict(X_valid)
print("MSE:", mean_squared_error(y_valid, y_pred))
print("R2:", r2_score(y_valid, y_pred))
print("MSE:", mean_squared_error(y_valid, y_pred))
print("MAE:", mean_absolute_error(y_valid, y_pred))
print("MedAE:", median_absolute_error(y_valid, y_pred))
print("R2:", r2_score(y_valid, y_pred))
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import (
train_test_split,
cross_validate,
KFold,
GridSearchCV,
)
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier, Pool
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, mean_squared_log_error
import warnings
warnings.filterwarnings("ignore")
from IPython.display import display, HTML
# ## Cosmetic Changes
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
# ## Reading the datasets
Root_path = "/kaggle/input/playground-series-s3e12"
Root_original = "/kaggle/input/kidney-stone-prediction-based-on-urine-analysis"
original = pd.read_csv(Root_original + "/kindey stone urine analysis.csv")
train = pd.read_csv(Root_path + "/train.csv")
test = pd.read_csv(Root_path + "/test.csv")
train.head(5)
test.head(5)
# ## Summary of the datasets
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
return summ
summary("test", test)
# ## Merging the original dataset with the train dataset
df_full = pd.concat([train, original])
for col in original.columns:
df_full[col] = df_full[col].astype("float64")
summary("full", df_full)
print(f" Row => {df_full.shape[0]}, Columns => {df_full.shape[1]}")
# ## Density of the various features in the train data
#
plt.figure(figsize=(14, 10))
for i in range(1, len(df_full.columns)):
plt.subplot(4, 4, i)
sns.kdeplot(x=train[train.columns[i]], label="Train Dataset")
plt.tight_layout()
# ## Correlation of features within dataset
plt.figure(figsize=(12, 8))
mask = np.triu(np.ones_like(df_full.corr()))
sns.heatmap(train.corr(), mask=mask, annot=True, fmt=".2f")
df_full.columns
# ## Feature Engineering
features = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
X = df_full[features].copy()
y = df_full["target"].copy()
# ## Splitting the datasets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=52)
model_dict = {
"Light GBM": LGBMClassifier(),
"XG Boost": XGBClassifier(),
"Cat Boost": CatBoostClassifier(),
"Random Forest": RandomForestClassifier(),
}
# ## Model Evaluation
def evaluation(model_str, y_pred, y_pred_train):
    # AUC on the held-out validation split and on the training split
    results = {
        "model": model_str,
        "auc score": roc_auc_score(y_val, y_pred),
        "train auc score": roc_auc_score(y_train, y_pred_train),
    }
    return results
# ## Training and Fitting the Models
result_list = []
for model in model_dict:
model_dict[model].fit(X_train, y_train)
y_pred = model_dict[model].predict(X_val)
y_pred_train = model_dict[model].predict(X_train)
result = evaluation(model, y_pred, y_pred_train)
result_list.append(result)
df_eval = pd.DataFrame(result_list)
df_eval.sort_values(by=["auc score"])
X_test = test[features].copy()
# ## Defining the hyper parameters
xgb_params = {
"seed": 42,
"objective": "binary:logistic",
"eval_metric": "auc",
"tree_method": "exact",
"n_jobs": -1,
"max_depth": 2,
"eta": 0.01,
"n_estimators": 100,
}
random_params = {
"n_estimators": 250,
"min_samples_leaf": 5,
"criterion": "entropy",
"random_state": 1,
}
# model = RandomForestClassifier(n_estimators=250, min_samples_leaf=5,criterion='entropy', random_state=1)
model = XGBClassifier(**xgb_params)
model.fit(X, y)
target = model.predict(X_test)
df_test = pd.DataFrame(data={"id": test["id"], "target": target.astype(int)})
df_test.to_csv("submission.csv", index=False)
df_test
|
import pandas as pd
df = pd.read_csv(
"/kaggle/input/cardiovascular-disease-dataset/cardio_train.csv", sep=";"
)
df.head()
df_1 = df.drop("id", axis=1)
df_1["age_years"] = df_1["age"] / 365.25
df_2 = df_1.drop("age", axis=1)
df_2.head()
df_3 = pd.get_dummies(df_2, columns=["cholesterol", "gluc"])
df_3.head()
y = df_3["cardio"]
df_4 = df_3.drop("cardio", axis=1)
df_4.head()
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(df_4)
print(X[:5])
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.25, random_state=42
)
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train, y_train)
y_pred = model.predict(X_valid)
print(y_pred[:5])
print(y_valid[:5])
from sklearn.metrics import accuracy_score
print(accuracy_score(y_valid, y_pred))
|
import pandas as pd
a = pd.read_csv(
"/kaggle/input/bigbasket-entire-product-list-28k-datapoints/BigBasket Products.csv",
usecols=["category", "sub_category", "product", "rating"],
index_col=["category", "sub_category"],
)
a.head(12)
a.sort_index()
a.loc["Baby Care"] # we can also access using just the first index level
a.loc["Baby Care", "product"]
a.loc[
("Baby Care", "Baby Accessories"), ["product", "rating"]
] # tuple for the index levels and a list of columns
|
# # Network Graph for Stack Overflow + Degree Distribution + Distance
# Agustika Indah Mayangsari_23220071
# ---Using Stack Overflow data, data attached
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
# Input data files check
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
import warnings
warnings.filterwarnings("ignore")
G = nx.Graph(day="Stackoverflow")
df_nodes = pd.read_csv("../input/stack_network_nodes.csv")
df_edges = pd.read_csv("../input/stack_network_links.csv")
for index, row in df_nodes.iterrows():
G.add_node(row["name"], group=row["group"], nodesize=row["nodesize"])
for index, row in df_edges.iterrows():
G.add_weighted_edges_from([(row["source"], row["target"], row["value"])])
color_map = {
1: "#f09494",
2: "#eebcbc",
3: "#72bbd0",
4: "#91f0a1",
5: "#629fff",
6: "#bcc2f2",
7: "#eebcbc",
8: "#f1f0c0",
9: "#d2ffe7",
10: "#caf3a6",
11: "#ffdf55",
12: "#ef77aa",
13: "#d6dcff",
14: "#d2f5f0",
}
plt.figure(figsize=(25, 25))
options = {
"edge_color": "#000000",
"width": 1,
"with_labels": True,
"font_weight": "regular",
}
colors = [color_map[G.nodes[node]["group"]] for node in G]
sizes = [G.nodes[node]["nodesize"] * 10 for node in G]
nx.draw(
G,
node_color=colors,
node_size=sizes,
pos=nx.spring_layout(G, k=0.25, iterations=50),
**options
)
ax = plt.gca()
ax.collections[0].set_edgecolor("#555555")
plt.show()
import collections
# draw degree distribution
def plot_degree_distribution(G):
degree_sequence = sorted(
[d for n, d in G.degree()], reverse=True
) # degree sequence
degreeCount = collections.Counter(degree_sequence)
deg, cnt = zip(*degreeCount.items())
fig, ax = plt.subplots()
plt.title("Degree Distribution")
plt.bar(deg, cnt)
plot_degree_distribution(G)
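# The notebook title also mentions distance. A minimal sketch (assuming the
# graph `G` built above) of the usual distance summaries, computed on the
# largest connected component since shortest paths are undefined across
# disconnected components (hop counts, ignoring edge weights):
largest_cc = max(nx.connected_components(G), key=len)
H = G.subgraph(largest_cc)
print("Nodes in largest component:", H.number_of_nodes())
print("Diameter:", nx.diameter(H))
print("Average shortest path length:", nx.average_shortest_path_length(H))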
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # graphs
import seaborn as sns # pretty graphs
import missingno as msno # missing values
import re # Regular Expressions
from sklearn.preprocessing import LabelEncoder # Encoding categorical variables
import pprint
# Pandas profiling helps while doing categorical data analysis
import pandas_profiling
from pandas_profiling import ProfileReport
from pandas_profiling.utils.cache import cache_file
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Part 1: Reading the data, general aspects of data ##
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
sub = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
train.head(5)
test.head(5)
sub.head(5)
train.describe().T
test.describe().T
train.info()
# ## False 'numeric' variables and Categorical Analysis
# Note that:
# 1. We could reduce Age to Age strips, 'categorizing' it
# 2. Neither SibSp, Parch nor Pclass are 'numeric' (i.e.: 'continuous'), but categorical.
train
mask1 = train["Age"] < 100
mask2 = train["Age"] >= 100
mask1
def age_grouping(df, col="Age"):
    """
    (pd.DataFrame, str) -> None
    Adds an 'Age_Groups' column to df in place: 0 if 0 <= age < 10,
    1 if 10 <= age < 20, ..., 9 if 90 <= age < 100, 10 if age >= 100
    and -1 for missing values. The original age column is then dropped."""
    df["Age_Groups"] = np.zeros(shape=df[col].shape)
    mask1 = df[col] < 100
    mask2 = df[col] >= 100
    df.loc[mask1, "Age_Groups"] = df[col] // 10
    df.loc[mask2, "Age_Groups"] = 10
    df.loc[~(mask1 | mask2), "Age_Groups"] = -1
    df.drop(col, axis=1, inplace=True)
# train['Age_Groups'] = train['Age'].map(lambda r: age_grouping(r))
age_grouping(df=train)
age_grouping(df=test)
# train.drop('Age', axis = 1, inplace = True)
# test['Age_Groups'] = test['Age'].map(lambda r: age_grouping(r))
# test.drop('Age', axis = 1, inplace = True)
# Identifying categorical features
# I have taken this code from: https://towardsdatascience.com/encoding-categorical-features-21a2651a065c
def identify_cat(dataframe):
"""
(pd.DataFrame) -> list
This function identifies and returns a list with the names of all the categorical columns of a DataFrame.
"""
categorical_feature_mask = (
dataframe.dtypes == object
) # Here, t can be the entire dataframe or only the features
categorical_cols = dataframe.columns[categorical_feature_mask].tolist()
return categorical_cols
catego = identify_cat(train)
pprint.pprint(catego)
def convert_type(dataframe, catego_cols):
"""
(pd.DataFrame, list) -> None
This is an optimization function. It converts the type of categorical columns in a DataFrame from 'object' to 'category',
making operations faster.
See the docs here: https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
"""
for column in catego_cols:
    # assign back, otherwise astype() has no effect on the DataFrame
    dataframe[column] = dataframe[column].astype("category")
convert_type(train, catego)
convert_type(test, catego)
numeric = set(train.columns) - set(catego)
numeric
# Since 'numeric' is a set, it is unhashable
numeric = list(numeric)
# Adjusting for the 'false' numeric features
catego.extend(["Parch", "Pclass", "SibSp", "Age_Groups"])
# Removing also the feature and Id out of the numeric features
for col in ["Parch", "Pclass", "SibSp", "Age_Groups", "Survived", "PassengerId"]:
numeric.remove(col)
# Checking for hidden NaN, as 'unknown', for example
for column in catego:
print(column)
print(train[column].unique())
profile = ProfileReport(
train[catego],
title="Titanic - Categorical Features",
html={"style": {"full_width": True}},
minimal=True,
)
profile.to_notebook_iframe()
# ## General notes about the categorical data alone (i.e.: without analysing the target yet):
# 1. At first sight, we could just discard the Name column. But it holds some interesting data: for example, we can distinguish between married (Mrs.) and unmarried (Miss.) women. We have some men with the title Mr., others with the title Master, others with the titles Rev. or Dr., and others with the title Don. We also have people with nicknames, either in quotation marks ('Kelly, Miss. Anna Katherine "Annie Kate"') or parentheses ('Palsson, Mrs. Nils (Alma Cornelia Berglund)'). All of this could help with social differentiation and, maybe, yield useful features.
# 2. An analogous observation applies to the "Ticket" feature. We have tickets that start with "C.A.", "STON/O", "PC", etc., whereas some of them are just numbers. Given the high cardinality (681 distinct values out of 891), we could split this column in two, dropping the numbers and keeping these prefixes.
# 3. Sex is a simple binary feature. We could encode it in several ways (one-hot encoding, Label Encoding, Target Encoding). This is highly model-dependent.
# 4. Although Cabin has a high percentage of missing values (77%), I think there is valuable information here. We could split this column in two, one with the letter and the other with the number. Note that, for some passengers, we have multiple values ('B57 B59 B63 B66') and things that look like typos ('D' or 'F G73'). In this case, we could make multiple columns, maybe "Cabin1", "Cabin2", "Cabin3", etc. and see if they are worth keeping, or maybe just keep the first value and add a column like "Number of Cabins" (a quick count sketch follows this list).
# 5. Embarked, Parch, Pclass, SibSp, 'Age_Groups': these are simple categorical features. We can treat them like Sex. Note that, with 'Age_Groups', we've already imputed the missing values (19%) with -1. We can change this afterwards if we have a good reason to.
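# A minimal sketch of the "number of cabins" idea from note 4 above; this is
# just an illustrative count and is not stored or used in the rest of the notebook.
cabin_count = train["Cabin"].fillna("").str.split().str.len()
cabin_count.value_counts()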
# ## Numeric Values ##
# checking null values
msno.bar(train[numeric], figsize=(16, 4))
# The bars stand for non-null values
sns.set_style("darkgrid")
sns.histplot(train["Fare"], kde=False)
plt.show()
# Naturally, we have a right-skewed distribution, with most of the mass at low fares (i.e.: lots of cheap tickets, a few expensive ones)
# ## Some visualizations of feature-target interaction and target distribution
# Look that we have a well-behaved, balanced target
sns.histplot(train["Survived"], kde=False)
plt.show()
# Percentage of Survivors
len(train[train["Survived"] == 1]["Survived"]) / len(train["Survived"])
# 1. Numeric feature (interaction with target)
sns.histplot(data=train, x="Fare", hue="Survived", kde=False, bins=30)
plt.show()
# 2. Categorical features
catego
sns.countplot(x="Sex", data=train, hue="Survived")
plt.show()
sns.countplot(x="Embarked", data=train, hue="Survived")
plt.show()
sns.countplot(x="Parch", data=train, hue="Survived")
plt.show()
sns.countplot(x="Pclass", data=train, hue="Survived")
plt.show()
sns.countplot(x="SibSp", data=train, hue="Survived")
plt.show()
sns.countplot(x="Age_Groups", data=train, hue="Survived")
plt.show()
# ## Now let's put some work in order to analyze 'Name', 'Ticket' and 'Cabin' columns
# 1. Name
def get_name_title(name):
name_title = re.compile(r"\w+, \w+.")
found = name_title.search(name).group()
return found.split(",")[1]
train["Name_Title"] = train["Name"].apply(lambda x: get_name_title(x))
train.drop("Name", axis=1, inplace=True)
train
le = LabelEncoder()
le.fit(train["Name_Title"])
train["Name_Title_Encoded"] = le.transform(train["Name_Title"])
train
train.drop("Name_Title", axis=1, inplace=True)
sns.countplot(x="Name_Title_Encoded", data=train, hue="Survived")
plt.show()
# 2. Ticket
def ticket_prefix(ticket):
raw = ticket.split()[0]
if raw.isdigit():
return ""
else:
return raw
train["Ticket_Prefix"] = train["Ticket"].apply(lambda x: ticket_prefix(x))
le_ticket = LabelEncoder()
le_ticket.fit(train["Ticket_Prefix"])
train["Ticket_Prefix_Encoded"] = le_ticket.transform(train["Ticket_Prefix"])
train
train.drop("Ticket_Prefix", axis=1, inplace=True)
plt.figure(figsize=(12, 3))
sns.countplot(x="Ticket_Prefix_Encoded", data=train, hue="Survived")
plt.show()
# 3. Cabin
# We must convert the missing values to strings
train["Cabin"] = train["Cabin"].fillna("")
def get_cabin_letter(cabin):
cabin_letter = re.compile(r"\w+")
found = cabin_letter.search(cabin)
if found:
return found.group()[0]
else:
return ""
train["Cabin_Letter"] = train["Cabin"].apply(lambda x: get_cabin_letter(x))
def get_cabin_number(cabin):
cabin_letter = re.compile(r"\w+")
found = cabin_letter.search(cabin)
if found:
return found.group()[1:]
else:
return "0"
train["Cabin_Number"] = train["Cabin"].apply(lambda x: get_cabin_number(x))
train
le_cabin = LabelEncoder()
le_cabin.fit(train["Cabin_Letter"])
train["Cabin_Letter_Encoded"] = le_cabin.transform(train["Cabin_Letter"])
train.drop(["Cabin", "Cabin_Letter"], axis=1, inplace=True)
train
plt.figure(figsize=(12, 3))
sns.countplot(x="Cabin_Letter_Encoded", data=train, hue="Survived")
plt.show()
train["Cabin_Number"].unique()
for value in train["Cabin_Number"].unique():
temp_train = train[train["Cabin_Number"] == value]
print(f"Cabin Number: {value} Passengers with this number: {len(temp_train)} ")
survival_percent = len(temp_train[temp_train["Survived"] == 1]["Survived"]) / len(
temp_train["Survived"]
)
print(f"Percentage of Survivors: {round(survival_percent,2)}")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
b = "selam!"
print(b[5:6])
b = "selamlar!yaren ben!"
print(b[:3])
b = "selamlar,yaren ben! yaren beşiktepe ödev yapıyor"
print(b[55:11])
b = "selamlar,yaren ben!"
print(b[-8:-7])
a = "selamlar, yaren ben!"
print(a.upper())
a = "selamlar, yaren ben! yaren ödev yapıyor"
print(a.lower())
a = " selamlar,yaren ben! "
print(a.strip())
a = "selam,ömer"
print(a.replace("anılar", "güzeldir"))
a = "selam dünya, selam zeynep"
print(a.split(",")) # returns ['selam dünya', ' selam zeynep']
b = a.split()
b[3]
a = "zeynep"
b = "yaren"
c = a + b
print(c)
a = "yaren zeynep'i seviyor"
b = "zeynep yaren'i seviyor"
c = a + " " + b
print(c)
age = 19
txt = "benim adım yaren, ben " + age
print(txt)
age = 19
txt = "benim adım yaren, ben {}"
print(txt.format(age))
quantity = 5
itemno = 545
price = 60.67
myorder = "ben {} biraz {} para {} isterim."
print(myorder.format(quantity, itemno, price))
quantity = 9
itemno = 777
price = 54.43
myorder = "ödemek istiyorum {0} dolar {1} bir parça {2}."
print(myorder.format(quantity, itemno, price))
txt = 'boş gemiler "geçiyor" gönlümün kıyısından.'
txt
x = "ykk"
print(len(x))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
fish = pd.read_csv("/kaggle/input/fish-market/Fish.csv")
fish.info()
fish.head()
fish_input = fish[
["Weight", "Length1", "Length2", "Length3", "Height", "Width"]
].to_numpy()
fish_target = fish["Species"].to_numpy()
from sklearn.model_selection import train_test_split
train_input, test_input, train_target, test_target = train_test_split(
fish_input, fish_target, test_size=0.2, random_state=42
)
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=9)
poly.fit(train_input)
train_poly = poly.transform(train_input)
test_poly = poly.transform(test_input)
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(train_poly)
train_scaled = ss.transform(train_poly)
test_scaled = ss.transform(test_poly)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(max_iter=10000)
lr.fit(train_scaled, train_target)
print("train set score: ", lr.score(train_scaled, train_target))
print("test set score: ", lr.score(test_scaled, test_target))
|
import folium
boston = folium.Map(location=[42.32, -71.0589], zoom_start=10, control_scale=True)
boston
torun = folium.Map(location=[53.01392063975564, 18.598804364300047], zoom_start=15)
torun
import pandas as pd
crimes = pd.read_csv(
"/kaggle/input/geospatial-learn-course-data/crimes-in-boston/crimes-in-boston/crime.csv",
encoding="latin-1",
)
crimes
crimes.dropna(inplace=True)
crimes
crimes = pd.read_csv(
"/kaggle/input/geospatial-learn-course-data/crimes-in-boston/crimes-in-boston/crime.csv",
encoding="latin-1",
)
crimes.dropna(inplace=True, subset=["Lat", "Long"])  # drop rows with missing coordinates
crimes = crimes[crimes.YEAR == 2017]  # keep only data from the chosen year
crimes = crimes[
crimes.OFFENSE_CODE.isin([612, 613, 615])
]  # keep only data about specific offense codes
# crimes = crimes.head(1000)
# crimes
from folium import Marker
from folium.plugins import MarkerCluster
boston = folium.Map(location=[42.32, -71.0589], zoom_start=10, control_scale=True)
cluster = MarkerCluster()  # create a marker cluster for the crime locations
for idx, row in crimes.iterrows():
# Marker(location=[row["Lat"], row["Long"]]).add_to(boston)
cluster.add_child(Marker(location=[row["Lat"], row["Long"]]))
boston.add_child(cluster)  # add the clusters to the map
boston
from folium.plugins import HeatMap
boston = folium.Map(location=[42.32, -71.0589], zoom_start=10, control_scale=True)
HeatMap(data=crimes[["Lat", "Long"]]).add_to(boston)
boston
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import re
import nltk
from nltk.stem import WordNetLemmatizer
from wordcloud import WordCloud
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Introduction
# In this project we will analyze a dataset containing 2253 job offers for a Data Analyst position scraped from *glassdoor.com* in 2020.
# Let's import the data first and see what it looks like.
df = pd.read_csv("/kaggle/input/data-analyst-jobs/DataAnalyst.csv", index_col=0)
df.head()
df.info()
# We can see from the first 5 rows that, although the summary above suggests every column is non-null, there are values of *-1* in columns such as *Competitors* or *Easy Apply*, indicating that data is actually missing there.
df.replace(-1, np.nan, inplace=True)
df.isna().sum()
# Some NaN values appeared, but the last two columns are still unaffected. We need to pass *-1* as a string as well, in order to cover the columns of type *object*.
df.replace("-1", np.nan, inplace=True)
df.isna().sum()
# Now we have a more accurate picture of the missing data. Let's drop the columns where the majority of values are NaN.
df = df.loc[:, df.isna().sum() < (0.5 * len(df))]
df.columns
# for numerical variables
df.describe()
# These statistics don't mean much yet, since they describe companies and each company appears in the dataframe as many times as it has posted offers.
# for categorical variables
cat_cols = df.select_dtypes(include="object")
cat_cols.describe()
# Some interesting insights:
# - Our earlier suspicion is confirmed - the most frequent company appeared 58 times
# - New York is the most frequent location; we'll see further on how big the gap is between it and the rest of the cities
# - Unsurprisingly IT leads both in Industry and Sector categories
# - We didn't take into account the "Unknown" value in the Revenue column when replacing with NaN.
# unknowns in categorical columns
for col in cat_cols:
print(col + " = " + str((df[col].str.contains("Unknown")).sum()))
# We can see that it wasn't only the Revenue column. Let's transform these values and see if some other columns need to be dropped too.
unknowns = ["Revenue", "Size", "Type of ownership"]
for col in unknowns:
df.loc[df[col].str.contains("Unknown", na=False), col] = np.nan
for col in cat_cols:
print(col + " = " + str((df[col].str.contains("Unknown")).sum()))
df.isna().sum()
sum(df["Revenue"].isna()) / len(df)
# The most NaNs appear in *Revenue* column now, but we don't need to remove it since there is still more than half of valid data to analyze.
# ## Ratings
# In this section I'd like to see what's the average rating of companies in each sector.
# In order to do this first we need to remove the duplicates so that each company contributes equally to the average.
df_unique = df.drop_duplicates(subset="Company Name", keep="first")
print(
f"""
We've dropped {len(df) - len(df_unique)} duplicate rows from {len(df)} total records.
This leaves us with {len(df_unique)} records. It's still a lot of data."""
)
# Now let's see how many job offers each company posted on average for each sector.
off_per_comp = (
(df["Sector"].value_counts() / df_unique["Sector"].value_counts())
.sort_values(ascending=False)
.round(1)
)
off_per_comp
off_per_comp.sort_values(inplace=True)
plt.figure(figsize=(10, 7))
sns.set(style="whitegrid")
plt.barh(off_per_comp.index, off_per_comp)
plt.grid(axis="y")
plt.title("Number of job offers per company in sector")
plt.xlim(0, 2.5)
plt.show()
# We can see that for *Accounting & Legal* and *Restaurants, Bars & Food Services* each company posted on average more than two offers. It would be interesting to see if that's really the case or if it's one company that highly impacts the mean.
rest = df[df["Sector"] == "Restaurants, Bars & Food Services"]
rest["Company Name"].value_counts().sort_values(ascending=False)[:5]
acc = df[df["Sector"] == "Accounting & Legal"]
acc["Company Name"].value_counts().sort_values(ascending=False)[:5]
# Okay, now we understand why it was necessary to get rid of duplicates before moving to the average rating comparison for each sector. Using the Accounting sector as an example, we would get overly optimistic results for the average rating, while the remaining companies don't necessarily match the most frequent one.
df_grouped = (
df_unique[["Sector", "Rating"]]
.groupby("Sector")
.agg(
rating_avg=("Rating", "mean"),
rating_std=("Rating", "std"),
count=("Rating", "size"),
)
)
df_grouped[:5]
df_grouped[df_grouped["count"] < 10]
# For the sake of having reliable results let's assume that 10 companies is the minimal number to represent the whole sector, so we will exclude the sectors above from the further analysis.
df_grouped = df_grouped[df_grouped["count"] > 10]
df_grouped = df_grouped.sort_values(by="count", ascending=True)
plt.figure(figsize=(10, 5))
plt.barh(df_grouped.index, df_grouped["count"])
plt.title("Sectors by number of companies")
plt.xlabel("Number")
plt.show()
# The IT and Business sectors really outnumber the rest in terms of company count. Is quality going to match the quantity?
# We will find out later. Now let's take a quick look at Industries, which are a subcategory of Sectors.
df_unique["Industry"].value_counts().nlargest(5)
df_unique_ind = df_unique["Industry"].value_counts().nlargest(15)
df_unique_ind = df_unique_ind.reset_index()
df_unique_ind.columns = ["Industry", "Count"]
df_unique_ind[:5]
# we want to join the name of the sector each industry belongs to onto the df above
df_unique_ind = df_unique_ind.merge(
df[["Industry", "Sector"]].drop_duplicates("Industry"), on="Industry", how="left"
)
df_unique_ind[:5]
plt.figure(figsize=(10, 5))
ax = sns.barplot(
data=df_unique_ind,
x="Count",
y="Industry",
hue="Sector",
palette=sns.color_palette(),
dodge=False,
)
sns.move_legend(ax, "lower right") # bbox_to_anchor=(1, 1))
plt.xlim(0, 210)
ax.set(ylabel=None)
ax.set(xlabel="Number")
plt.grid(axis="y")
plt.title("Top 15 industries by number of companies")
plt.show()
# Let's go back to the sectors and see how each of them ranks based on average rating of companies.
df_grouped = df_grouped[df_grouped["count"] > 10]
df_grouped = df_grouped.sort_values(by="rating_avg")
plt.figure(figsize=(10, 5))
plt.hlines(df_grouped.index, xmin=0, xmax=df_grouped["rating_avg"])
plt.plot(df_grouped["rating_avg"], df_grouped.index, "o")
plt.errorbar(
df_grouped["rating_avg"],
df_grouped.index,
xerr=df_grouped["rating_std"],
fmt=".k",
elinewidth=0.5,
capsize=3,
)
# plt.xticks(rotation=60)
plt.xlim(1, 5)
plt.title("Average rating of companies in each sector")
plt.xlabel("Average rating")
plt.show()
# Interesting that at first glance *Retail* may appear to be the worst sector, but thanks to plotting the standard deviation we can also see that the data there is quite spread out.
# There is a different way of looking at the same statistics with even more information.
df_grouped = df_grouped.sort_values(by="rating_avg", ascending=False)
my_order = df_grouped["rating_avg"].index.tolist()
df_unique = df_unique[
df_unique["Sector"].isin(df_grouped[df_grouped["count"] > 10].index)
]
plt.figure(figsize=(10, 5))
g = sns.boxplot(
x="Rating", y="Sector", data=df_unique, order=my_order, orient="h"
) # order= my_order,
g.set_xticks(range(1, 6))
g.set_ylabel(None)
g.set_title("Ratings distribution in the sectors")
plt.grid(axis="y")
plt.show()
# * Having preserved the order from the previous plot, we can see how different the conclusions can be just by changing the type of plot. For example, if we were to sort by median we would get a totally different ranking (apart from our top 4 sectors).
# * The plot above lets us see more clearly how the ratings spread within each sector, with the whiskers extending up to 1.5 interquartile ranges from the box edges and the black diamonds marking companies whose ratings fall outside that range (see the short sketch after this list).
# * This plot confirms our observation regarding the *Retail* sector: it's not necessarily the worst sector, having half of its companies rated between 3 and 4.
# * Thanks to including outliers on the plot we can see that there are some black sheep in the well-reputed IT sector.
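# A minimal sketch of the whisker rule mentioned above: the standard 1.5 * IQR convention used by matplotlib/seaborn boxplots, computed by hand for the Rating column of df_unique.
q1, q3 = df_unique["Rating"].quantile([0.25, 0.75])
iqr = q3 - q1
lower_fence, upper_fence = q1 - 1.5 * iqr, q3 + 1.5 * iqr
print(f"IQR fences for Rating: [{lower_fence:.2f}, {upper_fence:.2f}]")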
# Now let's try to show both average rating and the number of companies!
my_cmap = plt.get_cmap("rocket_r")
# in order to map the ratings onto the color vector we need to scale them to 0-1 range
rescale = lambda y: (y - 1) / (5 - 1)
rescale(df_grouped["rating_avg"]).sort_values(ascending=False)
df_grouped = df_grouped.sort_values(by="count", ascending=True)
fig, ax = plt.subplots(figsize=(10, 5))
ax.barh(
df_grouped.index,
df_grouped["count"],
color=my_cmap(rescale(df_grouped["rating_avg"])),
)
plt.title("Number of unique companies in each sector")
plt.xlabel("Number")
norm = plt.Normalize(1, 5)
sm = plt.cm.ScalarMappable(cmap="rocket_r", norm=norm)
sm.set_array([])
cb = fig.colorbar(sm, ax=ax)
cb.ax.set_title("Rating", pad=10)
plt.show()
# Well, that only reminds us that the differences are very subtle and every sector scores similarly on average rating.
# To finish the rating analysis, let's check whether the size of the company is a good predictor of the variable in question.
df_grouped_size = df_unique.groupby("Size").agg(
rating_avg=("Rating", "mean"),
rating_std=("Rating", "std"),
count=("Rating", "size"),
)
df_grouped_size
order = [
"1 to 50 employees",
"51 to 200 employees",
"201 to 500 employees",
"501 to 1000 employees",
"1001 to 5000 employees",
"5001 to 10000 employees",
"10000+ employees",
]
plt.figure(figsize=(10, 5))
g = sns.boxplot(x="Rating", y="Size", data=df_unique, order=order, orient="h")
g.set_ylabel(None)
g.set_title("Ratings distribution for size of the company category")
plt.show()
# The categories are sorted from smallest to biggest. There is a pattern here and it's not a favourable one for the corporations. However, the smaller the group, the bigger the variability in rating, so the distinction isn't really that clear.
# ## Salaries
# With regard to salary it's not necessary to remove offers from the same company, so we will work on the original data.
# We also need to keep in mind that these are estimates from the website, so what we are going to analyze is not a precise reflection of reality.
print(df["Salary Estimate"].unique()[:6])
# We need to transform these estimates into more friendly format for analysis.
df[df["Salary Estimate"].isna()]
# We also have one offer without *Salary Estimate*. Let's drop it.
df.dropna(subset=["Salary Estimate"], inplace=True)
# using regex library to get plain numbers from salary estimate - example below
string = "$37K-$66K (Glassdoor est.)"
values = re.findall(r"\d+", string)
values
estimates = df["Salary Estimate"].tolist()
# for each record save the lower and upper value for salary estimate
salary_low = []
salary_high = []
for e in estimates:
salary_range = re.findall(r"\d+", str(e))
salary_range
salary_low.append(int(salary_range[0]))
salary_high.append(int(salary_range[1]))
df["Salary Low"] = np.array(salary_low)
df["Salary High"] = np.array(salary_high)
df[["Salary Estimate", "Salary Low", "Salary High"]][:1]
# We can now drop the original feature.
df.drop("Salary Estimate", axis=1, inplace=True)
df
# Let's take a closer look now how the salaries differ in most frequent locations.
top_twenty = df["Location"].value_counts().nlargest(20).index.tolist()
print(top_twenty)
df_cities = df[
df["Location"].isin(top_twenty)
] # df with records from the above cities and unique companies
df_cities["Location"].value_counts()[:5]
# As promised at the beginning, we can now see the gap between NY and the rest of the cities.
df_sal = df_cities.groupby("Location").agg(
count=("Location", "size"),
mean_low=("Salary Low", "mean"),
std_low=("Salary Low", "std"),
mean_high=("Salary High", "mean"),
std_high=("Salary High", "std"),
)
df_sal.sort_values(by="mean_low", inplace=True)
df_sal[:5]
plt.figure(figsize=(10, 5))
plt.hlines(
df_sal.index, xmin=df_sal["mean_low"], xmax=df_sal["mean_high"], colors="black"
)
(plot1,) = plt.plot(df_sal["mean_low"], df_sal.index, "o", label="low")
(plot2,) = plt.plot(df_sal["mean_high"], df_sal.index, "o", label="high")
plt.legend([plot1, plot2], ["Mean low salary", "Mean high salary"])
plt.xlim(0, 200)
plt.title("Average low and high salary for location")
plt.xlabel("Salary")
plt.show()
# It looks like location can explain the variability in salary range, but we have to keep in mind the limitations of taking the average.
# Let's investigate this topic further.
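# As a small illustrative aside (not part of the original analysis): medians are less sensitive to outliers than means, so a quick per-city median gives a similar but more robust picture of the salary ranges.
df_cities.groupby("Location")[["Salary Low", "Salary High"]].median().sort_values(
    by="Salary Low", ascending=False
)[:5]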
df.columns
# transforming the df in order to plot the data with seaborn and its hue parameter
df_low = df_cities.drop("Salary High", axis=1)
df_low["Salary type"] = "Low"
df_low.rename(columns={"Salary Low": "Salary"}, inplace=True)
df_low[:5]
df_high = df_cities.drop("Salary Low", axis=1)
df_high["Salary type"] = "High"
df_high.rename(columns={"Salary High": "Salary"}, inplace=True)
df_high
df_sals = pd.concat([df_low, df_high])
df_sals[df_sals["Company Name"] == "Visiting Nurse Service of New York\n3.8"]
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.histplot(ax=axes[0], data=df_sals, x="Salary", hue="Salary type", bins=20, kde=True)
axes[0].set_xlim(0, 200)
sns.boxplot(ax=axes[1], data=df_sals, x="Salary", y="Salary type")
axes[1].set_xlim(0, 200)
plt.show()
# It was obvious that they'll differ, but it was nice to see by how much.
print(sns.color_palette().as_hex())
plt.figure(figsize=(15, 5))
sns.barplot(data=df_cities, x="Location", y="Salary High", color="#dd8452")
sns.barplot(data=df_cities, x="Location", y="Salary Low", color="#4c72b0")
plt.title("Mean low and high salary in top 20 cities")
plt.ylabel("Salary")
plt.xlabel(None)
plt.xticks(rotation=90)
plt.plot()
# Californian cities really stand out from the rest in terms of low and high ends of salary estimations.
# Chicago's low boundary is also worth noting.
plt.figure(figsize=(15, 5))
sns.boxplot(data=df_sals, x="Location", y="Salary", hue="Salary type")
plt.xticks(rotation=90)
plt.grid(axis="x")
plt.xlabel(None)
plt.plot()
# A lot is going on here, but the plot provides some important information. There is a better way to visualize it, though.
plt.figure(figsize=(15, 5))
sns.violinplot(
data=df_sals,
x="Location",
y="Salary",
hue="Salary type",
split=True,
inner="stick",
palette="pastel",
)
plt.xticks(rotation=90)
plt.grid(axis="x")
plt.show()
# So interesting to see how distributions seem to follow some specific pattern for each region - California, Arizona, Texas and NY/NJ. There are some exceptions, but it's still oddly satisfying.
fig, axes = plt.subplots(1, 2, figsize=(15, 5), sharey=True)
sns.stripplot(
ax=axes[0],
data=df_sals[
df_sals["Location"].isin(
["San Diego, CA", "Charlotte, NC", "Phoenix, AZ", "Tempe, AZ"]
)
],
x="Location",
y="Salary",
hue="Salary type",
size=4,
alpha=0.7,
legend=False,
)
axes[0].set_xlabel(None)
axes[0].set_title("Dense")
sns.stripplot(
ax=axes[1],
data=df_sals[
df_sals["Location"].isin(
["San Jose, CA", "San Francisco, CA", "Houston, TX", "New York, NY"]
)
],
x="Location",
y="Salary",
hue="Salary type",
size=4,
alpha=0.7,
)
axes[1].set_xlabel(None)
axes[1].set_title("Sparse")
plt.legend(title="Type", bbox_to_anchor=(1.25, 1), loc="upper right")
plt.ylim(0, 200)
plt.show()
# The graph above helps us understand why the distributions for the chosen cities look the way they do. As you can see, it isn't because of a lack of data, but rather because of very low or high variability.
# Okay, let's compare Junior vs Senior position in terms of salary now.
juniors = df["Job Title"].str.contains("Junior|Jr")
df[juniors]["Job Title"].value_counts()
seniors = df["Job Title"].str.contains("Senior|Sr")
df.loc[seniors]["Job Title"].value_counts()
df["Experience"] = np.nan
df.loc[seniors, "Experience"] = "Senior"
df.loc[juniors, "Experience"] = "Junior"
df["Experience"].value_counts()
# We've managed to extract only this many offers that refer explicitly to seniority. It isn't much considering the size of the dataset, but it's sufficient for some basic analysis.
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
sns.kdeplot(
ax=axes[0],
data=df,
x="Salary Low",
hue="Experience",
common_norm=False,
legend=False,
)
sns.kdeplot(ax=axes[1], data=df, x="Salary High", hue="Experience", common_norm=False)
axes[1].set_ylabel(None)
plt.show()
seniors_72 = df[seniors].sample(72)
# take a sample of senior offers equal in size to the number of junior offers
df_to_show = pd.concat([df[juniors], seniors_72])
plt.figure(figsize=(10, 5))
sns.scatterplot(
data=df_to_show,
x="Salary Low",
y="Salary High",
hue="Experience",
hue_order=["Senior", "Junior"],
style="Experience",
)
plt.show()
# This came as a surprise. But I also think that location is a stronger predictor and comparing positions makes sense only within one city or state.
desc = df["Job Description"].iloc[24]
print(desc)
nltk.download("omw-1.4")
nltk.download("wordnet")
nltk.download("wordnet2022")
# nlp = load('en_core_web_sm')
lemmatizer = WordNetLemmatizer()
descs = df["Job Description"].tolist()
tools = {
"tableau": 0,
"sql": 0,
"python": 0,
"r": 0,
"spark": 0,
"hadoop": 0,
"docker": 0,
"excel": 0,
"java": 0,
"powerbi": 0,
"aws": 0,
"gcp": 0,
"azure": 0,
}
tools_exc = {
"tableau": 0,
"sql": 0,
"python": 0,
"r": 0,
"spark": 0,
"hadoop": 0,
"docker": 0,
"excel": 0,
"java": 0,
"powerbi": 0,
"aws": 0,
"gcp": 0,
"azure": 0,
}
# for wordcloud
every_word = ""
# first approach
for desc in descs:
words = [x.lower() for x in desc.split()]
for i, word in enumerate(words):
if word in tools.keys():
tools[word] += 1
# second approach
for desc in descs:
appeared = {
"tableau": False,
"sql": False,
"python": False,
"r": False,
"spark": False,
"hadoop": False,
"docker": False,
"excel": False,
"java": False,
"powerbi": False,
"aws": False,
"gcp": False,
"azure": False,
}
words = [x.lower() for x in desc.split()]
for i, word in enumerate(words):
if word in tools_exc.keys() and not appeared[word]:
tools_exc[word] += 1
# 1st exception to account for space-separated cases
elif (word == "bi" and words[i - 1] == "power") and not appeared["powerbi"]:
tools_exc["powerbi"] += 1
appeared["powerbi"] = True
# 2nd exception
elif word == "pyspark" and not appeared["spark"]:
tools_exc["spark"] += 1
appeared["spark"] = True
# 3rd exception
elif (word == "javascript" or word == "jquery") and not appeared["java"]:
tools_exc["java"] += 1
appeared["java"] = True
# 4th exception
elif word == "cloud":
if words[i - 1] == "google" and not appeared["gcp"]:
tools_exc["gcp"] += 1
appeared["gcp"] = True
if words[i - 1] == "microsoft" and not appeared["azure"]:
tools_exc["azure"] += 1
appeared["azure"] = True
elif (word == "web" and words[i - 1] == "amazon") and not appeared["aws"]:
tools_exc["aws"] += 1
appeared["aws"] = True
# 5th exception
elif (
word == "mssql" or word == "mysql" or word == "postgresql" or word == "tsql"
) and not appeared["sql"]:
tools_exc["sql"] += 1
appeared["sql"] = True
# lemmatizing for inflected forms of the same word
every_word += lemmatizer.lemmatize(word) + " "
print(tools)
print(tools_exc)
# Taking exceptions into account allowed us to catch more occurrences. This is especially important for Microsoft's Power BI tool.
tools = {
k: v for k, v in sorted(tools_exc.items(), key=lambda item: item[1], reverse=True)
}
tools
plt.figure(figsize=(15, 5))
plt.bar(tools.keys(), tools.values())
plt.xticks(rotation=45)
plt.ylim(0, 1750)
plt.title("Number of occurrences of each tool in job offers")
plt.ylabel("Occurrences")
plt.xlabel("Tool")
plt.grid(axis="y", linewidth=0.5, linestyle="-")
plt.show()
# If they could only see how much data analysis we were able to do with Python...
first = ["r", "python"]
first_val = [tools[x] for x in first]
second = ["gcp", "azure", "aws"]
second_val = [tools[x] for x in second]
third = ["powerbi", "tableau"]
third_val = [tools[x] for x in third]
fourth = ["spark", "hadoop"]
fourth_val = [tools[x] for x in fourth]
fig, ax = plt.subplots(2, 2, figsize=(12, 7))
ax[0, 0].set_title("For analysis")
ax[0, 0].barh(first, first_val)
ax[0, 1].set_title("Cloud tools")
ax[0, 1].barh(second, second_val)
ax[1, 0].set_title("For visualization")
ax[1, 0].barh(third, third_val)
ax[1, 1].set_title("For Big Data")
ax[1, 1].barh(fourth, fourth_val)
fig.suptitle("Grand clashes")
fig.tight_layout()
plt.show()
# This data is from June 2020. It's not old, but we have to keep in mind how rapidly technology is changing. Nevertheless, we can see here who is the leader in its field, at least in the US job market.
wordcloud = WordCloud(width=800, height=400).generate(every_word)
# Lastly, let me present a cool way to look at the most frequently occurring words in the job descriptions.
plt.figure(figsize=(15, 10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
|
# Playground Series - Season 3, Episode 12
# Binary Classification with a Kidney Stone Prediction Dataset
#
#
# # Table of Contents
#
# 1. [Introduction](#intro)
# 1. [Imports and Setups](#import)
# 1. [Data Loading](#loading)
# 1. [Exploratory data analysis (EDA)](#eda)
# 1. [Feature engineering (FE)](#fe)
# 1. [Modeling](#model)
# 1. [Feature Importance](#importance)
# 1. [Prediction](#prediction)
# ___
# # Introduction [↑](#top)
# ## Dataset Description [↑](#top)
# The dataset for this competition (both train and test) was generated from a deep learning model trained on the Kidney Stone Prediction based on Urine Analysis dataset. Feature distributions are close to, but not exactly the same, as the original. Feel free to use the original dataset as part of this competition, both to explore differences as well as to see whether incorporating the original in training improves model performance.
# ### Files [↑](#top)
# * `train.csv` - target is the likelihood of a kidney stone being present
# * `test.csv` - the test dataset; your objective is to predict the probability of target
# * `sample_submission.csv` - a sample submission file in the correct format
# ## Features [↑](#top)
# The six physical characteristics of the urine are:
# * specific `gravity`, the density of the urine relative to water
# * `ph`, the negative logarithm of the hydrogen ion concentration
# * osmolarity (`osmo`), a unit used in biology and medicine but not in physical chemistry; osmolarity is proportional to the concentration of molecules in solution
# * conductivity (`cond`), in mMho (milliMho); one Mho is one reciprocal Ohm, and conductivity is proportional to the concentration of charged ions in solution
# * `urea` concentration in millimoles per litre
# * calcium concentration (`calc`) in millimoles per litre
# [Source](https://www.kaggle.com/datasets/vuppalaadithyasairam/kidney-stone-prediction-based-on-urine-analysis)
# ## Competition goal [↑](#top)
# Submissions are evaluated on area under the ROC curve between the predicted probability and the observed target.
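# For reference, a tiny self-contained example of the metric (a minimal sketch using sklearn's roc_auc_score on made-up labels and scores, not competition data):
from sklearn.metrics import roc_auc_score
toy_y_true = [0, 0, 1, 1]
toy_y_score = [0.1, 0.4, 0.35, 0.8]
print(roc_auc_score(toy_y_true, toy_y_score))  # 0.75: three of the four positive/negative pairs are ranked correctly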
# ___
# # Imports and Setups [↑](#top)
import warnings
warnings.filterwarnings("ignore")
import os
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm.notebook import trange, tqdm
from IPython.display import display, Markdown
from sklearn.feature_selection import mutual_info_classif
from sklearn.metrics import classification_report, accuracy_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import colorsys
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True)
plt.rc(
"axes",
labelweight="bold",
labelsize="large",
titleweight="bold",
titlesize=14,
titlepad=10,
)
blues_palette = palette = sns.color_palette("Blues_r", n_colors=20)
reds_palette = palette = sns.color_palette("Reds_r", n_colors=20)
greys_palette = sns.color_palette("Greys", n_colors=20)
blue = blues_palette[1]
red = reds_palette[1]
two_colors = [blue, red]
sns.set()
sns.set_theme(style="whitegrid", palette=blues_palette)
class Cfg:
INPUT_ROOT = Path("/kaggle/input/playground-series-s3e12")
OUTPUT_ROOT = Path("/kaggle/working/")
TRAN_FILE = INPUT_ROOT / "train.csv"
TEST_FILE = INPUT_ROOT / "test.csv"
SAMPLE_SUBMISSION_FILE = INPUT_ROOT / "sample_submission.csv"
SUBMISSION_FILE = OUTPUT_ROOT / "submission.csv"
RANDOM_STATE = 2023
NUM_MOST_IMPORTANCE_FEATURES = 20
SAMPLE_SIZE = 1.0
N_TRIALS = 5
TEST_SIZE = 0.15
TARGET = "target"
INDEX = "id"
# ### Helper Functions
def factorize(X):
for colname in X.select_dtypes(["category", "object"]):
X[colname], _ = X[colname].factorize()
return X
def make_mi_scores(X, y):
"""Utility functions from FE Tutorial"""
X = factorize(X.copy())
# All discrete features should now have integer dtypes
mi_scores = mutual_info_classif(X, y, random_state=Cfg.RANDOM_STATE)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
return (
pd.DataFrame({"feature": X.columns, "mi_score": mi_scores})
.set_index("feature")
.sort_values(by="mi_score", ascending=False)
)
def plot_mi_scores(scores, ax=None):
    if ax is None:
fig, ax = plt.subplots(1, 1)
sns.barplot(
data=scores,
x="mi_score",
y=scores.index,
palette=blues_palette,
orient="h",
alpha=0.8,
ax=ax,
)
ax.set_title("Mutual Information Scores")
ax.set_xlabel("Score")
ax.set_ylabel("Features")
return ax
def plot_hist(data, feature, palette=blues_palette, ax=None, kde=True):
if ax is None:
ax = plt.gca()
sns.histplot(
data=data,
x=feature,
bins=20,
legend=True,
palette=palette,
alpha=0.8,
kde=kde,
ax=ax,
)
mean = np.mean(data[feature])
ax.vlines(
mean, 0, 1, transform=ax.get_xaxis_transform(), color="k", linewidth=2, ls=":"
)
return ax
def plot_count(data, feature, palette=blues_palette, hue=None, ax=None):
if ax is None:
ax = plt.gca()
sns.countplot(data=data, x=feature, hue=hue, palette=palette, alpha=0.8, ax=ax)
return ax
def plot_hist(data, feature, palette=blues_palette, hue=None, ax=None, kde=False):
if ax is None:
ax = plt.gca()
sns.histplot(
data=data,
x=feature,
hue=hue,
bins=30,
legend=True,
palette=palette,
alpha=0.8,
kde=kde,
ax=ax,
)
mean = np.mean(data[feature])
ax.vlines(mean, 0, 1, transform=ax.get_xaxis_transform(), color=red, ls=":")
return ax
def plot_boxplot(data, x=None, y=None, palette=blues_palette, hue=None, ax=None):
if ax is None:
ax = plt.gca()
sns.boxplot(
data=data, x=x, y=y, hue=hue, boxprops=dict(alpha=0.8), palette=palette, ax=ax
)
return ax
def plot_kde(
data, feature, hue=None, ax=None, palette=blues_palette, legend=True, show_mean=True
):
if ax is None:
ax = plt.gca()
sns.kdeplot(
data=data,
x=feature,
hue=hue,
fill=True,
legend=legend,
palette=palette,
alpha=0.8,
ax=ax,
)
if show_mean:
mean = np.mean(data[feature])
ax.vlines(mean, 0, 1, transform=ax.get_xaxis_transform(), color=red, ls=":")
return ax
def plot_scatter(data, x, y, palette=blues_palette, hue=None, ax=None):
if ax is None:
ax = plt.gca()
sns.scatterplot(data=data, x=x, y=y, hue=hue, alpha=0.8, palette=palette, ax=ax)
ax.set_title(f'Scatter "{x}" vs "{y}"')
return ax
# ___
# # Data Loading [↑](#top)
def read_train_data(file=Cfg.TRAN_FILE, index_col=Cfg.INDEX):
"""Reads the train data"""
return pd.read_csv(file).set_index(Cfg.INDEX)
def read_test_data(file=Cfg.TEST_FILE, index_col=Cfg.INDEX):
"""Reads the test data"""
return pd.read_csv(file).set_index(Cfg.INDEX)
train_data = read_train_data()
test_data = read_test_data()
display(train_data.head())
display(test_data.head())
print(f"Train data size: {train_data.shape[0]} rows; {train_data.shape[1]} columns")
print(f"Test data size : {test_data.shape[0]} rows; {test_data.shape[1]} columns")
# ___
# # Exploratory Data Analysis (EDA) [↑](#top)
# ## Basic Statistics [↑](#top)
display(train_data.describe().T)
display(test_data.describe().T)
# ## Unique Values [↑](#top)
pd.DataFrame(
{
"feature": train_data.columns,
"dytpe": train_data.dtypes,
"unique": train_data.nunique(),
}
)
# ## Missing Values [↑](#top)
pd.DataFrame(
{"feature": train_data.columns, "train": train_data.isna().sum()}
).set_index("feature").sort_values(by="train", ascending=False)
# **Insights:**
# > The datasets have no missing values
# ## Duplicates [↑](#top)
train_data.duplicated().sum(), test_data.duplicated().sum()
print(f"Duplicated values in train data: {train_data.duplicated().sum()}")
print(f"Duplicated values in test data: {test_data.duplicated().sum()}")
# ## Outliers Detection [↑](#top)
def get_numerical_features(data, target=Cfg.TARGET):
return list(data.select_dtypes(np.number).columns.difference([Cfg.TARGET]))
from scipy import stats
z_threshold = 3
features = get_numerical_features(train_data)
z_score = np.abs(stats.zscore(train_data[features], axis=0))
pd.DataFrame(
    {
        "feature": features,
        "num_outlier": [
            train_data[z_score[f] > z_threshold].shape[0] for f in features
        ],
    }
).set_index("feature")
features = get_numerical_features(train_data)
fig, axis = plt.subplots(nrows=2, ncols=4, figsize=(15, 3))
for feature, ax in zip(features, axis.flatten()):
plot_boxplot(train_data, x=feature, palette=blues_palette, ax=ax)
ax.set_title("")
plt.tight_layout()
plt.show()
# ## Target Distribution [↑](#top)
fig, ax = plt.subplots(1, 1, figsize=(3, 3))
plot_count(train_data, Cfg.TARGET, palette=two_colors, ax=ax)
ax.set_title(f"Target Distribution")
plt.show()
# ## Continuous Data Distribution [↑](#top)
features = get_numerical_features(train_data)
fig, axis = plt.subplots(2, 3, figsize=(12, 6))
for feature, ax in zip(features, axis.flatten()):
plot_hist(train_data, feature, hue=None, palette=two_colors, kde=True, ax=ax)
ax.set_title(f'Distribution "{feature}"')
plt.show()
features = get_numerical_features(train_data)
fig, axis = plt.subplots(2, 3, figsize=(12, 6))
for feature, ax in zip(features, axis.flatten()):
plot_hist(train_data, feature, hue=Cfg.TARGET, palette=two_colors, kde=True, ax=ax)
ax.set_title(f'Distribution "{feature}"')
plt.show()
features = get_numerical_features(train_data)
fig, axis = plt.subplots(1, 6, figsize=(16, 4))
for feature, ax in zip(features, axis.flatten()):
plot_boxplot(train_data, x=Cfg.TARGET, y=feature, palette=two_colors, ax=ax)
ax.set_title(f'Boxplot "{feature}"')
plt.tight_layout()
plt.show()
# ## Correlations [↑](#top)
def plot_correlation_matrix(corr_data, cbar=True, ax=None):
if ax is None:
ax = plt.gca()
cmap = sns.diverging_palette(245, 15, as_cmap=False)
mask = np.triu(np.ones_like(corr_data, dtype=bool))
sns.heatmap(
corr_data,
mask=mask,
vmax=1,
vmin=-1,
cmap=cmap,
square=True,
linewidths=0.1,
ax=ax,
alpha=1,
annot=True,
fmt=".1g",
cbar=cbar,
cbar_kws={"shrink": 0.5},
)
return ax
num_features = get_numerical_features(train_data)
fig, axis = plt.subplots(1, 3, figsize=(15, 4))
corr_data = train_data[num_features].corr()
ax = plot_correlation_matrix(corr_data, ax=axis[0], cbar=False)
ax.set_title("Overall Correlation")
corr_data = train_data[train_data["target"] == 1][num_features].corr()
ax = plot_correlation_matrix(corr_data, ax=axis[1], cbar=False)
ax.set_title("Correlation (target==1)")
corr_data = train_data[train_data["target"] == 0][num_features].corr()
ax = plot_correlation_matrix(corr_data, ax=axis[2])
ax.set_title("Correlation (target==0)")
plt.tight_layout()
plt.show()
# **Insights:**
# > There is a strong positive correlation between urea and osmo (0.8).
def plot_pairplot(data, features, hue=Cfg.TARGET, height=3):
grid = sns.pairplot(
data=train_data[features + [Cfg.TARGET]],
palette=two_colors,
height=height,
hue=hue,
corner=True,
)
grid.fig.set_size_inches(10, 8)
for ax in filter(None, grid.axes.flatten()):
ax.set_xlabel(ax.get_xlabel(), rotation=90)
ax.set_ylabel(ax.get_ylabel(), rotation=0)
ax.yaxis.get_label().set_horizontalalignment("right")
    grid.fig.tight_layout()
plt.show()
num_features = get_numerical_features(train_data)
plot_pairplot(train_data, num_features, height=1)
# ## Mutual Information
# Intuitively, mutual information measures the information that $X$ and $Y$ share. It measures how much knowing one of these variables reduces uncertainty about the other.
# [Source](https://en.wikipedia.org/wiki/Mutual_information)
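# As a reminder (a standard textbook definition, not specific to this notebook), for discrete variables
# $$I(X;Y) = \sum_{x}\sum_{y} p(x, y)\,\log\frac{p(x, y)}{p(x)\,p(y)}$$
# which is zero exactly when $X$ and $Y$ are independent.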
X = train_data.copy()
y = X.pop(Cfg.TARGET)
mi_scores = make_mi_scores(X, y)
fig, ax = plt.subplots(figsize=(6, 3))
plot_mi_scores(mi_scores, ax=ax)
plt.show()
# ## Calcium `calc` vs other Features
sns.lmplot(
x="value",
y="calc",
hue="target",
col="variable",
palette=two_colors,
height=4,
aspect=1,
facet_kws={"sharex": False},
col_wrap=4,
data=train_data.melt(
value_vars=["gravity", "ph", "cond", "urea"], id_vars=["calc", "target"]
),
)
# ## Conductivity vs Osmolality
X = train_data.copy()
X["xx"] = X["calc"] + X["urea"]
sns.lmplot(data=X, x="cond", y="xx", hue=Cfg.TARGET, palette=two_colors)
# # Feature engineering (FE) [↑](#top)
from sklearn.metrics import auc
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
from catboost import CatBoostClassifier, CatBoostRegressor
cat_params = {
"learning_rate": 0.04,
"depth": 3,
"n_estimators": 50,
"eval_metric": "AUC",
"random_seed": Cfg.RANDOM_STATE,
"verbose": 0,
}
def score_dataset(X, y, prev_score=0, model=CatBoostClassifier(**cat_params)):
X = X.copy()
for colname in X.select_dtypes(["category", "object"]):
X[colname], _ = X[colname].factorize()
score = cross_val_score(model, X, y, cv=5, scoring="roc_auc").mean()
diff = score - prev_score
direction = "↑" if diff > 0 else "↓"
return score, direction, diff
X = train_data.copy()
y = X.pop(Cfg.TARGET)
baseline_score, _, _ = score_dataset(X, y)
print(f"Baseline Score: {baseline_score}")
# ___
# ## What does the calcium `calc` value mean?
# Calcium excretion is determined from a 24-hour urine collection. The urine must be mixed with hydrochloric acid to prevent crystallisation and precipitation of the calcium. The measurement is done photometrically.
# Reference values:
# |Gender|Calcium in urine (mmol/l)|
# |---|---|
# |Women| < 6.2 mmol/l|
# |Men| < 7.5 mmol/l|
# [Source](https://www.lifeline.de/diagnose/laborwerte/calcium-blutwert-id47661.html)
train_data.groupby(by="target").describe()["calc"]
X = train_data.copy()
y = X.pop("target")
X = X[["calc"]]
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
# **Insights:**
# > With the feature `calc` alone we already achieve an ROC AUC of about 0.8.
def create_calc_level_feature(X):
labels = ["low", "normal", "high"]
X["calc_level"] = pd.cut(
X["calc"], bins=[0, 2.75, 7.5, 15], labels=labels, ordered=True
)
return X
X = train_data.copy()
y = X.pop("target")
X = create_calc_level_feature(X)
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
# ## What is a urine specific gravity test?[↑](#top)
# A urine-specific gravity test compares the density of urine with the density of water. This helps identify dehydration, a kidney problem, or a condition like diabetes insipidus.
# Normal value: 1.010 - 1.035 (specific gravity is a ratio relative to water, so it has no unit)
# [Source](https://www.medicalnewstoday.com/articles/322125?c=220849204885)
def create_disease_feature(X):
labels = ["hyposthenurie", "eusthenurie", "hypersthenurie"]
X["disease"] = pd.cut(
X["gravity"], bins=[0, 1.010, 1.030, 2], labels=labels, ordered=True
)
return X
X = create_disease_feature(X)
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
sns.countplot(data=X, x="disease", hue=y, palette=two_colors, alpha=0.8, ax=ax)
ax.set_title(f"Specific gravity and disease")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.show()
# ## What is a normal urine pH?
# A neutral pH is 7.0. The average urine sample tests at about 6.0, but typical urine pH may range from 5.5–7.6.
# The higher the number, the more basic your urine is. The lower the number, the more acidic your urine is.
# [Source](https://www.onmeda.de/krankheiten/blasensteine-id200175/)
train_data.groupby(by="target").describe()["ph"]
def create_ph_level_feature(X):
labels = ["acidic", "normal", "basic"]
X["ph_level"] = pd.cut(X["ph"], bins=[0, 5.5, 7.6, 12], labels=labels, ordered=True)
return X
X = create_ph_level_feature(X)
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
sns.countplot(data=X, x="ph_level", hue=y, palette=two_colors, alpha=0.8, ax=ax)
ax.set_title(f"Normal pH level")
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.show()
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
# ## What is a normal `urea` value?
# When protein is broken down in the body, it first produces carbon dioxide (CO2) and ammonia in the liver. Ammonia is toxic to the body (and especially the brain) in large amounts and is therefore broken down into non-toxic urea in the so-called urea cycle. In medical laboratory diagnostics, urea plays a role as a kidney parameter.
# Reference values:
# |Age|Women|Men|
# |---|---|---|
# |< 50 Years|15-40 mg/dl|19-44 mg/dl|
# |> 50 Years|21-43 mg/dl|18-55 mg/dl|
# [Source](https://www.medpertise.de/blutwerte/nierenwerte/harnstoff/)
train_data.groupby(by="target").describe()["urea"]
# ## Osmolality `osmo` in urine
# Osmolality indicates the concentration of all dissolved, osmotically active particles in a solution. By determining the osmolality of urine, a statement can be made about the electrolyte and water balance of the body.
# Reference values:
# |Osmolality in urine (mosm/kg H2O)|
# |---|
# |50-1200|
# [Source:](https://www.labormedizin-krefeld.de/index.php?mact=Labormedizin,cntnt01,default,0&cntnt01what=Leistung&cntnt01alias=Osmolalitaet-im-Serum-Urin&cntnt01returnid=27)
train_data.groupby(by="target").describe()["osmo"]
# ## Create ratio features
def create_ratio_features(X):
X["calc_urea_ratio"] = X["calc"] / X["urea"]
X["calc_osm_product"] = X["calc"] * X["osmo"]
X["calc_gravity_ratio"] = X["calc"] / X["gravity"]
X["calc_osm_ratio"] = X["calc"] / X["osmo"]
return X
X = train_data.copy()
y = X.pop("target")
X = create_ratio_features(X)
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
# ___
def create_features(X):
X = create_disease_feature(X)
X = create_ph_level_feature(X)
X = create_calc_level_feature(X)
X = create_ratio_features(X)
return X
X = factorize(create_features(train_data.copy()))
y = X.pop(Cfg.TARGET)
score, direction, diff = score_dataset(X, y, prev_score=baseline_score)
print(f"Score: {score} {direction} - diff = {diff}")
mi_scores = make_mi_scores(X, y)
fig, ax = plt.subplots(figsize=(10, 4))
plot_mi_scores(mi_scores, ax=ax)
plt.show()
# ___
# # Modeling [↑](#top)
import optuna
optuna.logging.set_verbosity(optuna.logging.CRITICAL)
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, RocCurveDisplay
from catboost import CatBoostClassifier, CatBoostRegressor
from xgboost.sklearn import XGBClassifier
from lightgbm.sklearn import LGBMClassifier
from sklearn.ensemble import StackingClassifier, VotingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_validate
def plot_model_result(y_pred, y_true, y_pred_proba):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))
ConfusionMatrixDisplay.from_predictions(
y_true, y_pred, ax=ax1, cmap="Blues", normalize="true", colorbar=False
)
ax1.set_title("Confusion Matrix")
sns.histplot(
data=y_pred_proba, palette=two_colors, legend=True, bins=30, kde=True, ax=ax2
)
ax2.set_xlabel("Prediction Probapility")
ax2.set_ylabel("Probabitity")
# ROC curve
RocCurveDisplay.from_predictions(y_true, y_pred, ax=ax3)
ax3.set_title("ROC")
plt.tight_layout()
plt.show()
print(classification_report(y_true, y_pred))
X = factorize(create_features(train_data.copy()))
y = X.pop(Cfg.TARGET)
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=Cfg.TEST_SIZE, random_state=Cfg.RANDOM_STATE
)
pd.DataFrame(
{"Rows": [X_train.shape[0], X_val.shape[0]], "Dataset": ["Train", "Validation"]}
).set_index("Dataset")
def xgb_objective(trial):
eta = trial.suggest_float("eta", 0, 1)
max_depth = trial.suggest_int("max_depth", 5, 30)
n_estimators = trial.suggest_int("n_estimators", 100, 300)
reg_alpha = trial.suggest_float("reg_alpha", 1e-8, 10, log=True)
reg_lambda = trial.suggest_float("reg_lambda", 1e-8, 10, log=True)
model = XGBClassifier(
eta=eta,
n_estimators=n_estimators,
max_depth=max_depth,
seed=Cfg.RANDOM_STATE,
eval_metric="auc",
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
)
y_pred = model.fit(X_train, y_train).predict(X_val)
score = roc_auc_score(y_val, y_pred)
return score
study = optuna.create_study(direction="maximize")
study.optimize(xgb_objective, Cfg.N_TRIALS)
xgb_params = study.best_params.copy()
xgb_params.update({"eval_metric": "auc", "seed": Cfg.RANDOM_STATE})
xgb_params
def cat_objective(trial):
learning_rate = trial.suggest_float("learning_rate", 0.0, 0.1)
    depth = trial.suggest_int("depth", 3, 10)
    n_estimators = trial.suggest_int("n_estimators", 10, 500)
model = CatBoostClassifier(
learning_rate=learning_rate,
depth=depth,
n_estimators=n_estimators,
verbose=0,
random_seed=Cfg.RANDOM_STATE,
eval_metric="AUC",
)
y_pred = model.fit(X_train, y_train).predict(X_val)
score = roc_auc_score(y_val, y_pred)
return score
study = optuna.create_study(direction="maximize")
study.optimize(cat_objective, n_trials=Cfg.N_TRIALS)
cat_params = study.best_params.copy()
cat_params.update({"eval_metric": "AUC", "random_seed": Cfg.RANDOM_STATE, "verbose": 0})
cat_params
def lgbm_objective(trial):
learning_rate = trial.suggest_float("learning_rate", 5e-4, 0.75, log=True)
n_estimators = trial.suggest_int("n_estimators", 100, 1500, log=True)
    max_depth = trial.suggest_int("max_depth", 5, 30)
num_leaves = trial.suggest_int("num_leaves", 2, 128, log=True)
colsample_bytree = trial.suggest_float("colsample_bytree", 0.1, 1)
    subsample = trial.suggest_float("subsample", 0.1, 1.0)
reg_alpha = trial.suggest_float("reg_alpha", 1e-8, 10, log=True)
reg_lambda = trial.suggest_float("reg_lambda", 1e-8, 10, log=True)
model = LGBMClassifier(
learning_rate=learning_rate,
n_estimators=n_estimators,
max_depth=max_depth,
num_leaves=num_leaves,
colsample_bytree=colsample_bytree,
subsample=subsample,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
metric="AUC",
seed=Cfg.RANDOM_STATE,
)
y_pred = model.fit(X_train, y_train).predict(X_val)
score = roc_auc_score(y_val, y_pred)
return score
study = optuna.create_study(direction="maximize")
study.optimize(lgbm_objective, n_trials=Cfg.N_TRIALS)
lgbm_params = study.best_params.copy()
lgbm_params.update({"metric": "AUC", "seed": Cfg.RANDOM_STATE})
lgbm_params
estimators = [
("xgb", XGBClassifier(**xgb_params)),
("lgbm", LGBMClassifier(verbose=-1, force_row_wise=True, **lgbm_params)),
("cat", CatBoostClassifier(**cat_params)),
]
model = StackingClassifier(
estimators=estimators, final_estimator=CatBoostClassifier(verbose=0)
).fit(X_train, y_train)
y_pred = model.predict(X_val)
y_pred_proba = model.predict_proba(X_val)
plot_model_result(y_pred, y_val, y_pred_proba)
# ___
# # Feature Importance [↑](#top)
cat_model = CatBoostClassifier(**cat_params).fit(X_train, y_train)
feature_importance = pd.DataFrame(
{"feature": X_val.columns, "importance": cat_model.get_feature_importance()}
).set_index("feature")
feature_importance.sort_values(by="importance", ascending=False, inplace=True)
fig, ax = plt.subplots(figsize=(10, 4))
df = feature_importance.head(50)
sns.barplot(
data=df,
y=df.index,
x="importance",
palette=blues_palette,
orient="h",
alpha=0.75,
ax=ax,
)
ax.set_title("Feature Importance")
plt.tight_layout()
plt.show()
# ___
# # Submission [↑](#top)
X = factorize(create_features(test_data.copy()))
y_pred_submission = model.predict_proba(X)[:, 1]
submission_data = pd.DataFrame(
{
Cfg.INDEX: test_data.index,
Cfg.TARGET: y_pred_submission,
}
).set_index(Cfg.INDEX)
submission_data
# save submission file
submission_data.to_csv(Cfg.SUBMISSION_FILE)
|
import numpy as np
import pandas as pd
import matplotlib
import os
import torch
import matplotlib
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
np.random.seed(42)
# ## Loading the MIT BIH Dataset
import pandas as pd
import os
def load_data(path):
train_df = pd.read_csv(os.path.join(path, "mitbih_train.csv"), header=None)
test_df = pd.read_csv(os.path.join(path, "mitbih_test.csv"), header=None)
return train_df, test_df
train_df, test_df = load_data("/kaggle/input/heartbeat/")
# ## Investigating class distribution
from matplotlib import pyplot as plt
import numpy as np
matplotlib.rcParams.update({"font.size": 12})
classes = ["N", "S", "V", "F", "Q"]
classnames = [
"Normal",
"Supraventricular premature",
"Premature ventricular contraction",
"Fusion ventricular + normal",
"Unclassifiable",
]
train = np.array(train_df)
test = np.array(test_df)
# very last column is the label
X_train, y_train = train[:, :-1], train[:, -1]
X_test, y_test = test[:, :-1], test[:, -1]
y_train, y_test = y_train.astype(int), y_test.astype(int)
print("X_train: ", X_train.shape)
print("X_test: ", X_test.shape)
print(f"y_train: {y_train.shape}")
print(f"y_test: {y_test.shape}")
def show_class_distribution(train_y, test_y, plot=False):
print()
print("\tFrequency of each class:")
print("train\t\t\ttest")
train_freq = np.unique(train_y, return_counts=True)[1]
test_freq = np.unique(test_y, return_counts=True)[1]
for i in range(len(classes)):
num = train_freq[i]
percent = train_freq[i] / len(train) * 100
test_num = test_freq[i]
test_percent = test_freq[i] / len(test) * 100
print(f"{classes[i]}: {num} ({percent:.0f}%)", end="\t\t")
print(f"{classes[i]}: {test_num} ({test_percent:.0f}%)")
if plot:
plt.figure(figsize=(8, 2))
plt.subplot(1, 2, 1)
plt.bar(classes, train_freq / len(train_y) * 100)
plt.title("Train data class distribution")
plt.xlabel("Class")
plt.ylabel("Percentage")
plt.subplot(1, 2, 2)
plt.bar(classes, test_freq / len(test_y) * 100)
plt.title("Test data class distribution")
plt.xlabel("Class")
plt.ylabel("Percentage")
plt.show()
show_class_distribution(y_train, y_test, plot=True)
# ## Data Augmentation
# This data is extremely imbalanced so let's augment it using stretching, amplifying and injecting noise.
# We augment all classes to the same number of samples (n=10_000)
# Note that we only augment the training data, since modifying the test data is generally bad practice. To evaluate the model on the test data we can use the F1 score and a normalized confusion matrix to account for the imbalanced classes.
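# As a reminder of why macro-averaged F1 is a sensible choice here (a standard definition, not notebook-specific): with $K$ classes,
# $$F1_{macro} = \frac{1}{K}\sum_{k=1}^{K} \frac{2\,P_k R_k}{P_k + R_k}$$
# where $P_k$ and $R_k$ are the precision and recall of class $k$, so every class contributes equally regardless of its frequency.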
import random
from scipy.signal import resample
import numpy as np
def stretch(x):
l = int(187 * (1 + (random.random() - 0.5) / 3))
y = resample(x, l)
if l < 187:
y_ = np.zeros(shape=(187,))
y_[:l] = y
else:
y_ = y[:187]
return y_
def amplify(x):
alpha = random.random() - 0.5
factor = -alpha * x + (1 + alpha)
return x * factor
def inject_noise(x, mean=0, std=0.01):
noise = np.random.normal(mean, std, size=x.shape)
return x + noise
def augment_sample(x, stretch_=True, amplify_=True, inject_noise_=True, times=1):
result = np.zeros(shape=(times, 187))
for i in range(times):
result[i, :] = x
if stretch_:
result[i, :] = stretch(result[i, :])
if amplify_:
result[i, :] = amplify(result[i, :])
if inject_noise_:
result[i, :] = inject_noise(result[i, :])
return result
# example of augmentation
plt.figure(figsize=(5, 3))
plt.plot(X_train[0, :])
plt.plot(augment_sample(X_train[0, :])[0])
plt.show()
import math
def augment_data(X, y, n=10_000):
# calculate how many samples to augment each class
categories = [X[y == i] for i in range(5)]
num_augments = [
math.ceil((n - len(c)) / len(c)) if n > len(c) else 0 for c in categories
]
print(f"Augmenting data to {n} samples per class")
print(f"Number of augmentations per class: {num_augments}")
X_aug = []
y_aug = []
# augment each class the calculated number of times
for c in range(5):
if num_augments[c] == 0:
continue
print(
f"class {c} ({classes[c]}) augmenting {num_augments[c]} times... ", end=""
)
augmented = np.apply_along_axis(
augment_sample, axis=1, arr=X[y == c], times=num_augments[c]
).reshape(-1, 187)
X_aug.extend(augmented)
y_aug.extend(np.full(shape=(len(augmented),), fill_value=c))
print("done")
X_aug = np.array(X_aug)
y_aug = np.array(y_aug)
print(f"Augmented data shape X_aug: {X_aug.shape}")
print(f"Augmented labels shape y_aug: {y_aug.shape}")
if len(X_aug) != 0:
X = np.concatenate((X, X_aug))
y = np.concatenate((y, y_aug))
# randomly sample classes with more than n samples down to n samples
for c in range(5):
if num_augments[c] > 0:
continue
print(f"class {c} ({classes[c]}) sampling {n} samples... ", end="")
X_c = X[y == c]
indices = np.random.choice(X_c.shape[0], n, replace=False)
X_c = X_c[indices]
y_c = np.random.choice(y[y == c], size=n, replace=False)
# delete old samples and add new samples
X = np.delete(X, np.where(y == c), axis=0)
y = np.delete(y, np.where(y == c), axis=0)
X = np.concatenate((X, X_c))
y = np.concatenate((y, y_c))
print("done")
return X, y
X_train, y_train = augment_data(X_train, y_train)
print(f"New X_train: {X_train.shape}")
print(f"New y_train: {y_train.shape}")
show_class_distribution(y_train, y_test, plot=True)
# ## Visualizing random samples from each class
# Now that we have equal class distribution let's do some more exploratory analysis, starting with visualizing some random samples from each class.
import random
def plot_samples(X, y, names, n=5):
fig, axs = plt.subplots(n, 5, figsize=(15, n * 1.5))
for i in range(n):
for j in range(5):
            # randrange excludes the upper bound, so the random index is always valid
            axs[i][j].plot(X[y == j][random.randrange(len(X[y == j]))])
axs[i][j].set_xticks([])
axs[i][j].set_yticks([])
if i == 0:
axs[i][j].set_title(names[j], fontsize=20)
plt.tight_layout()
plt.show()
plot_samples(X_train, y_train, classes, n=5)
# ## Visualizing mean & std across features
# Plotting the distribution of mean and std of each feature for each class using line plot could give us interesting insights.
# Here we see that the S and N classes are very similar in terms of their mean and std values and distribution, indicating that the model might have trouble distinguishing between them.
# This information could be used for additional preprocessing/augmentation specific to these samples.
matplotlib.rcParams.update({"font.size": 15})
def plot_feature_std_means(X, y):
print("Average mean and std across all features for each class:")
for i in range(5):
print(
f"{classes[i]}: mean={X[y == i].mean():.4f}, std={X[y == i].std(axis=0).mean():.4f}"
)
plt.figure(figsize=(15, 3))
plt.subplot(1, 2, 1)
plt.bar(classes, [X[y == i].mean() for i in range(5)])
plt.xlabel("Class")
plt.ylabel("Mean")
plt.title("Mean over all features for each class")
plt.subplot(1, 2, 2)
plt.bar(classes, [X[y == i].std(axis=0).mean() for i in range(5)])
plt.xlabel("Class")
plt.ylabel("Std")
plt.title("Average sample std for each class")
plt.show()
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
for i in range(5):
plt.plot(X[y == i].mean(axis=0), label=f"{classes[i]} mean", linewidth=2)
plt.legend()
plt.xlabel("Feature")
plt.ylabel("Mean")
plt.title("Mean of each feature for each class")
plt.subplot(1, 2, 2)
for i in range(5):
plt.plot(X[y == i].std(axis=0), label=f"{classes[i]} std", linewidth=2)
plt.legend()
plt.xlabel("Feature")
plt.ylabel("Std")
plt.title("Std of each feature for each class")
plt.tight_layout()
plt.show()
plot_feature_std_means(X_train, y_train)
# ## Shuffle and standardize
def shuffle_data(X, y):
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def standard_scale_data(train, test):
train_mean = train.mean(axis=0)
train_std = train.std(axis=0)
train = (train - train_mean) / train_std
test = (test - train_mean) / train_std
return train, test
X_train, y_train = shuffle_data(X_train, y_train)
print(f"Old mean: {X_train.mean():.4f}, old std: {X_train.std():.4f}")
X_train, X_test = standard_scale_data(X_train, X_test)
print(f"New mean: {X_train.mean():.4f}, new std: {X_train.std():.4f}")
# ## Simple Random Forest
# Let's try cross-validation with a simple RF model
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from sklearn.metrics import classification_report
from sklearn.metrics import ConfusionMatrixDisplay
FOLDS = 10
def cross_validate_random_forest(x, y, cv, n_estimators=101, **kwargs):
clf = RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1, **kwargs)
scores = cross_validate(
clf,
x,
y,
cv=cv,
scoring=[
"balanced_accuracy",
"f1_macro",
"f1_weighted",
"precision_macro",
"precision_weighted",
"recall_macro",
"recall_weighted",
],
return_train_score=False,
return_estimator=True,
n_jobs=-1,
)
return scores
# run CPU-intensive only if not on GPU (kaggle reduces CPU power if using GPU)
# we will do a separate CPU-only run
if not torch.cuda.is_available():
scores = cross_validate_random_forest(X_train, y_train, FOLDS)
print(f"Cross validation results (averaged across {FOLDS} folds):")
print(
f"Balanced Accuracy: {scores['test_balanced_accuracy'].mean():.4f} (+/- {scores['test_balanced_accuracy'].std() * 2:.4f})"
)
print(
f"F1 Macro: {scores['test_f1_macro'].mean():.4f} (+/- {scores['test_f1_macro'].std() * 2:.4f})"
)
print(
f"F1 Weighted: {scores['test_f1_weighted'].mean():.4f} (+/- {scores['test_f1_weighted'].std() * 2:.4f})"
)
print(
f"Precision Macro: {scores['test_precision_macro'].mean():.4f} (+/- {scores['test_precision_macro'].std() * 2:.4f})"
)
print(
f"Precision Weighted: {scores['test_precision_weighted'].mean():.4f} (+/- {scores['test_precision_weighted'].std() * 2:.4f})"
)
print(
f"Recall Macro: {scores['test_recall_macro'].mean():.4f} (+/- {scores['test_recall_macro'].std() * 2:.4f})"
)
print(
f"Recall Weighted: {scores['test_recall_weighted'].mean():.4f} (+/- {scores['test_recall_weighted'].std() * 2:.4f})"
)
print()
print(classification_report(y_test, scores["estimator"][0].predict(X_test)))
disp = ConfusionMatrixDisplay.from_estimator(
scores["estimator"][0],
X_test,
y_test,
display_labels=classes,
cmap=plt.cm.Blues,
values_format=".2f",
normalize="true",
)
disp.ax_.set_title("Normalized confusion matrix")
plt.show()
if not torch.cuda.is_available():
importances = scores["estimator"][0].feature_importances_
feature_names = [f"{i}" for i in range(len(importances))]
plt.figure(figsize=(10, 5))
plt.bar(feature_names, importances)
plt.xticks([]) # too many numbers
plt.title("RF Feature Importances")
plt.xlabel("Features")
plt.ylabel("Importance Scores")
ax = plt.gca()
n = 10 # Keeps every 10th label
temp = ax.xaxis.get_ticklabels()
temp = list(set(temp) - set(temp[::n]))
for label in temp:
label.set_visible(False)
plt.show()
# ## Using AutoML (TPOT)
# We could manually implement models like XGBoost, SVC, GradientBoostingClassifier, etc. but using TPOT we can intelligently and quickly iterate over tons of models and grid search hyperparameters.
# We'll try it with 3 configs:
# - Default
# - Multifactor dimensionality reduction (MDR)
# - GPU-optimized models (cuML)
# for mdr
from tpot import TPOTClassifier
if not torch.cuda.is_available():
default_pipeline = TPOTClassifier(
generations=5,
population_size=10,
verbosity=2,
random_state=42,
n_jobs=-1,
memory="/tmp/tpot-data/",
scoring="f1_macro",
)
default_pipeline.fit(X_train, y_train)
print(
f"f1_macro test score for best pipeline: {default_pipeline.score(X_test, y_test)}"
)
default_pipeline.export("/kaggle/working/tpot_default_results.py")
print(
default_pipeline.evaluated_individuals_,
file=open("/kaggle/working/default_models.txt", "w+"),
)
from tpot import TPOTClassifier
if not torch.cuda.is_available():
mdr_pipeline = TPOTClassifier(
generations=5,
population_size=10,
verbosity=2,
random_state=42,
n_jobs=-1,
scoring="f1_macro",
memory="/tmp/tpot-data",
config_dict="TPOT MDR",
)
mdr_pipeline.fit(X_train, y_train)
print(
f"f1_macro test score for best pipeline: {mdr_pipeline.score(X_test, y_test)}"
)
mdr_pipeline.export("/kaggle/working/tpot_mdr_results.py")
print(
mdr_pipeline.evaluated_individuals_,
file=open("/kaggle/working/mdr_models.txt", "w+"),
)
# ## GPU-accelerated models
# Everything from this point onwards is GPU-optimized, so we exit early for the CPU-run
import torch
if not torch.cuda.is_available():
exit(0)
from tpot import TPOTClassifier
cuML_pipeline = TPOTClassifier(
generations=5,
population_size=10,
verbosity=2,
random_state=42,
n_jobs=-1,
scoring="f1_macro",
config_dict="TPOT cuML",
)
cuML_pipeline.fit(X_train, y_train)
print(f"f1_macro test score for best pipeline: {cuML_pipeline.score(X_test, y_test)}")
cuML_pipeline.export("/kaggle/working/tpot_cuML_results.py")
print(
cuML_pipeline.evaluated_individuals_,
file=open("/kaggle/working/cuML_models.txt", "w+"),
)
# ## Depthwise 1D CNN
# This type of model is commonly used for modeling 1D signals.
print("X_train", X_train.shape)
print("y_train", y_train.shape)
print("X_test", X_test.shape)
print("y_test", y_test.shape)
from sklearn.preprocessing import OneHotEncoder
X_train = np.expand_dims(X_train, 2)
X_test = np.expand_dims(X_test, 2)
print("X_train", X_train.shape)
print("y_train", y_train.shape)
print("X_test", X_test.shape)
print("y_test", y_test.shape)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.2, random_state=42
)
print("X_train", X_train.shape)
print("y_train", y_train.shape)
print("X_val", X_val.shape)
print("y_val", y_val.shape)
n_obs, features, depth = X_train.shape
from keras.models import Sequential
from keras.layers import (
Conv1D,
MaxPooling1D,
Dense,
Dropout,
Input,
Flatten,
SeparableConv1D,
GlobalAveragePooling1D,
)
from keras.layers import GlobalMaxPooling1D
from keras.layers import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
def build_model():
input_img = Input(shape=(features, depth), name="ImageInput")
x = Conv1D(32, 3, activation="relu", padding="same", name="Conv1_1")(input_img)
x = Conv1D(32, 3, activation="relu", padding="same", name="Conv1_2")(x)
x = MaxPooling1D(2, name="pool1")(x)
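    # SeparableConv1D performs a depthwise convolution (one filter per input channel)
    # followed by a 1x1 pointwise convolution, which uses far fewer parameters than a standard Conv1D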
x = SeparableConv1D(32, 3, activation="relu", padding="same", name="Conv2_1")(x)
x = SeparableConv1D(32, 3, activation="relu", padding="same", name="Conv2_2")(x)
x = MaxPooling1D(2, name="pool2")(x)
x = SeparableConv1D(64, 3, activation="relu", padding="same", name="Conv3_1")(x)
x = BatchNormalization(name="bn1")(x)
x = SeparableConv1D(64, 3, activation="relu", padding="same", name="Conv3_2")(x)
x = BatchNormalization(name="bn2")(x)
x = SeparableConv1D(64, 3, activation="relu", padding="same", name="Conv3_3")(x)
x = MaxPooling1D(2, name="pool3")(x)
x = Flatten(name="flatten")(x)
x = Dense(128, activation="relu", name="fc1")(x)
x = Dropout(0.6, name="dropout1")(x)
x = Dense(128, activation="relu", name="fc2")(x)
x = Dropout(0.5, name="dropout2")(x)
x = Dense(5, activation="softmax", name="fc3")(x)
model = Model(inputs=input_img, outputs=x)
return model
model = build_model()
model.summary()
BATCH_SIZE = 1024
def exp_decay(epoch, lr):
if epoch < 10:
return lr
else:
return lr * 0.97
lrate = LearningRateScheduler(exp_decay)
adam = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=adam,
metrics=["sparse_categorical_accuracy"],
)
import tensorflow as tf
early_stopping = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5)
history = model.fit(
X_train,
y_train,
epochs=100,
batch_size=BATCH_SIZE,
validation_data=(X_val, y_val),
    callbacks=[lrate, early_stopping],
)
print("loss and accuracy:")
model.evaluate(X_test, y_test)
y_pred = model.predict(X_test, batch_size=BATCH_SIZE)
y_pred = np.argmax(y_pred, axis=-1)
# Original labels
print(y_test.shape)
print(y_pred.shape)
from sklearn.metrics import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
plot_confusion_matrix(
cm, figsize=(7, 7), hide_ticks=True, cmap=plt.cm.Blues, show_normed=True
)
plt.xticks(range(5), classes, fontsize=12)
plt.yticks(range(5), classes, fontsize=12)
plt.show()
print(classification_report(y_test, y_pred))
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model categorical accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "val"], loc="lower right")
plt.subplot(1, 2, 2)
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model categorical_crossentropy loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "val"], loc="upper right")
plt.show()
#
# # **Logistic Regression Tutorial with Python**
# Hello friends,
# In this kernel, I implement Logistic Regression with Python and Scikit-Learn. I build a Logistic Regression classifier to predict whether or not it will rain tomorrow in Australia. I train a binary classification model using Logistic Regression.
# **As always, I hope you find this kernel useful and your UPVOTES would be highly appreciated**.
# # **Table of Contents**
# 1. [Introduction to Logistic Regression](#1)
# 2. [Logistic Regression intuition](#2)
# 3. [Assumptions of Logistic Regression](#3)
# 4. [Types of Logistic Regression](#4)
# 5. [Import libraries](#5)
# 6. [Import dataset](#6)
# 7. [Exploratory data analysis](#7)
# 8. [Declare feature vector and target variable](#8)
# 9. [Split data into separate training and test set](#9)
# 10. [Feature engineering](#10)
# 11. [Feature scaling](#11)
# 12. [Model training](#12)
# 13. [Predict results](#13)
# 14. [Check accuracy score](#14)
# 15. [Confusion matrix](#15)
# 16. [Classification metrics](#16)
# 17. [Adjusting the threshold level](#17)
# 18. [ROC - AUC](#18)
# 19. [k-Fold Cross Validation](#19)
# 20. [Hyperparameter optimization using GridSearch CV](#20)
# 21. [Results and conclusion](#21)
# 22. [References](#22)
# # **1. Introduction to Logistic Regression**
# [Table of Contents](#0.1)
# When data scientists come across a new classification problem, the first algorithm that often comes to mind is **Logistic Regression**. It is a supervised learning algorithm that returns discrete classes: it maps observations into a set of categories, so its output is discrete. Logistic Regression is also called **Logit Regression**. It is one of the simplest, most intuitive and most widely applicable classifiers.
# # **2. Logistic Regression intuition**
# [Table of Contents](#0.1)
# In statistics, the **logistic regression model** is a widely used statistical model for classification. Given some data, the logistic regression algorithm helps us assign the observations to discrete categories, so the target variable is typically discrete.
# Logistic regression works as follows -
# ## **Linear equation**
# Logistic regression starts from a linear equation that relates the independent variables to the quantity we want to predict. For example, consider "how many hours of study are needed to pass an exam": the hours studied can be x1, and z the score that determines the probability of passing.
# With one independent variable (x1) and one dependent variable (z), the linear equation is -
# z = β0 + β1x1
# Here the coefficients β0 and β1 are the parameters of the model.
# If there are multiple independent variables, the equation extends to
# z = β0 + β1x1 + β2x2 + …… + βnxn
#
# The coefficients β0, β1, β2 and βn are the parameters of the model.
# The predicted value z is therefore obtained from the equation above.
# ## **Sigmoid function**
# Given the predicted value z, we want to map it to a probability between 0 and 1. The sigmoid function does exactly this: it maps any real number into the interval (0, 1).
# In machine learning, the sigmoid function converts raw predictions into probabilities. It has a characteristic S-shaped curve, also called the sigmoid curve.
# The sigmoid function is a special case of the logistic function and is given by the formula 1 / (1 + e^(-z)).
# Graphically, the sigmoid function looks as follows.
# ### Sigmoid function
# 
# ## **Decision boundary**
# The sigmoid function returns a probability between 0 and 1, which is then mapped to the discrete classes we want to predict, "0" and "1". To convert the probability into a discrete class (pass/fail, yes/no, positive/negative) we choose a threshold value, called the decision boundary: probabilities above it are mapped to class 1 and probabilities below it to class 0.
# Mathematically this can be written as:-
# p ≥ 0.5 => class = 1
# p < 0.5 => class = 0
# In general, the decision boundary is set to 0.5. So if the probability is 0.8 (> 0.5) we map the observation to class 1, and if it is 0.2 (< 0.5) we map it to class 0. This is illustrated in the graph below.-
# 
# ## **Making predictions**
# We now know about the sigmoid function and the decision boundary in logistic regression. With these we can describe how predictions are made. The prediction function in logistic regression returns a positive class when the probability is above the threshold; we map this to class 1 and write it as P(class = 1). The closer the probability gets to 1, the more confident we are that the observation belongs to class 1, and vice versa.
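# The following is a minimal, illustrative NumPy sketch (the coefficient values are made up) tying together the linear score z, the sigmoid squashing and the 0.5 decision boundary described above.
import numpy as np

beta0 = -1.0                        # made-up intercept
betas = np.array([0.8, 0.5])        # made-up weights b1, b2

def sigmoid_predict_proba(X):
    z = beta0 + X @ betas           # linear combination z = b0 + b1*x1 + ... + bn*xn
    return 1 / (1 + np.exp(-z))     # sigmoid maps z to a probability in (0, 1)

X_demo = np.array([[2.0, 1.0], [0.5, -1.0]])
p_demo = sigmoid_predict_proba(X_demo)
print(p_demo, (p_demo >= 0.5).astype(int))  # probabilities and the classes at the 0.5 boundary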
# # **3. Assumptions of Logistic Regression**
# [Table of Contents](#0.1)
# The logistic regression model relies on several key assumptions (a quick multicollinearity check is sketched right after this list):-
# 1. The dependent variable must be binary, discrete or ordinal in nature.
# 2. The observations must be independent of each other; in other words, they should not come from repeated measurements.
# 3. The algorithm assumes little or no multicollinearity among the independent variables, i.e. the independent variables should not be highly correlated with each other.
# 4. The model assumes linearity between the independent variables and the log odds (the log of the ratio of success to failure).
# 5. The success of the model depends on the sample size; larger samples typically give higher accuracy.
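# A minimal, illustrative check for assumption 3 (multicollinearity). The DataFrame `X_num` below is hypothetical - the actual weather data is only loaded in section 6 - but the same .corr() pattern applies there.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X_num = pd.DataFrame({"x1": rng.normal(size=100), "x2": rng.normal(size=100)})
X_num["x3"] = 0.9 * X_num["x1"] + rng.normal(scale=0.1, size=100)  # deliberately collinear with x1
corr = X_num.corr().abs()
pairs = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1)).stack()  # each pair once
print(pairs[pairs > 0.8])  # |correlation| > 0.8 flags likely multicollinearity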
# # **4. Types of Logistic Regression**
# [Table of Contents](#0.1)
# Logistic regression can be divided into three types depending on the target variable:-
# ### 1. Binary Logistic Regression
# In binary logistic regression the target variable has two possible categories. Typical examples are yes/no, good/bad, positive/negative, spam/not spam and pass/fail.
# ### 2. Multinomial Logistic Regression
# In multinomial logistic regression the target variable has three or more categories with no natural ordering, for example types of fruit - apple, pear, mango, banana.
# ### 3. Ordinal Logistic Regression
# In ordinal logistic regression the target variable has three or more ordered categories, for example student grades classified as low, medium, high and very high.
# # **5. Import libraries**
# [Table of Contents](#0.1)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # data visualization
import seaborn as sns # statistical data visualization
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import warnings
warnings.filterwarnings("ignore")
# # **6. Import dataset**
# [Table of Contents](#0.1)
data = "/kaggle/input/weather-dataset-rattle-package/weatherAUS.csv"
df = pd.read_csv(data)
# # **7. Exploratory data analysis**
# [Table of Contents](#0.1)
# Now let's extract some useful information from the data.
# view dimensions of dataset
df.shape
# We can see that the dataset has 142193 rows and 24 columns.
# preview the dataset
df.head()
col_names = df.columns
col_names
# ### Drop the RISK_MM variable
# The dataset description says the `RISK_MM` feature should be dropped. The line below would do that:-
# NOTE) this column does not exist in this copy of the dataset, so the drop is left commented out
# df.drop(['RISK_MM'], axis=1, inplace=True)
# view summary of dataset
df.info()
# ### Types of variables
# In this section we split the variables into categorical and numerical ones. The dataset contains a mix of both: categorical variables have dtype object, while numerical variables have dtype float64.
# First, let's find all the categorical variables.
# find categorical variables
categorical = [var for var in df.columns if df[var].dtype == "O"]
print("There are {} categorical variables\n".format(len(categorical)))
print("The categorical variables are :", categorical)
# view the categorical variables
df[categorical].head()
# ### Summary of categorical variables
# - There is a date variable, the `Date` column.
# - There are 6 truly categorical variables: `Location`, `WindGustDir`, `WindDir9am`, `WindDir3pm`, `RainToday`, `RainTomorrow`.
# - Two of them are binary: `RainToday` and `RainTomorrow`.
# - `RainTomorrow` is the target variable.
# ## Explore problems within categorical variables
# First, let's explore the categorical variables.
# ### Missing values in categorical variables
# check missing values in categorical variables
df[categorical].isnull().sum()
# print categorical variables containing missing values
cat1 = [var for var in categorical if df[var].isnull().sum() != 0]
print(df[cat1].isnull().sum())
# Only 4 variables contain missing values: `WindGustDir`, `WindDir9am`, `WindDir3pm` and `RainToday`.
# ### Frequency counts of categorical variables
# Now let's look at the frequency counts of the categorical variables.
# view frequency of categorical variables
for var in categorical:
print(df[var].value_counts())
# view frequency distribution of categorical variables
for var in categorical:
    print(df[var].value_counts() / len(df))
# ### Number of labels: cardinality
# The number of distinct labels within a categorical variable is known as **cardinality**. A variable with a large number of labels has **high cardinality**, which can cause serious problems for a machine learning model. So let's check for high cardinality.
# check for cardinality in categorical variables
for var in categorical:
print(var, " contains ", len(df[var].unique()), " labels")
# The `Date` variable needs preprocessing; this is done in the next section.
# All the other variables contain a relatively small number of labels.
# ### Feature engineering of the Date variable
df["Date"].dtypes
# The data type of `Date` is object (O). We will parse it as datetime and split it into three new features.
# parse the dates, currently coded as strings, into datetime format
df["Date"] = pd.to_datetime(df["Date"])
# extract year from date
df["Year"] = df["Date"].dt.year
df["Year"].head()
# extract month from date
df["Month"] = df["Date"].dt.month
df["Month"].head()
# extract day from date
df["Day"] = df["Date"].dt.day
df["Day"].head()
# again view the summary of dataset
df.info()
# Three new columns were created from the `Date` variable. Now we can drop the original `Date` column from the dataset.
# drop the original Date variable
df.drop("Date", axis=1, inplace=True)
# preview the dataset again
df.head()
# We can see that the `Date` variable has been handled successfully.
# ### Explore the categorical variables
# Now let's explore the categorical variables one by one.
# find categorical variables
categorical = [var for var in df.columns if df[var].dtype == "O"]
print("There are {} categorical variables\n".format(len(categorical)))
print("The categorical variables are :", categorical)
# There are now 6 categorical variables in the dataset; the `Date` variable has been removed. First, let's check their missing values again.
# check for missing values in categorical variables
df[categorical].isnull().sum()
# We can see that `WindGustDir`, `WindDir9am`, `WindDir3pm` and `RainToday` contain missing values. Let's explore these variables one by one.
# ### Explore the `Location` variable
# print number of labels in Location variable
print("Location contains", len(df.Location.unique()), "labels")
# check labels in location variable
df.Location.unique()
# check frequency distribution of values in Location variable
df.Location.value_counts()
# let's do One Hot Encoding of Location variable
# get k-1 dummy variables after One Hot Encoding
# preview the dataset with head() method
pd.get_dummies(df.Location, drop_first=True).head()
# ### Explore the `WindGustDir` variable
# print number of labels in WindGustDir variable
print("WindGustDir contains", len(df["WindGustDir"].unique()), "labels")
# check labels in WindGustDir variable
df["WindGustDir"].unique()
# check frequency distribution of values in WindGustDir variable
df.WindGustDir.value_counts()
# let's do One Hot Encoding of WindGustDir variable
# get k-1 dummy variables after One Hot Encoding
# also add an additional dummy variable to indicate there was missing data
# preview the dataset with head() method
pd.get_dummies(df.WindGustDir, drop_first=True, dummy_na=True).head()
# sum the number of 1s per boolean variable over the rows of the dataset
# it will tell us how many observations we have for each category
pd.get_dummies(df.WindGustDir, drop_first=True, dummy_na=True).sum(axis=0)
# There are 9330 missing values in the `WindGustDir` variable.
# ### Explore the `WindDir9am` variable
# print number of labels in WindDir9am variable
print("WindDir9am contains", len(df["WindDir9am"].unique()), "labels")
# check labels in WindDir9am variable
df["WindDir9am"].unique()
# check frequency distribution of values in WindDir9am variable
df["WindDir9am"].value_counts()
# let's do One Hot Encoding of WindDir9am variable
# get k-1 dummy variables after One Hot Encoding
# also add an additional dummy variable to indicate there was missing data
# preview the dataset with head() method
pd.get_dummies(df.WindDir9am, drop_first=True, dummy_na=True).head()
# sum the number of 1s per boolean variable over the rows of the dataset
# it will tell us how many observations we have for each category
pd.get_dummies(df.WindDir9am, drop_first=True, dummy_na=True).sum(axis=0)
# There are 10013 missing values in the `WindDir9am` variable.
# ### Explore the `WindDir3pm` variable
# print number of labels in WindDir3pm variable
print("WindDir3pm contains", len(df["WindDir3pm"].unique()), "labels")
# check labels in WindDir3pm variable
df["WindDir3pm"].unique()
# check frequency distribution of values in WindDir3pm variable
df["WindDir3pm"].value_counts()
# let's do One Hot Encoding of WindDir3pm variable
# get k-1 dummy variables after One Hot Encoding
# also add an additional dummy variable to indicate there was missing data
# preview the dataset with head() method
pd.get_dummies(df.WindDir3pm, drop_first=True, dummy_na=True).head()
# sum the number of 1s per boolean variable over the rows of the dataset
# it will tell us how many observations we have for each category
pd.get_dummies(df.WindDir3pm, drop_first=True, dummy_na=True).sum(axis=0)
# There are 3778 missing values in the `WindDir3pm` variable.
# ### Explore the `RainToday` variable
# print number of labels in RainToday variable
print("RainToday contains", len(df["RainToday"].unique()), "labels")
# check labels in WindGustDir variable
df["RainToday"].unique()
# check frequency distribution of values in WindGustDir variable
df.RainToday.value_counts()
# let's do One Hot Encoding of RainToday variable
# get k-1 dummy variables after One Hot Encoding
# also add an additional dummy variable to indicate there was missing data
# preview the dataset with head() method
pd.get_dummies(df.RainToday, drop_first=True, dummy_na=True).head()
# sum the number of 1s per boolean variable over the rows of the dataset
# it will tell us how many observations we have for each category
pd.get_dummies(df.RainToday, drop_first=True, dummy_na=True).sum(axis=0)
# There are 1406 missing values in the `RainToday` variable.
# ### Explore the numerical variables
# find numerical variables
numerical = [var for var in df.columns if df[var].dtype != "O"]
print("There are {} numerical variables\n".format(len(numerical)))
print("The numerical variables are :", numerical)
# view the numerical variables
df[numerical].head()
# ### Summary of numerical variables
# - There are 16 numerical variables in the dataset.
# - They are: `MinTemp`, `MaxTemp`, `Rainfall`, `Evaporation`, `Sunshine`, `WindGustSpeed`, `WindSpeed9am`, `WindSpeed3pm`, `Humidity9am`, `Humidity3pm`, `Pressure9am`, `Pressure3pm`, `Cloud9am`, `Cloud3pm`, `Temp9am`, `Temp3pm`.
# - All of the numerical variables are continuous.
# ## Explore problems within numerical variables
# Next, let's explore the numerical variables.
# ### Missing values in numerical variables
# check missing values in numerical variables
df[numerical].isnull().sum()
# All 16 numerical variables contain missing values.
# ### Outliers in numerical variables
# view summary statistics in numerical variables
print(round(df[numerical].describe(), 2))
# A quick look at the summary statistics suggests that `Rainfall`, `Evaporation`, `WindSpeed9am` and `WindSpeed3pm` may contain outliers.
# Let's draw boxplots to confirm this.
# draw boxplots to visualize outliers
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
fig = df.boxplot(column="Rainfall")
fig.set_title("")
fig.set_ylabel("Rainfall")
plt.subplot(2, 2, 2)
fig = df.boxplot(column="Evaporation")
fig.set_title("")
fig.set_ylabel("Evaporation")
plt.subplot(2, 2, 3)
fig = df.boxplot(column="WindSpeed9am")
fig.set_title("")
fig.set_ylabel("WindSpeed9am")
plt.subplot(2, 2, 4)
fig = df.boxplot(column="WindSpeed3pm")
fig.set_title("")
fig.set_ylabel("WindSpeed3pm")
# The boxplots above confirm that these variables contain a lot of outliers.
# ### Check the distribution of the variables
# Now let's check whether these variables follow a normal distribution. If they do, we will use extreme value analysis; otherwise we will use the IQR (interquartile range) to find outliers.
# plot histogram to check distribution
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
fig = df.Rainfall.hist(bins=10)
fig.set_xlabel("Rainfall")
fig.set_ylabel("RainTomorrow")
plt.subplot(2, 2, 2)
fig = df.Evaporation.hist(bins=10)
fig.set_xlabel("Evaporation")
fig.set_ylabel("RainTomorrow")
plt.subplot(2, 2, 3)
fig = df.WindSpeed9am.hist(bins=10)
fig.set_xlabel("WindSpeed9am")
fig.set_ylabel("RainTomorrow")
plt.subplot(2, 2, 4)
fig = df.WindSpeed3pm.hist(bins=10)
fig.set_xlabel("WindSpeed3pm")
fig.set_ylabel("RainTomorrow")
# All four variables are heavily skewed, so we will use the interquartile range to find and cap the outliers.
# find outliers for Rainfall variable
IQR = df.Rainfall.quantile(0.75) - df.Rainfall.quantile(0.25)
Lower_fence = df.Rainfall.quantile(0.25) - (IQR * 3)
Upper_fence = df.Rainfall.quantile(0.75) + (IQR * 3)
print(
"Rainfall outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
# For `Rainfall` the minimum and maximum values are 0.0 and 371.0, so the outliers are values > 3.2.
# find outliers for Evaporation variable
IQR = df.Evaporation.quantile(0.75) - df.Evaporation.quantile(0.25)
Lower_fence = df.Evaporation.quantile(0.25) - (IQR * 3)
Upper_fence = df.Evaporation.quantile(0.75) + (IQR * 3)
print(
"Evaporation outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
# For `Evaporation` the minimum and maximum values are 0.0 and 145.0, so the outliers are values > 21.8.
# find outliers for WindSpeed9am variable
IQR = df.WindSpeed9am.quantile(0.75) - df.WindSpeed9am.quantile(0.25)
Lower_fence = df.WindSpeed9am.quantile(0.25) - (IQR * 3)
Upper_fence = df.WindSpeed9am.quantile(0.75) + (IQR * 3)
print(
"WindSpeed9am outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
# For `WindSpeed9am` the minimum and maximum values are 0.0 and 130.0, so the outliers are values > 55.0.
# find outliers for WindSpeed3pm variable
IQR = df.WindSpeed3pm.quantile(0.75) - df.WindSpeed3pm.quantile(0.25)
Lower_fence = df.WindSpeed3pm.quantile(0.25) - (IQR * 3)
Upper_fence = df.WindSpeed3pm.quantile(0.75) + (IQR * 3)
print(
"WindSpeed3pm outliers are values < {lowerboundary} or > {upperboundary}".format(
lowerboundary=Lower_fence, upperboundary=Upper_fence
)
)
# For `WindSpeed3pm` the minimum and maximum values are 0.0 and 87.0, so the outliers are values > 57.0.
# # **8. Declare feature vector and target variable**
# [Table of Contents](#0.1)
X = df.drop(["RainTomorrow"], axis=1)
y = df["RainTomorrow"]
# # **9. Split data into separate training and test set**
# [Table of Contents](#0.1)
# split X and y into training and testing sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# check the shape of X_train and X_test
X_train.shape, X_test.shape
# # **10. Feature engineering**
# [Table of Contents](#0.1)
# **Feature engineering** is the process of transforming raw data into useful features that improve the predictive performance of a model. Below, the different types of variables are engineered in different ways.
# First, split the variables into numerical and categorical again.
# check data types in X_train
X_train.dtypes
# display categorical variables
categorical = [col for col in X_train.columns if X_train[col].dtypes == "O"]
categorical
# display numerical variables
numerical = [col for col in X_train.columns if X_train[col].dtypes != "O"]
numerical
# ### Engineering missing values in numerical variables
#
# check missing values in numerical variables in X_train
X_train[numerical].isnull().sum()
# check missing values in numerical variables in X_test
X_test[numerical].isnull().sum()
# print percentage of missing values in the numerical variables in training set
for col in numerical:
if X_train[col].isnull().mean() > 0:
print(col, round(X_train[col].isnull().mean(), 4))
# ### Assumption behind the imputation
# The data are assumed to be missing completely at random (MCAR). There are two common ways to impute missing values: mean/median imputation and random sample imputation. When the dataset contains outliers, median imputation is preferred because the median is robust to outliers, so that is what we use here.
# The missing values are imputed with the median, which is a statistically appropriate choice for this data. The statistic must be computed on the training set only and then applied to both the training and the test set: this prevents information from the test set leaking into training and helps avoid overfitting.
# impute missing values in X_train and X_test with respective column median in X_train
for df1 in [X_train, X_test]:
for col in numerical:
col_median = X_train[col].median()
df1[col].fillna(col_median, inplace=True)
# check again missing values in numerical variables in X_train
X_train[numerical].isnull().sum()
# check missing values in numerical variables in X_test
X_test[numerical].isnull().sum()
# There are now no missing values in the numerical columns of either the training or the test set.
# ### Engineering missing values in categorical variables
# print percentage of missing values in the categorical variables in training set
X_train[categorical].isnull().mean()
# print categorical variables with missing data
for col in categorical:
if X_train[col].isnull().mean() > 0:
print(col, (X_train[col].isnull().mean()))
# impute missing categorical variables with most frequent value
for df2 in [X_train, X_test]:
df2["WindGustDir"].fillna(X_train["WindGustDir"].mode()[0], inplace=True)
df2["WindDir9am"].fillna(X_train["WindDir9am"].mode()[0], inplace=True)
df2["WindDir3pm"].fillna(X_train["WindDir3pm"].mode()[0], inplace=True)
df2["RainToday"].fillna(X_train["RainToday"].mode()[0], inplace=True)
# check missing values in categorical variables in X_train
X_train[categorical].isnull().sum()
# check missing values in categorical variables in X_test
X_test[categorical].isnull().sum()
# As a final check, let's verify that there are no missing values left in X_train and X_test.
# check missing values in X_train
X_train.isnull().sum()
# check missing values in X_test
X_test.isnull().sum()
# We can see that there are no missing values in X_train and X_test.
# ### Engineering outliers in numerical variables
# We saw that `Rainfall`, `Evaporation`, `WindSpeed9am` and `WindSpeed3pm` contain outliers. We will use a top-coding approach to cap the maximum values and remove the outliers from these variables.
def max_value(df3, variable, top):
return np.where(df3[variable] > top, top, df3[variable])
for df3 in [X_train, X_test]:
df3["Rainfall"] = max_value(df3, "Rainfall", 3.2)
df3["Evaporation"] = max_value(df3, "Evaporation", 21.8)
df3["WindSpeed9am"] = max_value(df3, "WindSpeed9am", 55)
df3["WindSpeed3pm"] = max_value(df3, "WindSpeed3pm", 57)
X_train.Rainfall.max(), X_test.Rainfall.max()
X_train.Evaporation.max(), X_test.Evaporation.max()
X_train.WindSpeed9am.max(), X_test.WindSpeed9am.max()
X_train.WindSpeed3pm.max(), X_test.WindSpeed3pm.max()
X_train[numerical].describe()
# The outliers in `Rainfall`, `Evaporation`, `WindSpeed9am` and `WindSpeed3pm` have now been capped.
# ### Encode categorical variables
categorical
X_train[categorical].head()
# encode RainToday variable
import category_encoders as ce
encoder = ce.BinaryEncoder(cols=["RainToday"])
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
X_train.head()
# Two new columns, `RainToday_0` and `RainToday_1`, were created from `RainToday`.
# Now let's build the final training set `X_train`.
X_train = pd.concat(
[
X_train[numerical],
X_train[["RainToday_0", "RainToday_1"]],
pd.get_dummies(X_train.Location),
pd.get_dummies(X_train.WindGustDir),
pd.get_dummies(X_train.WindDir9am),
pd.get_dummies(X_train.WindDir3pm),
],
axis=1,
)
X_train.head()
# Similarly, let's build the test set `X_test`.
X_test = pd.concat(
[
X_test[numerical],
X_test[["RainToday_0", "RainToday_1"]],
pd.get_dummies(X_test.Location),
pd.get_dummies(X_test.WindGustDir),
pd.get_dummies(X_test.WindDir9am),
pd.get_dummies(X_test.WindDir3pm),
],
axis=1,
)
X_test.head()
# We now have training and test sets ready for modelling. Before that, all features are mapped onto the same scale; this is called `feature scaling` and is done in the next section.
# # **11. Feature scaling**
# [Table of Contents](#0.1)
X_train.describe()
cols = X_train.columns
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
X_train.describe()
# The `X_train` dataset is now ready to be fed into the logistic regression classifier, which is done in the next section.
# ### Handle y_train and y_test
# The tutorial seems to have skipped this step, so it is added here.
y_train
y_train[y_train == "Yes"] = 1
y_train[y_train == "No"] = 0
y_train.fillna(0, inplace=True)
y_test[y_test == "Yes"] = 1
y_test[y_test == "No"] = 0
y_test.fillna(0, inplace=True)
# 1 means it will rain tomorrow, 0 means it will not; missing labels are filled with 0 (this matches how the following sections interpret the data).
# # **12. Model training**
# [Table of Contents](#0.1)
# train a logistic regression model on the training set
from sklearn.linear_model import LogisticRegression
# instantiate the model
logreg = LogisticRegression(solver="liblinear", random_state=0)
# fit the model
logreg.fit(X_train, y_train)
# # **13. Predict results**
# [Table of Contents](#0.1)
y_pred_test = logreg.predict(X_test)
y_pred_test
# ### The predict_proba method
# The **predict_proba** method returns the predicted probabilities for the target classes (0 and 1) as an array.
# Column `0` is the probability of no rain and column `1` is the probability of rain; observations whose label was originally missing were filled with 0 above.
# probability of getting output as 0 - no rain
logreg.predict_proba(X_test)[:, 0]
# probability of getting output as 1 - rain
logreg.predict_proba(X_test)[:, 1]
# # **14. Check accuracy score**
# [Table of Contents](#0.1)
from sklearn.metrics import accuracy_score
print("Model accuracy score: {0:0.4f}".format(accuracy_score(y_test, y_pred_test)))
# **y_test** contains the true labels and **y_pred_test** the labels predicted from the features.
# ### Compare the train-set and test-set accuracy
# Now let's compare the train-set accuracy against the test-set accuracy to check for overfitting.
y_pred_train = logreg.predict(X_train)
y_pred_train
print(
"Training-set accuracy score: {0:0.4f}".format(
accuracy_score(y_train, y_pred_train)
)
)
# ### Check for overfitting and underfitting
# print the scores on training and test set
print("Training set score: {:.4f}".format(logreg.score(X_train, y_train)))
print("Test set score: {:.4f}".format(logreg.score(X_test, y_test)))
# The training-set accuracy is 0.8476 while the test-set accuracy is 0.8501. These two values are quite comparable, so there is no sign of overfitting.
#
# In logistic regression the default value of C is 1. It delivers good performance, about 85% accuracy, on both the training and the test set. Since the two scores are so close, there may be room for a less constrained, more flexible model.
# Let's increase C and fit a more flexible model.
# fit the Logsitic Regression model with C=100
# instantiate the model
logreg100 = LogisticRegression(C=100, solver="liblinear", random_state=0)
# fit the model
logreg100.fit(X_train, y_train)
# print the scores on training and test set
print("Training set score: {:.4f}".format(logreg100.score(X_train, y_train)))
print("Test set score: {:.4f}".format(logreg100.score(X_test, y_test)))
# With C=100 both the training-set and test-set accuracy increased, so a more flexible (less regularized) model performs better here.
# Conversely, what about a more regularized model? Let's change C=1 to C=0.01.
# fit the Logsitic Regression model with C=001
# instantiate the model
logreg001 = LogisticRegression(C=0.01, solver="liblinear", random_state=0)
# fit the model
logreg001.fit(X_train, y_train)
# print the scores on training and test set
print("Training set score: {:.4f}".format(logreg001.score(X_train, y_train)))
print("Test set score: {:.4f}".format(logreg001.score(X_test, y_test)))
# With the more regularized model, C=0.01, both the training-set and test-set accuracy decreased.
# ### Compare model accuracy with null accuracy
# The model accuracy is 0.8501, but that alone does not tell us whether the model is actually good. We should compare it against the **null accuracy**: the accuracy that could be achieved by always predicting the most frequent class.
# So let's first look at the class distribution in the test set.
# check class distribution in test set
y_test.value_counts()
# The most frequent class has 22067 occurrences, so the null accuracy is 22067 divided by the total number of test observations.
# check null accuracy score
null_accuracy = 22067 / (22067 + 6372)
print("Null accuracy score: {0:0.4f}".format(null_accuracy))
# Our model accuracy is 0.8501 while the null accuracy is 0.7759, so the logistic regression model is doing a decent job.
# The comparison above shows that the model accuracy is reasonably high,
# which means the model is performing quite well.
# However, accuracy tells us nothing about the underlying class distribution, nor about the types of errors the classifier makes.
# A tool called the `confusion matrix` solves this problem.
# # **15. Confusion matrix**
# [Table of Contents](#0.1)
# A confusion matrix is a tool for summarizing the performance of a classification algorithm. It gives a clear picture of the model's performance and of the types of errors it makes, showing correct and incorrect predictions broken down by each class. The result is a 2x2 matrix.
# Four outcomes are possible when evaluating a classifier. They are:-
# **True Positives (TP)** – we predicted positive and the observation is actually positive.
# **True Negatives (TN)** – we predicted negative and the observation is actually negative.
# **False Positives (FP)** – we predicted positive but the observation is actually negative. This is called a **Type I error.**
# **False Negatives (FN)** – we predicted negative but the observation is actually positive. This is a very serious error, called a **Type II error.**
# These four outcomes are what the confusion matrix summarizes.
#
# Print the Confusion Matrix and slice it into four pieces
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred_test)
print("Confusion matrix\n\n", cm)
print("\nTrue Positives(TP) = ", cm[0, 0])
print("\nTrue Negatives(TN) = ", cm[1, 1])
print("\nFalse Positives(FP) = ", cm[0, 1])
print("\nFalse Negatives(FN) = ", cm[1, 0])
# The confusion matrix shows `20892 + 3285 = 24177 correct predictions` and `3087 + 1175 = 4262 incorrect predictions`.
# In this case we have
# - `True Positives` (actual positive and predicted positive) - 20892
# - `True Negatives` (actual negative and predicted negative) - 3285
# - `False Positives` (actual negative but predicted positive) - 1175 `(Type I error)`
# - `False Negatives` (actual positive but predicted negative) - 3087 `(Type II error)`
# visualize confusion matrix with seaborn heatmap
cm_matrix = pd.DataFrame(
data=cm,
columns=["Actual Positive:1", "Actual Negative:0"],
index=["Predict Positive:1", "Predict Negative:0"],
)
sns.heatmap(cm_matrix, annot=True, fmt="d", cmap="YlGnBu")
# # **16. Classification metrics**
# [Table of Contents](#0.1)
# ## Classification Report
# The **classification report** is another way to evaluate a classification model. It reports the **precision**, **recall**, **f1** and **support** scores for the model; these terms are explained below.
# We can print a classification report as follows:-
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred_test))
# ## Classification accuracy
TP = cm[0, 0]
TN = cm[1, 1]
FP = cm[0, 1]
FN = cm[1, 0]
# print classification accuracy
classification_accuracy = (TP + TN) / float(TP + TN + FP + FN)
print("Classification accuracy : {0:0.4f}".format(classification_accuracy))
# ## Classification error
# print classification error
classification_error = (FP + FN) / float(TP + TN + FP + FN)
print("Classification error : {0:0.4f}".format(classification_error))
# ## Precision
# **Precision** is the percentage of predicted positive outcomes that are actually positive. It is the ratio of true positives (TP) to the total of true and false positives (TP + FP).
# So **precision** measures how accurate the positive predictions are. It is more concerned with the positive class than with the negative class.
# Mathematically, precision is defined as the ratio of `TP to (TP + FP).`
#
# print precision score
precision = TP / float(TP + FP)
print("Precision : {0:0.4f}".format(precision))
# ## Recall
# Recall is the percentage of actual positive observations that were correctly predicted as positive.
# It is the ratio of true positives (TP) to the sum of true positives and false negatives (TP + FN). **Recall** is also called **sensitivity**.
# **Recall** tells us how many of the truly positive cases we managed to identify.
# Mathematically, recall is defined as the ratio of `TP to (TP + FN).`
#
recall = TP / float(TP + FN)
print("Recall or Sensitivity : {0:0.4f}".format(recall))
# ## True Positive Rate
# The **true positive rate** is just another name for **recall**.
#
true_positive_rate = TP / float(TP + FN)
print("True Positive Rate : {0:0.4f}".format(true_positive_rate))
# ## False Positive Rate
# The proportion of actual negative observations that were predicted as positive.
false_positive_rate = FP / float(FP + TN)
print("False Positive Rate : {0:0.4f}".format(false_positive_rate))
# ## Specificity
# The proportion of actual negative observations that were correctly predicted as negative.
specificity = TN / (TN + FP)
print("Specificity : {0:0.4f}".format(specificity))
# ## f1-score
# The **f1-score** is the harmonic mean of precision and recall. Its best possible value is 1.0 and its worst is 0.0.
# Because the f1-score embeds both precision and recall, it is typically lower than plain accuracy. The `f1-score` should be used to compare classifier models, not as a global measure of accuracy. A quick computation from the precision and recall values above follows.
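# A minimal sanity check, reusing the `precision` and `recall` variables computed above: the f1-score is just their harmonic mean (this should match the classification report up to rounding).
f1 = 2 * (precision * recall) / (precision + recall)
print("f1-score : {0:0.4f}".format(f1))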
# ## Support
# **Support** is the actual number of occurrences of each class in the dataset.
# # **17. Adjusting the threshold level**
# [Table of Contents](#0.1)
# print the first 10 predicted probabilities of two classes- 0 and 1
y_pred_prob = logreg.predict_proba(X_test)[0:10]
y_pred_prob
# ### Observations
# - In each row the two probabilities sum to 1.
# - There are two columns, corresponding to the two classes 0 and 1.
#     - Class 0 - predicted probability that it will not rain tomorrow.
#     - Class 1 - predicted probability that it will rain tomorrow.
# - Importance of the predicted probabilities
#     - We can rank the observations by their probability of rain or no rain.
# - predict_proba process
#     - Predict the probabilities.
#     - Choose the class with the higher probability.
# - Classification threshold level
#     - There is a default classification threshold of 0.5.
#     - Class 1 - rain is predicted if the probability of rain is above 0.5.
#     - Class 0 - no rain is predicted if the probability of rain is below 0.5.
# store the probabilities in dataframe
y_pred_prob_df = pd.DataFrame(
data=y_pred_prob,
columns=["Prob of - No rain tomorrow (0)", "Prob of - Rain tomorrow (1)"],
)
y_pred_prob_df
# print the first 10 predicted probabilities for class 1 - Probability of rain
logreg.predict_proba(X_test)[0:10, 1]
# store the predicted probabilities for class 1 - Probability of rain
y_pred1 = logreg.predict_proba(X_test)[:, 1]
# plot histogram of predicted probabilities
# adjust the font size
plt.rcParams["font.size"] = 12
# plot histogram with 10 bins
plt.hist(y_pred1, bins=10)
# set the title of predicted probabilities
plt.title("Histogram of predicted probabilities of rain")
# set the x-axis limit
plt.xlim(0, 1)
# set the title
plt.xlabel("Predicted probabilities of rain")
plt.ylabel("Frequency")
# ### Observations
# - The histogram above is highly positively skewed.
# - The first bin shows that more than 15000 observations have a predicted rain probability between 0.0 and 0.1.
# - Relatively few observations have a predicted probability above 0.5.
# - So the model predicts rain for tomorrow in only a small number of cases.
# - The majority of predictions say it will not rain tomorrow.
# ### Lowering the decision threshold
from sklearn.preprocessing import binarize
for i in range(1, 5):
cm1 = 0
y_pred1 = logreg.predict_proba(X_test)[:, 1]
y_pred1 = y_pred1.reshape(-1, 1)
    y_pred2 = binarize(y_pred1, threshold=i / 10)
cm1 = confusion_matrix(y_test, y_pred2)
print(
"With",
i / 10,
"threshold the Confusion Matrix is ",
"\n\n",
cm1,
"\n\n",
"with",
cm1[0, 0] + cm1[1, 1],
"correct predictions, ",
"\n\n",
cm1[0, 1],
"Type I errors( False Positives), ",
"\n\n",
cm1[1, 0],
"Type II errors( False Negatives), ",
"\n\n",
"Accuracy score: ",
(accuracy_score(y_test, y_pred2)),
"\n\n",
"Sensitivity: ",
cm1[1, 1] / (float(cm1[1, 1] + cm1[1, 0])),
"\n\n",
"Specificity: ",
cm1[0, 0] / (float(cm1[0, 0] + cm1[0, 1])),
"\n\n",
"====================================================",
"\n\n",
)
# ### Comments
# - In binary problems, a threshold of 0.5 is used by default to convert predicted probabilities into class labels.
# - Adjusting the threshold changes the sensitivity and the specificity.
# - Sensitivity and specificity trade off against each other: increasing one lowers the other, and vice versa.
# - We saw above that accuracy increases as the threshold is raised back towards the default.
# - Adjusting the threshold should be one of the very last steps in the model-building process.
# # **18. ROC - AUC**
# [Table of Contents](#0.1)
# ## ROC (Receiver Operating Characteristic) curve
# Another good way to visualize the performance of a classification model is the **ROC curve**, short for **Receiver Operating Characteristic curve**. The **ROC curve** shows how the performance of a classifier changes as its decision threshold is varied.
# The **ROC curve** plots the **True Positive Rate (TPR)** against the **False Positive Rate (FPR)** at various threshold levels.
# The **True Positive Rate (TPR)**, also called **recall**, is defined as `TP / (TP + FN)`.
# The **False Positive Rate (FPR)** is defined as `FP / (FP + TN)`.
# In the ROC curve we focus on the TPR and FPR at a single decision threshold at a time; the curve as a whole plots these two quantities against each other across all thresholds. If we lower the threshold, more observations are classified as positive, which increases both the True Positives (TP) and the False Positives (FP).
#
# plot ROC Curve
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred1, pos_label=1)
plt.figure(figsize=(6, 4))
plt.plot(fpr, tpr, linewidth=2)
plt.plot([0, 1], [0, 1], "k--")
plt.rcParams["font.size"] = 12
plt.title("ROC curve for RainTomorrow classifier")
plt.xlabel("False Positive Rate (1 - Specificity)")
plt.ylabel("True Positive Rate (Sensitivity)")
plt.show()
# The ROC curve helps us choose a decision threshold that balances sensitivity and specificity for the problem at hand.
# ## ROC-AUC
# AUC stands for Area Under the Curve: in short, the integral of the ROC curve between 0 and 1.
# **ROC AUC** stands for **Receiver Operating Characteristic - Area Under Curve**. It is a single-number summary of classifier performance that can be used to compare classifiers. To use it we measure the `area under the ROC curve (AUC)`. A perfect classifier has a ROC AUC of 1, whereas a purely random classifier has a ROC AUC of 0.5.
# In short, **ROC AUC** is the fraction of the ROC plot that lies underneath the curve.
# compute ROC AUC
from sklearn.metrics import roc_auc_score
ROC_AUC = roc_auc_score(y_test, y_pred1)
print("ROC AUC : {:.4f}".format(ROC_AUC))
# ### Comments
# - ROC AUC is a single-number summary of classifier performance: the higher the value, the better the classifier.
# - The ROC AUC of our model approaches 1, so the classifier does a good job of predicting whether it will rain tomorrow.
# calculate cross-validated ROC AUC
from sklearn.model_selection import cross_val_score
Cross_validated_ROC_AUC = cross_val_score(
logreg, X_train, y_train, cv=5, scoring="roc_auc"
).mean()
print("Cross validated ROC AUC : {:.4f}".format(Cross_validated_ROC_AUC))
# # **19. k-Fold Cross Validation**
# [Table of Contents](#0.1)
# Applying 5-Fold Cross Validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(logreg, X_train, y_train, cv=5, scoring="accuracy")
print("Cross-validation scores:{}".format(scores))
# We summarize the cross-validation results by taking their mean.
# compute Average cross-validation score
print("Average cross-validation score: {:.4f}".format(scores.mean()))
# The original model accuracy is 0.8476, while the mean cross-validated accuracy is 0.8474. So cross-validation does not lead to a noticeable performance improvement for this model.
# # **20. Hyperparameter optimization using GridSearch CV**
# [Table of Contents](#0.1)
from sklearn.model_selection import GridSearchCV
parameters = [{"penalty": ["l1", "l2"]}, {"C": [1, 10, 100, 1000]}]
grid_search = GridSearchCV(
estimator=logreg, param_grid=parameters, scoring="accuracy", cv=5, verbose=0
)
grid_search.fit(X_train, y_train)
# examine the best model
# best score achieved during the GridSearchCV
print("GridSearch CV best score : {:.4f}\n\n".format(grid_search.best_score_))
# print parameters that give the best results
print("Parameters that give the best results :", "\n\n", (grid_search.best_params_))
# print estimator that was chosen by the GridSearch
print(
"\n\nEstimator that was chosen by the search :",
"\n\n",
(grid_search.best_estimator_),
)
# calculate GridSearch CV score on test set
print(
"GridSearch CV score on test set: {0:0.4f}".format(
grid_search.score(X_test, y_test)
)
)
# # 💨 [PROJECT] Does the dude have smokes, or nothing at all?
# ------------------
# > Let's answer the burning question: **GOT A SMOKE????**
# 
# !pip install sidetable -q
# !sudo pip uninstall numpy -q -y
# !sudo pip install numpy==1.23.0 -q
# !pip install pycaret -q
# !pip list
############################################################
# fancy imports and setup go here, for example...
############################################################
import matplotlib.pyplot as plt
import numpy as np
### data aggregate and visualisation
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import sidetable
pd.options.plotting.backend = "plotly"
import phik
import seaborn as sns
plt.style.use("dark_background")
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 999)
pd.options.display.float_format = "{:.7f}".format
from datetime import date, datetime
from pathlib import Path
import catboost
import missingno as msno
import sklearn.exceptions
from catboost import CatBoostClassifier, CatBoostRegressor, Pool, cv
from imblearn.over_sampling import (
ADASYN,
SMOTE,
SVMSMOTE,
BorderlineSMOTE,
RandomOverSampler,
)
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
accuracy_score,
auc,
balanced_accuracy_score,
classification_report,
confusion_matrix,
f1_score,
plot_confusion_matrix,
precision_score,
recall_score,
roc_auc_score,
roc_curve,
)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import ( # for standardization
MinMaxScaler,
OneHotEncoder,
OrdinalEncoder,
RobustScaler,
StandardScaler,
)
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
import os
from glob import glob
from sklearn.utils.class_weight import compute_class_weight
from xgboost import XGBClassifier
from pycaret.classification import *
# let's see what we've got
df = pd.read_csv("/kaggle/input/leopard-challenge-classification/train.csv")
test = pd.read_csv("/kaggle/input/leopard-challenge-classification/test.csv")
# drop what's clearly useless
df.drop("oral", axis=1, inplace=True)
test.drop("oral", axis=1, inplace=True)
df.set_index("ID", inplace=True)
test.set_index("ID", inplace=True)
# map tartar: if there's tartar it's a 1, if not it's a 0, no question!
df["tartar"] = df["tartar"].map({"Y": 1, "N": 0}).astype(int).copy()
test["tartar"] = test["tartar"].map({"Y": 1, "N": 0}).astype(int).copy()
df.info(), test.info()
# align the column names between train and test - otherwise it's a mess if they differ!
df.columns = test.columns.tolist() + ["smoking"]
df.head()
# ### 💥 [ALARM] 💥
# -------------------------
# **the column names here are messy - spaces, parentheses and other junk - they really should be cleaned up; I, of course, didn't do it! (a quick cleanup sketch follows below)**
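# A minimal cleanup sketch (preview only, not applied, since the later cells still reference the original names like "height(cm)"): lower-case the names and replace spaces and parentheses with underscores.
cleaned_cols = (
    df.columns.str.strip()
    .str.lower()
    .str.replace(r"[ ()]+", "_", regex=True)
    .str.rstrip("_")
)
print(cleaned_cols.tolist())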
# target distribution - to put it plainly, this chart tells us who has a cigarette and who doesn't!
plt.subplots(figsize=(7, 7), facecolor="#0a0a0a")
plt.pie(
df.smoking.value_counts(),
startangle=90,
wedgeprops={"width": 0.3},
colors=["#6c6c6c", "#c93046"],
)
plt.title(
"Binary Target Balance",
loc="center",
fontsize=24,
color="#c93046",
fontweight="bold",
)
plt.text(
0,
0,
f"{df.smoking.value_counts()[1] / df.smoking.count() * 100:.2f}%",
ha="center",
va="center",
fontweight="bold",
fontsize=42,
color="#c93046",
)
plt.legend(
df.smoking.value_counts().index,
ncol=2,
facecolor="#404956",
edgecolor="#1a2028",
loc="lower center",
fontsize=16,
)
plt.show()
msno.matrix(
df,
)
# any garbage in here?
# remove all None-features / check the missing values so everything looks tidy, in case something slipped through!
drop_cols = df.columns[df.apply(lambda x: x.isin([None, "", " "])).all()]
# df = df.drop(drop_cols, axis=1)
# df.columns = df.columns.str.replace(' ', '_', regex=False).str.lower()
# and now let's display our dataframe, for example!
df.head()
df["age"][np.logical_and(df["age"] > 10, df["age"] < 99)].plot(
kind="hist",
title="Age distr",
nbins=30,
color_discrete_sequence=px.colors.qualitative.Prism,
)
# let's see what kind of folks we have by age.
df.groupby(["age"])["smoking"].agg(["mean"]).sort_values("mean", ascending=False).plot(
kind="barh"
)
# here we see at which ages the heaviest smokers are, for example
df.groupby(["height(cm)"])["smoking"].agg(["mean"]).sort_values(
"mean", ascending=False
).plot(kind="barh")
# smokers of basketball-player height
df.groupby(["weight(kg)"])["smoking"].agg(["mean"]).sort_values(
"mean", ascending=False
).plot(kind="barh")
# and there are heavier smoker dudes too
df.groupby(["tartar"])["smoking"].agg(["mean"]).sort_values(
"mean", ascending=False
).nlargest(10, ["mean"])
# if there's tartar - the dude is most likely a smoker!
# # In short: ML 🍌🍌
# for example, balance smokers and non-smokers by undersampling the majority class
df_1 = df[df["smoking"] == 1].copy()
df_0 = df[df["smoking"] == 0].copy()
df_0 = df_0.sample(df_1.shape[0]).copy()
# stack them into one balanced frame
df_balanced = pd.concat([df_1, df_0])
# a handy scoring helper
def scoring(model, x_train, y_train, x_test, y_test):
y_prob = model.predict_proba(x_test)[:, 1]
y_pred = model.predict(x_test)
cm = confusion_matrix(y_test, y_pred)
    tn, fp, fn, tp = cm.ravel()  # sklearn's confusion_matrix ravels as TN, FP, FN, TP
    all_scores = {
        "Accuracy score": accuracy_score(y_test, y_pred),
        "Precision score": precision_score(y_test, y_pred),
"Recall score": recall_score(y_test, y_pred),
"F1 score": f1_score(y_test, y_pred),
"ROC AUC score": roc_auc_score(y_test, y_prob),
}
print(all_scores)
print(
f"TRUE NEGATIVE [{tn}] \nFALSE POSITIVE [{fp}] \nFALSE NEGATIVE [{fn}] \nTRUE POSITIVE [{tp}]"
)
return all_scores
# smoker / non-smoker
y = df_balanced["smoking"]
# what do we know about the dude: tartar, basketball-player height or extra weight?
x = df_balanced.drop("smoking", axis=1)
# squeezing out > .43 with a good old logistic regression
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, shuffle=True, random_state=11
)
# the folks suggested that scaling the features works better
scaler = RobustScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# here we compare two default baselines, for example
xgb = XGBClassifier()
xgb.fit(x_train, y_train)
scores = scoring(xgb, x_train, y_train, x_test, y_test)
lr = LogisticRegression()
lr.fit(x_train, y_train)
scores = scoring(lr, x_train, y_train, x_test, y_test)
# scale the held-out test set too - but with the scaler already fitted on the training data, so the features are on the same scale the model saw
test_tr = test.copy()
test_tr = scaler.transform(test_tr)
test_tr
# write down every dude: smoker / non-smoker!
test_preds = lr.predict(test_tr)
test["smoking"] = test_preds.tolist()
# test.reset_index()[["ID", "smoking"]].copy().to_csv(
# "subm_lr.csv", sep=",", columns=["ID", "smoking"], index=False
# )
# ### 🙋‍♀️🙋‍♀️🙋‍♀️ okay, let's test various resampling tricks
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, shuffle=True, random_state=11
)
# don't forget to re-split the data before each experiment!
ovs = RandomOverSampler(sampling_strategy="auto", random_state=11)
x_rovs, y_rovs = ovs.fit_resample(x_train, y_train)
xgb.fit(x_rovs, y_rovs)
rovs_scores = scoring(xgb, x_rovs, y_rovs, x_test, y_test)
cls_cb = CatBoostClassifier(verbose=False)
cls_cb.fit(x_rovs, y_rovs)
scores = scoring(cls_cb, x_rovs, y_rovs, x_test, y_test)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, shuffle=True, random_state=11
)
smovs = SMOTE(sampling_strategy="auto", random_state=11, k_neighbors=5, n_jobs=-1)
x_smote, y_smote = smovs.fit_resample(x_train, y_train)
xgb.fit(x_smote, y_smote)
smovs_scores = scoring(xgb, x_smote, y_smote, x_test, y_test)
cls_cb = CatBoostClassifier(verbose=False, auto_class_weights="Balanced")
cls_cb.fit(x_smote, y_smote)
scores = scoring(cls_cb, x_smote, y_smote, x_test, y_test)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, shuffle=True, random_state=11
)
border_smovs = BorderlineSMOTE()
x_border, y_border = border_smovs.fit_resample(x_train, y_train)
xgb.fit(x_border, y_border)
border_smovs_scores = scoring(xgb, x_border, y_border, x_test, y_test)
cls_cb = CatBoostClassifier(verbose=False, auto_class_weights="Balanced")
cls_cb.fit(x_border, y_border)
scores = scoring(cls_cb, x_border, y_border, x_test, y_test)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, shuffle=True, random_state=11
)
b_svm = SVMSMOTE()
x_svm, y_svm = b_svm.fit_resample(x_train, y_train)
xgb.fit(x_svm, y_svm)
b_svm_scores = scoring(xgb, x_svm, y_svm, x_test, y_test)
cls_cb = CatBoostClassifier(verbose=False)
cls_cb.fit(x_svm, y_svm)
scores = scoring(cls_cb, x_svm, y_svm, x_test, y_test)
# x_train, x_test, y_train, y_test = train_test_split(
# x, y, test_size=0.2, shuffle=True, random_state=11
# )
# ada_samp = ADASYN()
# x_ada, y_ada = ada_samp.fit_resample(x_train, y_train)
# xgb.fit(x_ada, y_ada)
# ada_samp_scores = scoring(xgb, x_ada, y_ada, x_test, y_test)
# cls_cb = CatBoostClassifier(verbose=False, auto_class_weights="Balanced")
# cls_cb.fit(x_ada, y_ada)
# scores = scoring(cls_cb, x_ada, y_ada, x_test, y_test)
# >>> in short, the resampling tests didn't pay off!
# # 👳‍♂️ TIME FOR PYCARET
# let's ask PyCaret whether there's a cigarette
s = setup(
df,
target="smoking",
remove_multicollinearity=True,
multicollinearity_threshold=0.9,
fix_imbalance=True,
fix_imbalance_method="SMOTE",
remove_outliers=True,
transformation=True,
)
# !pip install autoviz
# !pip install pycaret[mlops]
# eda(display_format='svg')
best = compare_models(sort="F1", cross_validation=True) # кросс валидация дает буст!
evaluate_model(best)
# functional API
plot_model(best, plot="confusion_matrix")
# tune model
# tune model optuna
# tune_model(dt, search_library = 'optuna')
# Optuna tuning wouldn't start on my server, folks - maybe you'll have better luck!
lr = create_model("lr")
tuned_lr = tune_model(lr, optimize="F1", choose_better=True)
bagged_lr = ensemble_model(lr, method="Boosting", n_estimators=100, choose_better=True)
# PyCaret can do a lot - the crew and I strongly recommend using it. From the heart!
lr = create_model("lr")
dt = create_model("ridge")
lda = create_model("lda")
blender = blend_models([lr, dt, lda], optimize="F1", fold=5, choose_better=True)
blender
# lightgbm = create_model('lightgbm')
# stacker = stack_models([lr, dt, lda], optimize='F1', fold=5, choose_better=True)
# launch dashboard
# dashboard(stacker)
# functional API
predictions = predict_model(best, data=test, raw_score=True)
predictions.head()
# compare models
top5 = compare_models(sort="F1", n_select=5)
# tune models
tuned_top5 = [tune_model(i) for i in top5]
# ensemble models
bagged_top5 = [ensemble_model(i) for i in tuned_top5]
# blend models
blender = blend_models(estimator_list=top5)
# stack models
stacker = stack_models(estimator_list=top5)
# automl
best = automl(optimize="F1")
plot_model(best, plot="confusion_matrix")
predictions = predict_model(best, data=test, raw_score=True)
predictions.head()
submission = (
predictions[["prediction_label"]]
.rename(columns={"prediction_label": "smoking"})
.copy()
)
# submission.to_csv("subm_lr_pyc.csv", sep=",", columns=["ID", "smoking"], index=False)
# submission
# # 🐓 CATBOOST
# split the smokers/non-smokers into train and test again
train_, test_ = train_test_split(df, train_size=0.8, random_state=1)
df["smoking"].mean(), train_["smoking"].mean(), test_["smoking"].mean()
# set up our CatBoost
X = train_.columns.tolist()[:-1]
y = ["smoking"]
# ignored_features = ["ID"]
train_data_ = Pool(
data=train_[X],
label=train_[y],
# cat_features=cat_features
)
valid_data_ = Pool(
data=test_[X],
label=test_[y],
# cat_features=cat_features
)
# set the hyperparameters with a smart face
params = {
"leaf_estimation_method": "Newton",
"learning_rate": 0.01,
"max_depth": 8,
"bootstrap_type": "MVS",
"subsample": 0.8,
"random_state": 42,
"verbose": 200,
"eval_metric": "F1",
"auto_class_weights": "Balanced",
"boosting_type": "Ordered",
# "ignored_features": ignored_features,
}
model_cb_cut = CatBoostClassifier(
**params, early_stopping_rounds=100, use_best_model=True
)
model_cb_cut.fit(train_data_, eval_set=valid_data_, plot=False)
y_test_pred = model_cb_cut.predict(data=test_[X])
# print(f'Balanced accuracy: {balanced_accuracy_score(test[y], y_test_pred):.2f}')
# print(f'Precision: {precision_score(test[y], y_test_pred):.2f}')
# print(f'Recall: {recall_score(test[y], y_test_pred):.2f}')
print(f"F1: {f1_score(test_[y], y_test_pred):.5f}")
print("*" * 50)
y_pred = model_cb_cut.predict_proba(test_[X])[:, 1]
auc_score_CAT = roc_auc_score(test_[y], y_pred)
print("*" * 50)
cm = confusion_matrix(test_[y], y_test_pred)
print(classification_report(test_[y], y_test_pred))
model_cb_cut.save_model(f"model_cb_cut")
print("*" * 50)
print(cm)
print()
tn, fp, fn, tp = cm.ravel()  # sklearn order: TN, FP, FN, TP
print(
f"TRUE NEGATIVE [{tn}] \nFALSE POSITIVE [{fp}] \nFALSE NEGATIVE [{fn}] \nTRUE POSITIVE [{tp}]"
)
print("*" * 50)
# this is how CatBoost did at guessing smokers / non-smokers
model_rtchk = CatBoostClassifier()
model_rtchk.load_model(f"model_cb_cut")
test["predictions"] = model_rtchk.predict(test)
# submission_cb = (
# test[["ID", "predictions"]].rename(columns={"predictions": "smoking"}).copy()
# )
# submission_cb.to_csv("subm_cb.csv", sep=",", columns=["ID", "smoking"], index=False)
# submission_cb
# # 💦 One more attempt to guess whether the dude has smokes
df[:2]
df.columns
# ml = df.drop("oral", axis=1).copy() если кент орал - его нада дропнуть!
ml = df.copy()
# build a fancy column transformer
transformer = make_column_transformer(
(
StandardScaler(),
[
"age",
"height(cm)",
"weight(kg)",
"waist(cm)",
"eyesight(left)",
"eyesight(right)",
"hearing(left)",
"hearing(right)",
"systolic",
"relaxation",
"fasting blood sugar",
"Cholesterol",
"triglyceride",
"HDL",
"LDL",
"hemoglobin",
"Urine protein",
"serum creatinine",
"AST",
"ALT",
"Gtp",
],
),
remainder="passthrough",
)
transformed = transformer.fit_transform(ml)
transformed_ml = pd.DataFrame(transformed, columns=transformer.get_feature_names_out())
# make sure nothing broke - fix the dtypes
transformed_ml = transformed_ml.astype(float)
transformed_ml["remainder__smoking"] = transformed_ml["remainder__smoking"].astype(int)
# transformed_ml.reset_index(inplace=True)
# transformed_ml["remainder__ID"] = transformed_ml["remainder__ID"].astype(int)
transformed_ml["remainder__dental caries"] = transformed_ml[
"remainder__dental caries"
].astype(int)
transformed_ml["remainder__tartar"] = transformed_ml["remainder__tartar"].astype(int)
transformed_ml.head()
# check that the split keeps the target ratio, as it should!
train__, test__ = train_test_split(transformed_ml, train_size=0.8, random_state=1)
ml["smoking"].mean(), train__["remainder__smoking"].mean(), test__[
"remainder__smoking"
].mean()
# and now we find the smokers and reasonably ask - got anything?
X = train__.columns.tolist()[:-1]
y = ["remainder__smoking"]
# cat_features = cut_cat_cols
# ignored_features = ["remainder__ID"]
train_data = Pool(
data=train__[X],
label=train__[y],
# ignored_features=ignored_features
# cat_features=cat_features
)
valid_data = Pool(
data=test__[X],
label=test__[y],
# ignored_features=ignored_features
# cat_features=cat_features
)
# params = {
# 'task_type': 'CPU',
# # 'loss_function': 'Logloss',
# 'iterations': 1200,
# 'eval_metric': 'F1',
# 'verbose' : 200,
# # 'custom_loss': ['Recall'],
# 'random_seed': 1,
# 'learning_rate': .01,
# # "num_trees": 500,
# 'auto_class_weights': 'Balanced',
# 'ignored_features':ignored_features
# }
# F1: 0.433455
params = {
"leaf_estimation_method": "Newton",
"learning_rate": 0.02,
"max_depth": 8,
"bootstrap_type": "Bernoulli",
"subsample": 0.8,
"random_state": 42,
"verbose": 200,
"eval_metric": "F1",
# "early_stopping_rounds" : 100,
"auto_class_weights": "Balanced",
"boosting_type": "Ordered",
}
model_cb_t = CatBoostClassifier(
**params, early_stopping_rounds=100, use_best_model=True
)
model_cb_t.fit(train_data, eval_set=valid_data, plot=False)
y_test_pred = model_cb_t.predict(data=test__[X])
# print(f'Balanced accuracy: {balanced_accuracy_score(test[y], y_test_pred):.2f}')
# print(f'Precision: {precision_score(test[y], y_test_pred):.2f}')
# print(f'Recall: {recall_score(test[y], y_test_pred):.2f}')
print(f"F1: {f1_score(test__[y], y_test_pred):.6f}")
print("*" * 50)
print("*" * 50)
y_pred = model_cb_t.predict_proba(test__[X])[:, 1]
auc_score_CAT = roc_auc_score(test__[y], y_pred)
print("*" * 50)
print("*" * 50)
cm = confusion_matrix(test__[y], y_test_pred)
print(classification_report(test__[y], y_test_pred))
# model_cb_t.save_model(f"model_cb_t")
print("*" * 50)
print(cm)
print()
tn, fp, fn, tp = cm.ravel()
print(
f"TRUE NEGATIVE [{tn}] \nFALSE POSITIVE [{fp}] \nFALSE NEGATIVE [{fn}] \nTRUE POSITIVE [{tp}]"
)
print("*" * 50)
# # Optuna tuning crashed my machine (computer)⚗ so I leave the setup here for anyone with more compute!🎠
#
import optuna
from optuna.visualization import plot_optimization_history, plot_param_importances
df
# features and target: predict who smokes and who does not
X = df.drop(["smoking"], axis=1).copy()
y = df["smoking"]
def objective(trial, data=X, target=y):
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=42
)
ignored_features = ["ID"]
params = {
"max_depth": trial.suggest_int("max_depth", 3, 16),
"learning_rate": trial.suggest_categorical(
"learning_rate", [0.005, 0.02, 0.05]
),
"n_estimators": trial.suggest_int("n_estimators", 2000, 8000),
"max_bin": trial.suggest_int("max_bin", 200, 400),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 1, 300),
"l2_leaf_reg": trial.suggest_float("l2_leaf_reg", 0.0001, 1.0, log=True),
"subsample": trial.suggest_float("subsample", 0.1, 0.8),
"random_seed": 42,
"loss_function": "Logloss",
"auto_class_weights": "Balanced",
"eval_metric": "F1",
"boosting_type": "Ordered",
"ignored_features": ignored_features,
"bootstrap_type": trial.suggest_categorical(
"bootstrap_type", ["MVS", "Bernoulli"]
),
}
model_opt = CatBoostClassifier(**params)
model_opt.fit(
X_train,
y_train,
eval_set=[(X_val, y_val)],
early_stopping_rounds=100,
verbose=False,
)
y_test_pred = model_opt.predict(X_val)
f1 = f1_score(y_val, y_test_pred)
return f1
# 💌 Advice from the community 💬
# -----------------
# Most of the classification problems I've tackled are similar in nature, so a large class imbalance is quite common.
# It is not clear whether you are using training-validation sets to build and fine tune the model. Cross-fold validation is generally preferred since it gives more reliable model performance estimates.
# The F1 score is a good classification performance measure; I find it more important than the AUC-ROC metric. It's best to use a performance measure which matches the real-world problem you're trying to solve.
# Without having access to the dataset, I'm unable to give exact pointers; so I'm suggesting a few directions to approach this problem and help improve the F1 score:
# Use better features, sometimes a domain expert (specific to the problem you're trying to solve) can give relevant pointers that can result in significant improvements.
# Use a better classification algorithm and better hyper-parameters.
# Over-sample the minority class, and/or under-sample the majority class to reduce the class imbalance.
# Use higher weights for the minority class, although I've found over-under sampling to be more effective than using weights.
# Choose an optimal cutoff value to convert the continuous-valued class probabilities output by your algorithm into a class label. This is as important as a good AUC metric but is overlooked quite often. A word of caution though: the choice of the cutoff should be guided by the users by evaluating the relevant trade-offs (a sketch of such cutoff tuning follows below).
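# Following the last point above about choosing an optimal cutoff: a minimal,
# illustrative sketch (not part of the original notebook) that scans candidate
# thresholds over the predicted probabilities of model_cb_t on the hold-out split
# and keeps the one maximizing F1. The threshold grid and the feature-column
# selection are assumptions.
import numpy as np
from sklearn.metrics import f1_score

feat_cols = test__.columns.tolist()[:-1]  # same feature order used for training
proba_cut = model_cb_t.predict_proba(test__[feat_cols])[:, 1]
thresholds = np.linspace(0.1, 0.9, 81)
f1_by_threshold = [
    f1_score(test__["remainder__smoking"], (proba_cut >= t).astype(int))
    for t in thresholds
]
best_cut = thresholds[int(np.argmax(f1_by_threshold))]
print(f"best cutoff: {best_cut:.2f}, F1: {max(f1_by_threshold):.4f}")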
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50)
print("Best value:", study.best_value)
# balance the dataset by downsampling the majority class of the target
df_1 = df[df["smoking"] == 1].copy()
df_0 = df[df["smoking"] == 0].copy()
df_0 = df_0.sample(df_1.shape[0]).copy()
# df_1.shape[0], df_0.shape[0]
df_balanced = pd.concat([df_1, df_0])
df_balanced
s = setup(
df_balanced,
target="smoking",
remove_multicollinearity=True,
multicollinearity_threshold=0.9,
remove_outliers=True,
transformation=True,
) # 0.4158
best = compare_models(sort="F1", cross_validation=True)  # cross-validation gives a boost!
plot_model(best, plot="confusion_matrix")
predictions = predict_model(best, data=test, raw_score=True)
submission = (
predictions[["prediction_label"]]
.rename(columns={"prediction_label": "smoking"})
.copy()
)
# submission.reset_index(inplace=True)
# submission.to_csv("subm_sample.csv", sep=",", columns=["ID", "smoking"], index=False)
# submission
# # 🪓 CatBoost on the downsampled (balanced) dataset
# ------------------
# **equal numbers of smokers and non-smokers**
# 👒
# balance the dataset by downsampling the majority class of the target
df_1 = df[df["smoking"] == 1].copy()
df_0 = df[df["smoking"] == 0].copy()
df_0 = df_0.sample(df_1.shape[0]).copy()
# df_1.shape[0], df_0.shape[0]
df_balanced = pd.concat([df_1, df_0])
df_balanced
train___, test___ = train_test_split(df_balanced, train_size=0.8, random_state=1)
df_balanced["smoking"].mean(), train___["smoking"].mean(), test___["smoking"].mean()
X = train___.columns.tolist()[:-1]
y = ["smoking"]
# cat_features = cut_cat_cols
# ignored_features = ['ID']
train_data_ = Pool(
data=train___[X],
label=train___[y],
# ignored_features=ignored_features
# cat_features=cat_features
)
valid_data_ = Pool(
data=test___[X],
label=test___[y],
# ignored_features=ignored_features
# cat_features=cat_features
)
params = {
# 'leaf_estimation_method': 'Newton',
"learning_rate": 0.01,
"max_depth": 8,
# 'bootstrap_type': 'MVS',
# 'subsample': 0.8,
"random_state": 42,
"verbose": 200,
"eval_metric": "F1",
# 'auto_class_weights': 'Balanced',
# 'boosting_type': 'Ordered',
}
model_cb_sam = CatBoostClassifier(
**params, early_stopping_rounds=100, use_best_model=True
)
model_cb_sam.fit(train_data_, eval_set=valid_data_, plot=False)
y_test_pred = model_cb_sam.predict(data=test___[X])
# print(f'Balanced accuracy: {balanced_accuracy_score(test[y], y_test_pred):.2f}')
# print(f'Precision: {precision_score(test[y], y_test_pred):.2f}')
# print(f'Recall: {recall_score(test[y], y_test_pred):.2f}')
print(f"F1: {f1_score(test___[y], y_test_pred):.5f}")
print("*" * 50)
print("*" * 50)
y_pred = model_cb_sam.predict_proba(test___[X])[:, 1]
auc_score_CAT = roc_auc_score(test___[y], y_pred)
# predict_mine = np.where(auc_score_CAT > .13, 1, 0)
print("*" * 50)
# print(f"ROC-AUC > CATClassifier proba: {round(auc_score_CAT, 3)}")
print("*" * 50)
cm = confusion_matrix(test___[y], y_test_pred)
print(classification_report(test___[y], y_test_pred))
model_cb_sam.save_model(f"model_cb_sam")
print("*" * 50)
print(cm)
print()
tn, fp, fn, tp = cm.ravel()
print(
f"TRUE NEGATIVE [{tn}] \nFALSE POSITIVE [{fp}] \nFALSE NEGATIVE [{fn}] \nTRUE POSITIVE [{tp}]"
)
print("*" * 50)
model_rtchk = CatBoostClassifier()
model_rtchk.load_model(f"model_cb_sam")
test["smoking"] = model_rtchk.predict(test)
# submission_cb = test.reset_index()[["ID", "smoking"]].copy()
# submission_cb.to_csv("subm_cb_sam.csv", sep=",", columns=["ID", "smoking"], index=False)
# submission_cb
|
# #### This notebook is a continuation of work begun in another notebook (https://www.kaggle.com/code/parushkin/taxi-v3-q-learning-mcts). Here is considering a solution using TD instead of MCTS.
SEED = 42
import time
import random
import pickle
import numpy as np
import pandas as pd
import gymnasium as gym
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
is_ipython = "inline" in plt.get_backend()
plt.ion()
from IPython.display import display, Markdown, clear_output
random.seed(SEED)
np.random.seed(SEED)
env = gym.make("Taxi-v3", render_mode="rgb_array")
print(f"OBSERVATON SPACE: {env.observation_space}")
print(f"ACTION SPACE: {env.action_space}")
obs, info = env.reset(seed=SEED)
print(obs)
print(info)
env.s = env.initial_state_distrib[6]
env.lastaction = None
env.taxi_orientation = 0
plt.imshow(env.render())
# Rewards:
# * -1 per step unless other reward is triggered.
# * +20 delivering passenger.
# * -10 executing “pickup” and “drop-off” actions illegally.
# #### Temporal difference learning
# In general, it learns faster than MCTS.
# As with MCTS, three options are considered:
# * Model-free. Use the environment as is, without changes. Do not use additional information about valid actions, do not change rewards.
# * Add a penalty for unavailable actions.
# * Use information about available moves for each state.
# A Q-table mask (whether an action is available in a given state) can be collected during learning (treating the environment as a black box), or, more simply, extracted from the environment's source code.
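# For reference, the tabular update implemented in TD_train below is the standard Q-learning rule
# $$Q(s,a) \leftarrow Q(s,a) + \alpha \big[\, r + \gamma \max_{a'} Q(s',a') - Q(s,a) \,\big],$$
# where $\alpha$ is the learning_rate and $\gamma$ is gamma; with the mask or penalty options, only the available actions (or the modified reward) enter this update.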
def greedy_policy(Qtable, state, action_mask=None):
Qa = Qtable[state]
if action_mask is not None:
Qa = np.where(action_mask, Qa, -np.inf)
action = np.random.choice(np.where(Qa == Qa.max())[0])
return action
def epsilon_greedy_policy(Qtable, state, epsilon, action_space, action_mask=None):
if random.uniform(0, 1) > epsilon:
action = greedy_policy(Qtable, state, action_mask)
else:
action = action_space.sample(action_mask)
return action
def initialize_q_table(state_space, action_space):
return np.zeros((state_space, action_space))
def get_epsilon(i, min_epsilon, max_epsilon, decay_rate):
return min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * i)
def eval_episode(env, Qtable, max_steps=100, seed=None):
total_reward = 0
routes = np.zeros(
(Qtable.shape[0], 2)
) # 0 - before first pick up client, 1 - after
n_route = 0
steps = max_steps
state, _ = env.reset(seed=seed)
routes[state, n_route] = 1
for i in range(max_steps):
if n_route == 0 and list(env.decode(state))[2] == 4:
n_route = 1
action = greedy_policy(Qtable, state)
new_state, reward, terminated, truncated, _ = env.step(action)
routes[new_state, n_route] += 1
total_reward += reward
if terminated or truncated:
steps = i + 1
break
state = new_state
return steps, total_reward, routes
def evaluate(env, Qtable, n_eval_episodes=300, max_steps=100, seeds=None, silent=False):
results = {}
obs_visited = np.zeros(Qtable.shape[0])
episode_rewards = []
if seeds is not None:
n_eval_episodes = len(seeds)
for i in tqdm(range(n_eval_episodes), disable=silent):
s, r, routes = eval_episode(
env, Qtable, max_steps, seed=seeds[i] if seeds is not None else None
)
results[seeds[i] if seeds is not None else f"NoSeed_{i}"] = (
s,
r,
routes[:, 0],
routes[:, 1],
)
obs_visited += routes.sum(-1)
episode_rewards.append(r)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
return obs_visited, mean_reward, std_reward, results
def plot_routes(R):
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
shw1 = axs[0].imshow(R[:, 0].reshape(-1, 20).sum(-1).reshape(5, 5), cmap="GnBu")
plt.colorbar(shw1, ax=axs[0])
axs[0].title.set_text("to passenger")
shw2 = axs[1].imshow(R[:, 1].reshape(-1, 20).sum(-1).reshape(5, 5), cmap="GnBu")
plt.colorbar(shw2, ax=axs[1])
axs[1].title.set_text("to destination")
def one_episode_preview(Qtable, max_steps=100, seed=None, use_mask=False):
env = gym.make("Taxi-v3", render_mode="rgb_array")
state, info = env.reset(seed=seed)
img = plt.imshow(env.render())
for _ in range(max_steps):
img.set_data(env.render())
plt.axis("off")
display(plt.gcf())
clear_output(wait=True)
time.sleep(0.1)
action = greedy_policy(Qtable, state, info["action_mask"] if use_mask else None)
state, _, terminated, _, info = env.step(action)
if terminated:
break
MAX_STEPS = 100
gamma = 0.95
learning_rate = 0.7
max_epsilon = 1.0
min_epsilon = 0.5
n_ep = [25, 50, 100, 200, 400, 800, 1600, 3200]
param = {
"with_mask": {"use_mask": True, "penalty": False},
"mod_penalty": {"use_mask": False, "penalty": True},
"model_free": {"use_mask": False, "penalty": False},
}
experiments = {
f"{k}_{n}": {
"n_training_episodes": n,
"decay_rate": 25 / n,
"use_mask": v["use_mask"],
"penalty": v["penalty"],
}
for n in n_ep
for k, v in param.items()
}
def TD_train(
n_training_episodes,
decay_rate,
max_steps=MAX_STEPS,
use_mask=False,
penalty=False,
silent=False,
):
env = gym.make("Taxi-v3")
env.action_space.seed(
SEED + n_training_episodes + use_mask + penalty
) # important for reproducibility
env.reset(seed=SEED + n_training_episodes + use_mask + penalty)
Qtable = initialize_q_table(env.observation_space.n, env.action_space.n)
Q_mask = np.where(
np.array([[s != x[a][0][1] for a in range(6)] for s, x in env.P.items()]),
0,
-np.inf,
)
visits = np.zeros_like(Qtable)
for episode in tqdm(range(n_training_episodes), disable=silent):
epsilon = get_epsilon(episode, min_epsilon, max_epsilon, decay_rate)
        state, info = env.reset()  # the seed can be omitted here since the RNG is not re-seeded
for _ in range(max_steps):
action = epsilon_greedy_policy(
Qtable,
state,
epsilon,
env.action_space,
info["action_mask"] if use_mask else None,
)
new_state, reward, terminated, truncated, info = env.step(action)
if penalty and new_state == state:
reward = -5
TD = (
reward
+ gamma
* np.max(Qtable[new_state] + (Q_mask[new_state] if use_mask else 0))
- Qtable[state][action]
)
Qtable[state][action] = Qtable[state][action] + learning_rate * TD
visits[state][action] += 1
if terminated or truncated:
break
state = new_state
return Qtable, visits
# Evaluating the results requires information about the maximum possible reward for each possible initial state of the environment. Such information was collected in another notebook (https://www.kaggle.com/code/parushkin/taxi-v3-true-policy-max-reward).
df = pd.read_csv(
"/kaggle/input/taxi-v3-true-policy-max-reward/Taxi_v3_seed-reward.csv", index_col=0
)
benchmark = df["reward"].to_dict()
seeds = list(benchmark.keys())
# Each Q-table is evaluated twice, with and without masking.
Q_mask = np.array([[s != x[a][0][1] for a in range(6)] for s, x in env.P.items()])
def evaluate_experiment(Qtable, use_mask=False, silent=False):
if use_mask:
Qtable = np.where(Q_mask, Qtable, -np.inf)
obs_visited, mean_reward, std_reward, eval_results = evaluate(
env, Qtable, max_steps=MAX_STEPS, seeds=seeds, silent=silent
)
optim, no_path = 0, 0
for k, v in eval_results.items():
if v[1] == benchmark[k]:
optim += 1
if v[0] == MAX_STEPS:
no_path += 1
if not silent:
print("Masked Q-table" if use_mask else "Vanilla Q-table", "\n")
print(
f'Mean reward: {mean_reward:.6f}\tstd: {std_reward:.6f}.\nNumber of "path not found" solutions: {no_path}.\nNumber of "optimal" solutions: {optim}'
)
return {
"Qtable": Qtable,
"visits": obs_visited,
"mean_reward": mean_reward,
"std_reward": std_reward,
"optim": optim,
"no_path": no_path,
}
results_log = {}
for experiment_name, experiment_params in tqdm(experiments.items()):
random.seed(SEED)
np.random.seed(SEED)
Qtable, visits = TD_train(**experiment_params, silent=True)
results_log[experiment_name] = {
"Vanilla Q-table": {
**evaluate_experiment(Qtable, use_mask=False, silent=True),
**experiments[experiment_name],
},
"Masked Q-table": {
**evaluate_experiment(Qtable, use_mask=True, silent=True),
**experiments[experiment_name],
},
}
# interim dump
with open("results_log_TD.pkl", "wb") as f:
pickle.dump(results_log, f)
with open("results_log_TD.pkl", "rb") as f:
results_log = pickle.load(f)
data_dict = {
(k, x): {
_k: _v for _k, _v in results_log[k][x].items() if _k not in ("Qtable", "visits")
}
for k in results_log.keys()
for x in results_log[k].keys()
}
df = (
pd.DataFrame.from_dict(data_dict, orient="index")
.reset_index()
.rename({"level_0": "experiment_name", "level_1": "val_type"}, axis=1)
)
df["experiment_type"] = ["_".join(x.split("_")[1:-1]) for x in df["experiment_name"]]
for _g, _df in df.groupby("experiment_type"):
for __g, __df in _df.groupby("val_type"):
display(Markdown(f"### {_g} / {__g}"))
display(
__df[
["n_training_episodes", "mean_reward", "optim", "no_path"]
].reset_index(drop=True)
)
for type, _df in df.groupby("val_type"):
display(Markdown(f"### {type}"))
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
for type, __df in _df.groupby("experiment_type"):
for i, col in enumerate(["mean_reward", "optim", "no_path"]):
sns.scatterplot(
data=__df, y=col, x="n_training_episodes", ax=axs[i], label=type
)
sns.lineplot(
data=__df, y=col, x="n_training_episodes", linestyle="--", ax=axs[i]
)
axs[i].set_xscale("log")
plt.show()
# #### The longest route
longest_route_seed = [k for k, v in benchmark.items() if v == min(benchmark.values())]
Qtable = results_log["with_mask_3200"]["Masked Q-table"]["Qtable"]
_, _, R = eval_episode(env, Qtable, seed=longest_route_seed[0])
plot_routes(R)
one_episode_preview(Qtable, seed=longest_route_seed[0])
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **What Is The Fear And Greed Index?**
# ### **The Fear and Greed Index is a tool that helps investors and traders analyze the Bitcoin and Crypto market from a sentiment perspective. It identifies the extent to which the market is becoming overly fearful or overly greedy. Hence why it is called the Fear and Greed Index.**
# ### **source : https://www.lookintobitcoin.com/charts/bitcoin-fear-and-greed-index/**
df = pd.read_csv("/kaggle/input/bitcoin-and-fear-and-greed/dataset.csv")
df
# # **Bitcoin Fear and greed days split overall**
bar_chart = df["Value_Classification"].hist()
bar_chart.set_title("Bitcoin fear and greed index 1 Feb to 31 Mar 2023")
bar_chart.set_ylabel("Number of days")
print(df["Value_Classification"].value_counts())
# # **Bitcoin Fear and Greed timeline**
from matplotlib.patches import Patch
from matplotlib.ticker import FuncFormatter
import matplotlib.pyplot as plt
def format_yaxis(value, tick_number):
return f"{value:,.0f}"
# Convert the Date column to a datetime object
df["Date"] = pd.to_datetime(df["Date"])
# Normalize the Value column to be between 0 and 1
df["Value_Norm"] = (df["Value"] - df["Value"].min()) / (
df["Value"].max() - df["Value"].min()
)
# Change the theme
plt.style.use("ggplot")
# Create a colormap that maps normalized values to colors
cmap = plt.get_cmap("RdYlGn")
# Plot the data using the scatter and plot functions
fig, ax = plt.subplots(figsize=(16, 6))
ax.scatter(df["Date"], df["BTC_Closing"], c=df["Value_Norm"], cmap=cmap)
ax.plot(df["Date"], df["BTC_Closing"], c="black", alpha=0.3)
# Set the title and axis labels
ax.set_title("Bitcoin Fear and Greed timeline - 1 Feb to 31 Mar 2023 ")
ax.set_xlabel("Date")
ax.set_ylabel("BTC Closing Price U$")
# Create custom legend handles and labels
handles = [
Patch(facecolor=cmap(0.0), edgecolor="black", label="Extreme Fear"),
Patch(facecolor=cmap(0.25), edgecolor="black", label="Fear"),
Patch(facecolor=cmap(0.5), edgecolor="black", label="Neutral"),
Patch(facecolor=cmap(0.75), edgecolor="black", label="Greed"),
Patch(facecolor=cmap(1.0), edgecolor="black", label="Extreme Greed"),
]
labels = ["Extreme Fear", "Fear", "Neutral", "Greed", "Extreme Greed"]
# Add a custom legend to the plot
ax.legend(handles=handles, labels=labels)
# Format the y-axis tick labels to include a thousands separator
ax.yaxis.set_major_formatter(FuncFormatter(format_yaxis))
plt.show()
# # **Bitcoin Fear and greed per month**
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
df["Date"] = pd.to_datetime(df["Date"])
# Extract the short name of the month from the date column
df["month"] = df["Date"].dt.strftime("%b")
df
# Create the pivot table
pivot = pd.pivot_table(
df, index="month", columns="Value_Classification", values="Value", aggfunc="count"
)
# Define the month order
month_order = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
# Reorder the index
pivot = pivot.reindex(month_order)
# Define the column order
column_order = ["Extreme Fear", "Fear", "Neutral", "Greed", "Extreme Greed"]
# Reorder the columns
pivot = pivot.reindex(column_order, axis=1)
# Change the theme
plt.style.use("ggplot")
# Create a custom color map
cmap = plt.get_cmap("RdYlGn")
# Create the bar chart
ax = pivot.plot(
kind="bar",
title="Bitcoin fear and greed index per Month - 1 Feb to 31 Mar 2023",
xlabel="Months",
ylabel="Number of days",
figsize=(16, 6),
colormap=cmap,
legend=False,
)
# Create a custom legend
ax.legend(column_order)
# Show the plot
plt.show()
def highlight_max(s):
is_max = s == s.max()
return ["background-color: yellow" if v else "" for v in is_max]
pivot.style.apply(highlight_max, axis=1)
|
# This notebook contains the analysis of Resistive Plate Chamber (RPC) detectors. The data is a pandas dataframe where each row represents one event (i.e. an acquisition from the electronics triggered by an external source). Each row stores the collected `event_charge`, the type of signal (`1=avalanche, 3=streamer, 0=noise, -1=uncategorized`), which depends on the `event_charge` (expressed in `pC`), and other parameters not relevant here. There is also a column `is_detected` that states whether the RPC detector has effectively recognized a particle, classifying the event as a 'detection event'.
# The detector efficiency is defined as the number of `is_detected` events over the total number of events. This efficiency generally increases with the voltage at which the RPC is operated.
# As the voltage increases, the `event_charge` distribution also changes. The goal is to find suitable estimators for the `event_charge` distribution at different `voltage` values.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pathlib import Path
import matplotlib.pyplot as plt
import scipy.optimize as so
from scipy.special import erfc
df = pd.read_csv("/kaggle/input/rpc-event-charge/rpc_data_kaggle.csv", index_col=0)
df["voltage"] = df.voltage.astype(int)
df.head()
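# A minimal illustrative sketch (not part of the original analysis): the efficiency
# defined above can be summarized per operating voltage as the fraction of detected
# events in each group.
efficiency = df.groupby("voltage")["is_detected"].mean()
print(efficiency)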
# Below we query for all detected events of type `1` (i.e. `avalanche` signals), group by voltage, and plot a histogram of the event_charge in the first 5 pC
axs = (
df.query("is_detected and event_type == 1")
.groupby("voltage")
.event_charge.plot.hist(bins=np.arange(0, 5, 0.2), histtype="step", legend=True)
)
# As you can see, the distribution of `event_charge` changes with the `voltage`. I would like to summarize the results by means of two estimators: one representing the central (mean) value and another describing the spread (variance) of the samples.
def expomodgaus(x, h, m, s, t):
"""Parameters definition:
from: https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
h = height
m = mean
s = sigma
t = tau
"""
return (
h
* s
/ t
* np.sqrt(np.pi / 2)
* np.exp(1 / 2 * (s / t) ** 2 - (x - m) / t)
* erfc(1 / np.sqrt(2) * (s / t - (x - m) / s))
)
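# An illustrative sketch (not from the original notebook) of how the expomodgaus
# function defined above could be fitted to the binned event_charge of a single
# voltage group with scipy.optimize.curve_fit; the initial guess p0 is an
# assumption and may need tuning.
sub = df.query("is_detected and event_type == 1")
group = sub[sub.voltage == sub.voltage.max()]
counts, edges = np.histogram(group.event_charge, bins=np.arange(0, 5, 0.2))
centers = 0.5 * (edges[:-1] + edges[1:])
popt, _ = so.curve_fit(
    expomodgaus, centers, counts, p0=[counts.max(), 1.0, 0.5, 0.5], maxfev=10000
)
print(dict(zip(["h", "m", "s", "t"], np.round(popt, 3))))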
# # Are these distributions following some particular pdf?
# The plot below is similar to the histogram above, but drawn as a line plot
for ix, group in df.query("is_detected and event_type == 1").groupby("voltage"):
bins, edges = np.histogram(group.event_charge, bins=np.arange(0, 100, 0.1))
plt.plot(edges[:-1], bins, label=f"voltage={ix} V")
plt.xlim(0, 20)
plt.legend()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # import seaborn
import matplotlib.pyplot as plt # import plot
from sklearn import tree # import trees
from sklearn import ensemble # import forest
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# load train and test data
train = pd.read_csv("/kaggle/input/cap-4611-spring-21-assignment-1/train.csv")
test = pd.read_csv("/kaggle/input/cap-4611-spring-21-assignment-1/test.csv")
# check that data was loaded correctly
train.head()
# check training info for missing data
train.info()
train.isnull()
# get info via describe
train.describe()
# looking for outliers via boxplots
for column in train:
if train.loc[:, column].dtypes == float:
train.boxplot([column])
plt.figure()
# replace outliers with the median value and confirm
for column in train:
if train.loc[:, column].dtypes == float:
train[column] = train[column].mask(
train[column] > train[column].quantile(0.75), train[column].median()
)
train.describe()
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# use bankrupt as our target
features = train.columns[2:]
y = train["Bankrupt"]
# get dummies and train and fit our forest
x = pd.get_dummies(train[features])
testData = pd.get_dummies(test[features])
# create our tree
tree = DecisionTreeClassifier(max_depth=5)
# fit our tree
tree.fit(x, y)
# make prediction
tree_prediction = tree.predict(testData)
print(tree_prediction)
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
# drop the last label so the label length matches the number of test predictions
ydrop = y.drop(y.tail(1).index, inplace=False)
# find roc curve and use it to calculate our roc auc score
fpr, tpr, thresholds = roc_curve(ydrop, tree_prediction)
roc_auc = auc(fpr, tpr)
# calculate F1 score and accuracy
f1 = f1_score(ydrop, tree_prediction)
acc = accuracy_score(ydrop, tree_prediction)
# print scores
print("ROC AUC:", roc_auc, ", F1 Score:", f1, ", Accuracy", acc)
# create our forest
forest = RandomForestClassifier(criterion="entropy", max_depth=5)
forest.fit(x, y)
# make prediction
forest_prediction = forest.predict(testData)
print(forest_prediction)
# find roc curve and use it to calculate our roc auc score
fpr, tpr, thresholds = roc_curve(ydrop, forest_prediction)
roc_auc = auc(fpr, tpr)
# calculate F1 score and accuracy
f1 = f1_score(ydrop, forest_prediction)
acc = accuracy_score(ydrop, forest_prediction)
# print scores
print("ROC AUC:", roc_auc, ", F1 Score:", f1, ", Accuracy", acc)
|
# 
# image from [link](https://www.futurebrand.com/news/2015/what-does-a-world-of-impulsive-connected-consumers-mean-for-future-brands)
# ## Association Rule Learning
# Association Rule Learning is also known as basket analysis.
# Product recommendations can be made based on the rules learned through association analysis.
# It is a rule-based machine learning technique used to discover patterns in data.
# ### Apriori Algorithm
# It is a method of basket analysis that is used to discover associations among products.
# The Apriori algorithm has three metrics that allow us to observe the patterns and structures of relationships within the dataset.
# These three metrics are:
# * Support(X,Y) = Freq(X,Y) / N >>> The probability of X and Y occurring together.
# * Confidence(X,Y) = Freq(X,Y) / Freq(X) >>> The probability of Y being purchased when X is purchased.
# * Lift = Support(X,Y) / (Support(X) * Support(Y)) >>> How many times more likely Y is to be purchased when X is purchased, compared with the two being bought independently (see the small example below).
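# A tiny, self-contained example (not from the dataset below) computing the three
# metrics for two items X and Y over a handful of toy baskets.
baskets = [{"X", "Y"}, {"X"}, {"X", "Y", "Z"}, {"Z"}, {"Y"}]
n = len(baskets)
support_x = sum("X" in b for b in baskets) / n          # 3/5
support_y = sum("Y" in b for b in baskets) / n          # 3/5
support_xy = sum({"X", "Y"} <= b for b in baskets) / n  # 2/5
confidence_xy = support_xy / support_x                  # 2/3
lift_xy = support_xy / (support_x * support_y)          # ~1.11
print(support_xy, confidence_xy, lift_xy)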
# ### Dataset Details
# BillNo: 6-digit number assigned to each transaction. Nominal.
# Itemname: Product name. Nominal.
# Quantity: The quantities of each product per transaction. Numeric.
# Date: The day and time when each transaction was generated. Numeric.
# Price: Product price. Numeric.
# CustomerID: 5-digit number assigned to each customer. Nominal.
# Country: Name of the country where each customer resides. Nominal.
#
import pandas as pd
from mlxtend.frequent_patterns import apriori, association_rules
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", 500)
df = pd.read_excel("/kaggle/input/market-basket-analysis/Assignment-1_Data.xlsx")
# We are trying to understand the data.
def check_df(dataframe, head=5):
print("################### Shape ####################")
print(dataframe.shape)
print("#################### Info #####################")
print(dataframe.info())
print("################### Nunique ###################")
print(dataframe.nunique())
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("################## Quantiles #################")
print(dataframe.describe([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
print("#################### Head ####################")
print(dataframe.head(head))
check_df(df)
# Data Preparation
# We set a small threshold value to account for the presence of outliers in the data.
def outlier_thresholds(dataframe, variable):
quartile1 = dataframe[variable].quantile(0.01)
quartile3 = dataframe[variable].quantile(0.99)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
# We are writing a function to equalize the outlier values in the data to threshold values.
def replace_with_thresholds(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
# We are removing the negative and outlier values from the quantity and price variables.
def retail_data_prep(dataframe):
dataframe = dataframe[dataframe["Quantity"] > 0]
dataframe = dataframe[dataframe["Price"] > 0]
replace_with_thresholds(dataframe, "Quantity")
replace_with_thresholds(dataframe, "Price")
return dataframe
df = retail_data_prep(df)
df.describe().T
# We are selecting only the data for France in order to narrow down the data since it is large.
df_fr = df[df["Country"] == "France"]
# We are creating a table based on the sum of Quantity for the breakdown of BillNo and Itemname.
df_fr.groupby(["BillNo", "Itemname"]).agg({"Quantity": "sum"}).unstack().fillna(0).iloc[
0:5, 0:5
]
# We are converting the table to a completely Boolean type.
fr_inv_pro_df = (
df_fr.groupby(["BillNo", "Itemname"])
.agg({"Quantity": "sum"})
.unstack()
.fillna(0)
.applymap(lambda x: 1 if x > 0 else 0)
)
# We are using the Apriori method to find the support values of the products.
frequent_itemsets = apriori(
fr_inv_pro_df.astype("bool"), min_support=0.01, use_colnames=True
)
frequent_itemsets.sort_values("support", ascending=False).head()
# With this method, we can obtain the support, confidence, and lift values of the products,
# with the support values that we input.
rules = association_rules(frequent_itemsets, metric="support", min_threshold=0.01)
# By setting a threshold value for the metrics obtained, we can see the product associations in Apriori algorithm.
rules[
(rules["support"] > 0.05) & (rules["confidence"] > 0.1) & (rules["lift"] > 5)
].sort_values("confidence", ascending=False).head()
# Thus, we can see the products that are closely related to each other.
|
# # Introduction
# Skin cancer is a common type of cancer that develops in the skin cells due to uncontrolled growth, often triggered by exposure to UV radiation from the sun or other sources. It includes types such as basal cell carcinoma (BCC), squamous cell carcinoma (SCC), and melanoma. Early detection is crucial for successful treatment, and risk factors include sun exposure, fair skin, family history, and a weakened immune system. Prevention measures, like sun safety practices and regular skin self-examination, are important. Melanoma is responsible for 75% of skin cancer deaths.
# The American Cancer Society's estimates for melanoma in the United States for 2023 are: About 97,610 new melanomas will be diagnosed (about 58,120 in men and 39,490 in women). About 7,990 people are expected to die of melanoma (about 5,420 men and 2,570 women).
# # Problem Statement
# To classify 9 types of skin cancer using CNN.
# Cancer
# # Importing Libaraies
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow import keras
from keras.activations import relu
from tensorflow.keras import layers, Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, AveragePooling2D
path = "/kaggle/input/skin-cancer/Images"
labels = [
"pigmented benign keratosis",
"melanoma",
"vascular lesion",
"actinic keratosis",
"squamous cell carcinoma",
"basal cell carcinoma",
"seborrheic keratosis",
"dermatofibroma",
"nevus",
]
# # Plotting sample from each class
fig, axis = plt.subplots(3, 3)
k = 0
for i in range(3):
for j in range(3):
img_name = os.listdir(path + "/" + labels[k])[0]
img = plt.imread(path + "/" + labels[k] + "/" + img_name)
axis[i, j].imshow(img)
axis[i, j].title.set_text(labels[k])
k += 1
fig.set_figheight(15)
fig.set_figwidth(15)
fig.show()
# # Preparing Dataset
batch_size = 16
height = width = 180
train_ds = keras.preprocessing.image_dataset_from_directory(
path,
seed=123,
validation_split=0.2,
subset="training",
shuffle=True,
image_size=(height, width),
batch_size=batch_size,
)
val_ds = keras.preprocessing.image_dataset_from_directory(
path,
seed=123,
shuffle=True,
validation_split=0.2,
subset="validation",
image_size=(height, width),
batch_size=batch_size,
)
# # Defining Model
l2 = 0.0001
model = Sequential(
[
layers.experimental.preprocessing.Rescaling(
1.0 / 255, input_shape=(height, width, 3)
),
layers.Conv2D(
64,
(3, 3),
activation="relu",
input_shape=(224, 224, 3),
kernel_regularizer=keras.regularizers.l2(l2),
),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(
128, (3, 3), activation="relu", kernel_regularizer=keras.regularizers.l2(l2)
),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(
256, (3, 3), activation="relu", kernel_regularizer=keras.regularizers.l2(l2)
),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(
512, (3, 3), activation="relu", kernel_regularizer=keras.regularizers.l2(l2)
),
layers.MaxPooling2D((2, 2)),
layers.Flatten(),
layers.Dense(
512, activation="relu", kernel_regularizer=keras.regularizers.l2(l2)
),
layers.Dense(9, activation="softmax"),
]
)
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=["accuracy"],
)
model.summary()
history = model.fit(train_ds, epochs=25, batch_size=batch_size, validation_data=val_ds)
# # Plotting loss and Accuracy graphs
plt.plot(history.history["val_loss"])
plt.plot(history.history["loss"])
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
|
import pandas as pd
import numpy as np
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df.head()
# The data seems clean; no preprocessing required
df.shape
# in this data each column holds the intensity value of one pixel
import seaborn as sns
# seeing how many of them are with each digit
sns.countplot(y=df["label"]),
df["label"].value_counts()
# each digit has more than 4000 samples, though the counts still vary
# # Splitting The lable and Data
#
X = df.drop(["label"], axis=1)
y = df["label"]
X
y
# # Plotting Some Digit
import matplotlib.pyplot as plt
# lets try to plotting some figure
fig, ax = plt.subplots(5, 5, figsize=(15, 15))
ax = ax.ravel()
for i in range(25):
image = X.iloc[i]
ax[i].imshow(image.to_numpy().reshape(28, 28), cmap="gray")
ax[i].set_title(y[i])
ax[i].axis("off")
plt.show()
# normalize the data so every pixel value lies in the range [0, 1]
X = X / 255
# # Splitting into train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# ## One hot encoding the target:
# We have the labels for each input data. They are numbers between 0 and 9 that indicate which digit represents the image, that is, to which class they are associated. We will represent this label with a vector of 10 positions, where the position corresponding to the digit that represents the image contains a 1 and the remaining positions of the vector contain the value 0.
# So, we will use the one-hot encoding procedure. It consists of transforming the labels into a vector of as many zeros as the number of different labels, and containing the value of 1 in the index that corresponds to the value of the label. Keras offers many support functions, including to_categorical to perform this transformation, which we can import from keras.utils:
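# For example, with 10 classes the label 3 becomes the vector [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].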
from keras.utils import to_categorical

y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# ## Modeling:
# We are going to use Keras. Keras is a high-level neural network API that is built on top of TensorFlow, Theano, or CNTK. There are several reasons why Keras is a good choice for building neural networks:
# User-friendly interface: Keras provides a simple, intuitive, and easy-to-use interface for building neural networks. It offers a wide range of pre-built layers and models, which makes it easier to get started with deep learning.
# Modular design: Keras is designed in a modular way, which means that you can build and modify neural networks by adding or removing layers. This makes it easier to experiment with different architectures and hyperparameters
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
model = Sequential()
model.add(Dense(128, activation="relu", input_shape=(784,)))
model.add(Dense(90, activation="relu"))
model.add(Dense(10, activation="softmax"))
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="Adam", metrics=["accuracy"])
import tensorflow as tf
tf.config.run_functions_eagerly(True)
import warnings
warnings.filterwarnings("ignore") # this is to ignore warnings generated by code
model.fit(X_train, y_train, batch_size=100, epochs=10, validation_data=(X_test, y_test))
test_loss, test_acc = model.evaluate(X_test, y_test)
print(test_acc)
# printing Accuracy
y_pred = model.predict(X_test)
# Convert predictions classes back to the digit
y_pred_classes = np.argmax(y_pred, axis=1)
print(np.round(y_pred[0]))
y_true = np.argmax(y_test, axis=1)
from sklearn.metrics import confusion_matrix
confusion = confusion_matrix(y_true, y_pred_classes)
sns.heatmap(confusion, annot=True, fmt="d")
plt.xlabel("Predicted")
plt.ylabel("True")
plt.show()
from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred_classes))
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
test.head()
test = test / 255
# just normalizing the data like training set
result = model.predict(test)
result
result_classes = np.argmax(result, axis=1)
reslt = pd.DataFrame(result_classes)
arr = [*range(1, 28001)]
ID = pd.DataFrame(arr)
result = pd.concat([ID, reslt], axis=1)
result.columns = ["ID", "Star type"]
result.to_csv("/kaggle/working/result.csv", index=False)
df2 = pd.read_csv("/kaggle/working/result.csv")
df2
|
# Consider the following equation
# $$- u'' = f, \quad x\in \Omega$$
# $$u = 0, \quad x \in \partial \Omega.$$
# We choose the true solution $u = x(e^{-(x-\frac{1}{3})^2/K} - e^{-\frac{4}{9}/K})$ with $K = 0.01$ on the interval $\Omega = [-1,1]$.
# By FOSLS method, we have
# $$\sigma = - u'.$$
# And the loss function
# $$G(\sigma, u; f) = \|\text{div }\sigma + u -f \|_{0,\Omega}^2 + \|\frac{1}{\varepsilon}\sigma + \varepsilon \nabla u\|_{0,\Omega}^2 + \beta \|u\|^2_{0,\Omega}$$
import time
def TicTocGenerator():
# Generator that returns time differences
ti = 0 # initial time
tf = time.time() # final time
while True:
ti = tf
tf = time.time()
yield tf - ti # returns the time difference
TicToc = TicTocGenerator() # create an instance of the TicTocGen generator
# This will be the main function through which we define both tic() and toc()
def toc(tempBool=True):
# Prints the time difference yielded by generator instance TicToc
tempTimeInterval = next(TicToc)
if tempBool:
print("Elapsed time: %f seconds.\n" % tempTimeInterval)
def tic():
# Records a time in TicToc, marks the beginning of a time interval
toc(False)
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F
from torch import nn, optim
from math import exp
# initialize parameters
global k, dx, beta
k, dx, beta = 0.01, 0.005, 200
def f(x):
return -(
2
* exp(-((x - 1 / 3) ** 2) / k)
* (6 * k + 2 * x - 27 * k * x - 12 * x**2 + 18 * x**3)
) / (9 * k**2)
def g(x):
return torch.tensor([0.0], requires_grad=True)
def u_exact(x):
return x * (exp(-1 / k * (x - 1 / 3) ** 2) - exp(-4 / (9 * k)))
def sigma_exact(x):
return (
exp(-4 / (9 * k))
- exp(-((x - 1 / 3) ** 2) / k)
+ (x * exp(-((x - 1 / 3) ** 2) / k) * (2 * x - 2 / 3)) / k
)
sq = lambda x: x**2
vsq = np.vectorize(sq)
# compute H1 norm of true u and sigma
L = 0.0
R = 1.0
test_set = np.arange(L, R + dx, dx)
u = np.vectorize(u_exact)(test_set)
ud = -np.vectorize(sigma_exact)(test_set)
u_h1 = np.sum(dx * (vsq(u) + vsq(ud)))
u_l2 = np.sum(dx * vsq(u))
sigma = np.vectorize(sigma_exact)(test_set)
sigmad = np.vectorize(f)(test_set)
sigma_h1 = np.sum(dx * (vsq(sigma) + vsq(sigmad)))
sigma_l2 = np.sum(dx * vsq(sigma))
print("u: H1 norm square: %.6f, L2 norm square: %.6f " % (u_h1, u_l2))
print("sigma: H1 norm square: %.6f, L2 norm square: %.6f " % (sigma_h1, sigma_l2))
class MuSigmaPde(nn.Module):
def __init__(self, dimension, mesh=24, neuron=14):
super(MuSigmaPde, self).__init__()
self.xdim = dimension
# Layer 1
self.fc1mu = nn.Linear(dimension, mesh, dtype=torch.float64)
self.fc1sig = nn.Linear(dimension, mesh, dtype=torch.float64)
# Layer 2
self.fc2mu = nn.Linear(mesh, neuron, dtype=torch.float64)
self.fc2sig = nn.Linear(mesh, neuron, dtype=torch.float64)
# Layer 3
self.fc3mu = nn.Linear(neuron, neuron, dtype=torch.float64)
self.fc3sig = nn.Linear(neuron, neuron, dtype=torch.float64)
# Layer 4
self.fc4mu = nn.Linear(neuron, 1, dtype=torch.float64)
self.fc4sig = nn.Linear(neuron, dimension, dtype=torch.float64)
def forward(self, x): # Sigmoid activation function
assert len(x.shape) == 1 and x.shape[0] == self.xdim
y_mu = torch.sigmoid(self.fc2mu(torch.sigmoid(self.fc1mu(x))))
y_sig = torch.sigmoid(self.fc2sig(torch.sigmoid(self.fc1sig(x))))
mu = self.fc4mu(torch.sigmoid(self.fc3mu(y_mu)))
        sigma = self.fc4sig(torch.sigmoid(self.fc3sig(y_sig)))  # use the sigma branch (fc3sig), not fc3mu
return mu, sigma
def net_grad(self, x):
mu_center, sigma_center = self.forward(x)
mu_forward, sigma_forward = self.forward(x - 0.5 * dx)
mu_grad_forward = (mu_center - mu_forward) / (0.5 * dx)
sigma_grad_forward = (sigma_center - sigma_forward) / (0.5 * dx)
return mu_grad_forward, sigma_grad_forward
def loss_function_bulk(self, x): # energy functional
mu, sigma = self.forward(x)
mu_grad, sigma_grad = self.net_grad(x)
LSE = 0.5 * (mu_grad) ** 2 - f(x) * mu
return LSE
def loss_function_surf(self, x):
mu, sigma = self.forward(x)
# Boundary condition penalty
BCP = beta * (mu - g(x)) ** 2
return BCP
model = MuSigmaPde(dimension=1, mesh=24, neuron=14)
sum([p.numel() for p in model.parameters()])
# change h for different number of quadrature points. There are 1/h=200 quadrature points in this case.
h = 0.005
epochs = 10000
L, R = 0.0, 1.0
bulk_set, surf_set = np.arange(L, R, h), [L, R]
loss_bulk_record, loss_surf_record = [], []
print(
"bulk points number %d \nsurface points number %d\ntest points number %d\ndx for difference in testing %.3g\ntrainging iteration %d"
% (np.size(bulk_set), np.size(surf_set), np.size(test_set), dx, epochs)
)
optimizer = optim.Adam(model.parameters(), lr=0.0005)
tic()
local_min = 100.0
for j in range(epochs):
loss_bulk = torch.zeros(1, dtype=torch.float64)
loss_surf = torch.zeros(1, dtype=torch.float64)
for point in bulk_set:
x = torch.tensor([point + 0.5 * h], dtype=torch.float64)
loss_bulk += h * model.loss_function_bulk(x)
for point in surf_set:
x = torch.tensor([point], dtype=torch.float64)
loss_surf += model.loss_function_surf(x)
# record each loss
loss_bulk_record.append(loss_bulk.data[0])
loss_surf_record.append(loss_surf.data[0])
loss = loss_bulk + loss_surf
if loss.item() < local_min:
local_min = loss.item()
torch.save(model.state_dict(), "./poisson_equationenergy_24_14_2")
print(
"Train Epoch: {}, Loss: {:.6f}, loss bulk: {:.6f}, loss surf: {:.6f}".format(
j, loss.item(), loss_bulk.item(), loss_surf.item()
)
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
toc()
model.load_state_dict(torch.load("./poisson_equationenergy_24_14_2"))
mu_err_h1 = torch.zeros(1)
sigma_err_h1 = torch.zeros(1)
bdd_err = torch.zeros(1)
mu_err_l2 = torch.zeros(1)
sigma_err_l2 = torch.zeros(1)
G_relative = torch.zeros(1)
mu_err_semi = torch.zeros(1)
test_set = np.arange(L, R + dx, dx)
for point in test_set:
x = torch.tensor([point + 0.5 * dx])
mu, sigma = model(x)
mu_grad, sigma_grad = model.net_grad(x)
loss = model.loss_function_bulk(x)
# esitmate H1 norm error
mu_diff = (mu - u_exact(x)) ** 2 + (mu_grad + sigma_exact(x)) ** 2
mu_diff_simi = (mu_grad + sigma_exact(x)) ** 2
sigma_diff = (sigma - sigma_exact(x)) ** 2 + (sigma_grad - f(x)) ** 2
mu_err_h1 += dx * mu_diff
sigma_err_h1 += dx * sigma_diff
# estimate L2 norm error
mu_err_l2 += dx * (mu - u_exact(x)) ** 2
sigma_err_l2 += dx * (sigma - sigma_exact(x)) ** 2
# estimate H1 semi norm error
# mu_err_semi = mu_err_h1 - mu_err_l2
mu_err_semi += dx * mu_diff_simi
sigma_err_semi = sigma_err_h1 - sigma_err_l2
H1_err = mu_err_h1 + sigma_err_h1
H1_err_relative = ((H1_err) ** (1 / 2)) / ((u_h1 + sigma_h1) ** (1 / 2))
L2_err = mu_err_l2 + sigma_err_l2
L2_err_relative = ((L2_err) ** (1 / 2)) / ((u_l2 + sigma_l2) ** (1 / 2))
mu_err_h1_relative = (mu_err_h1 / u_h1) ** (1 / 2)
mu_err_l2_relative = (mu_err_l2 / u_l2) ** (1 / 2)
mu_err_semi_relative = (mu_err_semi / (sigma_l2)) ** (1 / 2)
sigma_err_h1_relative = (sigma_err_h1 / sigma_h1) ** (1 / 2)
sigma_err_l2_relative = (sigma_err_l2 / sigma_l2) ** (1 / 2)
sigma_err_semi_relative = (sigma_err_semi / (sigma_h1 - sigma_l2)) ** (1 / 2)
bdd_err += abs(mu - g(x))
G_rel = (loss) ** (1 / 2) / ((u_h1 + sigma_h1) ** (1 / 2))
print(
"u: L2_rel: {:.6f}, H1_semi_rel: {:.6f}".format(
mu_err_l2_relative.item(), mu_err_semi_relative.item()
)
)
print("sigma: L2_rel: {:.6f}\n".format(sigma_err_l2_relative.item()))
print("G_rel: {:.6f}\n".format(G_rel.item()))
points = test_set
yt = np.zeros_like(points)
y_diff = np.zeros_like(points)
ymu = np.zeros_like(points)
ysig = np.zeros_like(points)
for i in range(len(points)):
yt[i] = u_exact(points[i])
y_diff[i] = -exp(-((points[i] - 1 / 3) ** 2) / 0.01) * (
-200 * points[i] ** 2 + (200 / 3) * points[i] + 1
)
ymu[i], ysig[i] = model(torch.tensor([points[i]]))
plt.plot(points, yt, label="u_true")
plt.plot(points, ymu, label="u_approximation")
plt.legend()
plt.plot(points, y_diff, label="sigma_true")
plt.plot(points, ysig, label="sigma_approximation")
plt.legend()
num = np.arange(1, len(loss_bulk_record) + 1, 1)
plt.plot(num, loss_bulk_record)
plt.plot(num, loss_surf_record)
plt.plot(num, np.add(loss_bulk_record, loss_surf_record))
|
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# # Loading Data
data = pd.read_csv("/kaggle/input/homework-3-dataset/data.csv")
data
# # Implementing K-means Algorithm from Scratch
# Implement the K-means algorithm from scratch. K-means repeatedly computes the distance between each data point and the cluster centroids; implement this distance computation with Euclidean distance, 1 - cosine similarity, and 1 - the generalized Jaccard similarity (a sketch follows the class below).
# [Link](https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/jaccard.htm)
class KMeans:
def __init__(self, k=2, max_iter=100, distance_metric="euclidean"):
self.k = k
self.max_iter = max_iter
        self.distance_metric = distance_metric
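# A minimal sketch (not the graded solution) completing the class above under stated
# assumptions: the method names _distance and fit are illustrative, centroids are
# initialized by random sampling, and numpy (imported above) is used throughout.
def _distance(self, a, b):
    if self.distance_metric == "euclidean":
        return np.linalg.norm(a - b)
    if self.distance_metric == "cosine":
        return 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    # generalized Jaccard distance: 1 - sum(min) / sum(max)
    return 1 - np.minimum(a, b).sum() / np.maximum(a, b).sum()


def _fit(self, X):
    X = np.asarray(X, dtype=float)
    rng = np.random.default_rng(0)
    centroids = X[rng.choice(len(X), self.k, replace=False)]
    for _ in range(self.max_iter):
        # assign each point to the nearest centroid under the chosen distance
        labels = np.array(
            [np.argmin([self._distance(x, c) for c in centroids]) for x in X]
        )
        # recompute centroids as the mean of the assigned points
        new_centroids = np.array(
            [
                X[labels == j].mean(axis=0) if np.any(labels == j) else centroids[j]
                for j in range(self.k)
            ]
        )
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    self.centroids_, self.labels_ = centroids, labels
    return self


KMeans._distance = _distance
KMeans.fit = _fit
# Example usage (assuming the loaded data is purely numeric):
# KMeans(k=2, distance_metric="cosine").fit(data.values).labels_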
|
# # Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
"""Lệnh "%matplotlib inline" được sử dụng để đảm bảo rằng các hình ảnh được hiển thị trong phần mềm Jupyter Notebook hoặc JupyterLab và không yêu cầu mở cửa sổ riêng biệt để xem. Khi được kích hoạt, lệnh này sẽ đưa ra một giao diện dòng lệnh để tạo các hình ảnh trực tiếp trong trình duyệt web."""
# # Import dataset
#
data = pd.read_csv("weatherAUS.csv")
data.head(5)
# # Description summary about dataset Rain in Australia
# -----------------------------------------
# ***Purpose***:
# Predict next-day rain by training classification models on the target variable
# ***Content***:
# This dataset contains about 10 years of daily weather observations from many locations across Australia
# -----------------------------------------
# # Description specify columns
# ***Date***: the date of observation
# ***Location***: the location of the weather station
# ***MinTemp***: The minimum temperature in degrees celsius
# ***MaxTemp***: The maximum temperature in degrees celsius
# ***Rainfall***: the amount of rainfall recorded for the day in mm
# ***Evaporation***: The so-called Class A pan evaporation (mm) in the 24 hours to 9am
# ***Sunshine***: The number of hours of bright sunshine in the day.
# ***WindGustDir***: The direction of the strongest wind gust in the 24 hours to midnight
# ***WindGustSpeed***: The speed (km/h) of the strongest wind gust in the 24 hours to midnight
# ***WindDir9am***: Direction of the wind at 9am
# # Exploratory data analysis
data.shape
data.info()
## check null
data.columns
import plotly.express as px
def show_bar_single(x, y, title=None):
df = pd.DataFrame({"x": x, "y": y})
px.bar(df, x="x", y="y", text_auto=".s", color="x", title=title).show()
def show_bar(data_sub):
x = []
y = []
for name_col in data_sub.columns:
if data_sub[name_col].isnull().sum() > 0:
# print(f'{name_col} has {data_sub[name_col].isnull().sum()} is null')
x.append(name_col)
y.append(data_sub[name_col].isnull().sum())
df = pd.DataFrame({"x": x, "y": y})
px.bar(df, x="x", y="y", text_auto=".s", color="x").show()
data = data.drop(data[data.RainTomorrow.isnull()].index, axis=0)
data.head(5)
# Show variable categorical
categorical = [var for var in data.columns if data[var].dtype == "O"]
categorical
data[categorical]
data.Date = pd.to_datetime(data.Date)
data["week"] = data.Date.dt.weekday
data["month"] = data.Date.dt.month
data["year"] = data.Date.dt.year
data.drop("Date", inplace=True, axis=1)
data.head(5)
# ### Summary of categorical variables
# + Date is a datetime variable
# + There are 2 binary categorical variables: RainTomorrow and RainToday (RainTomorrow can be mapped to 0 and 1, as sketched below)
# + RainTomorrow is the target variable
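# A minimal sketch of the mapping mentioned above, assuming the target takes the
# values "Yes"/"No"; the pipeline below keeps the string labels instead, so this
# line is left commented out.
# data["RainTomorrow"] = data["RainTomorrow"].map({"No": 0, "Yes": 1})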
# ### Check outliers in numeric variables
number_var = [var for var in data.columns if data[var].dtype != "O"]
number_var
data[number_var]
def show_outliers(data_sub):
x = []
y = []
for name_col in data_sub.columns:
desc = data_sub[name_col].describe()
iqr = desc["75%"] - desc["25%"]
lower_bound = desc["25%"] - 1.5 * iqr
upper_bound = desc["75%"] + 1.5 * iqr
outliers = data_sub[
(data_sub[name_col] < lower_bound) | (data_sub[name_col] > upper_bound)
]
if len(outliers) > 0:
x.append(name_col)
y.append(len(outliers))
show_bar_singe(x, y, title="Statistic columns be Outlier")
show_outliers(data[number_var])
# Looking at the plot, Rainfall has the most outliers, followed by WindGustSpeed, WindSpeed3pm and Evaporation
fig, ax = plt.subplots(2, 2, figsize=(10, 5))
ax = ax.flatten()
col = ["Rainfall", "WindGustSpeed", "WindSpeed3pm", "Evaporation"]
for i, name_col in enumerate(col):
ax[i].hist(data[name_col], bins=10)
ax[i].set_title(name_col)
plt.show()
# # Handle missing values
show_bar(data)
data.Evaporation.value_counts()
len(data)
# Evaporation has the most missing values
# Since there are many missing values, I fill them with a trimmed mean; this retains the important data while limiting the influence of outliers
from scipy import stats
for name_col in data[number_var].columns:
if data[name_col].isnull().sum() > 0:
trimmed_mean = stats.trim_mean(
data[name_col].value_counts().index, proportiontocut=0.2
)
data[name_col] = data[name_col].fillna(trimmed_mean)
print(name_col)
categorical = [x for x in categorical if x != "Date"]
show_bar(data[categorical])
# The remaining nulls are few, so the rows can be dropped
data.dropna(inplace=True)
# # Split data
X = data.drop("RainTomorrow", axis=1)
y = data["RainTomorrow"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
X_train.shape, X_test.shape
# # Feature Engineer
data.columns
# encode variable category
# import category_encoders as ce
# encoder = ce.BinaryEncoder(cols=['RainTomorrow'])
# X_train = encoder.fit_transform(X_train)
# X_test = encoder.transform(X_test)
import category_encoders as ce
encoder = ce.BinaryEncoder(cols=["RainToday"])
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
X_train = pd.concat(
[
X_train[number_var],
X_train[["RainToday_0", "RainToday_1"]],
pd.get_dummies(X_train.Location),
pd.get_dummies(X_train.WindGustDir),
pd.get_dummies(X_train.WindDir9am),
pd.get_dummies(X_train.WindDir3pm),
],
axis=1,
)
X_test = pd.concat(
[
X_test[number_var],
X_test[["RainToday_0", "RainToday_1"]],
pd.get_dummies(X_test.Location),
pd.get_dummies(X_test.WindGustDir),
pd.get_dummies(X_test.WindDir9am),
pd.get_dummies(X_test.WindDir3pm),
],
axis=1,
)
# # Feature Scaling
from sklearn.preprocessing import MinMaxScaler
# The fit_transform() method is called on X_train to calculate the parameters required for data normalization and transform the X_train data according to those parameters. Then the transform() method is called on X_test to transform the X_test data according to the parameters calculated from the X_train.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# # Model Training
from sklearn.linear_model import LogisticRegression
# C is a parameter in models such as logistic regression and support vector machines (SVM) that controls the amount of regularization. Regularization is a technique that reduces overfitting by adding a penalty term to the loss function. C is the inverse of the regularization strength: the smaller C is, the stronger the regularization, and vice versa.
logreg = LogisticRegression(C=1, solver="liblinear", random_state=0)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
y_pred
from sklearn.metrics import accuracy_score
print(f"ACC = {accuracy_score(y_test, y_pred)}")
# # Check for overfitting and underfitting
print(f"ACC TRAIN = {logreg.score(X_train, y_train)}")
print(f"ACC TEST = {logreg.score(X_test, y_test)}")
|
import os
from tensorflow.keras.utils import image_dataset_from_directory
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Input
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.callbacks import EarlyStopping
import tensorflow as tf
from sklearn.metrics import classification_report, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import wandb
from wandb.keras import WandbMetricsLogger
import resnets as rn
wandb.login()
# ## Set hyperparameters
train_path = "face-mask-12k-images-dataset/Face Mask Dataset/Train"
test_path = "face-mask-12k-images-dataset/Face Mask Dataset/Test"
val_path = "face-mask-12k-images-dataset/Face Mask Dataset/Validation"
image_size = 96
channels = 3
color_mode = "rgb"
validation_split = 0.2
seed = 27
config = {
"batch_size": 64,
"epochs": 100,
"learning_rate": 0.00001,
"early_stop_patience": 10,
}
# ## Run the following block on Kaggle only!
train_path = "/kaggle/input/" + train_path
test_path = "/kaggle/input/" + test_path
val_path = "/kaggle/input/" + val_path
# ## Get class names
classes = next(os.walk(train_path))[1]
print(classes)
# ## Create training, validation and test datasets
train = image_dataset_from_directory(
directory=train_path,
labels="inferred",
class_names=classes,
label_mode="int",
color_mode=color_mode,
batch_size=config["batch_size"],
image_size=(image_size, image_size),
)
validation = image_dataset_from_directory(
directory=val_path,
labels="inferred",
class_names=classes,
label_mode="int",
color_mode=color_mode,
batch_size=config["batch_size"],
image_size=(image_size, image_size),
)
test = image_dataset_from_directory(
directory=test_path,
labels="inferred",
class_names=classes,
label_mode="int",
color_mode=color_mode,
batch_size=config["batch_size"],
image_size=(image_size, image_size),
shuffle=False,
)
# ## Training on ResNet-18
wandb.init(config=config, project="resnet-different-dataset")
optimizer = Adam(learning_rate=config["learning_rate"])
loss = SparseCategoricalCrossentropy()
metrics = [SparseCategoricalAccuracy()]
model = rn.ResNet18(
(image_size, image_size, channels),
include_top=False,
normalize=True,
flatten=True,
dropout_rate=0.2,
)
top = Dense(4096, activation="relu")(model.output)
top = Dropout(0.5)(top)
top = Dense(4096, activation="relu")(top)
top = Dropout(0.5)(top)
top = Dense(len(classes), activation="softmax")(top)
model = Model(inputs=model.input, outputs=top)
early_stopping = EarlyStopping(
monitor="val_loss",
mode="min",
verbose=1,
patience=config["early_stop_patience"],
restore_best_weights=True,
)
callbacks = [early_stopping, WandbMetricsLogger()]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
history = model.fit(
train, validation_data=validation, epochs=config["epochs"], callbacks=callbacks
)
wandb.finish()
# ## Plotting training progress
history_df = pd.DataFrame(history.history)
history_df[["sparse_categorical_accuracy", "val_sparse_categorical_accuracy"]].plot()
history_df[["loss", "val_loss"]].plot()
# ## Predict
y_true_tmp = []
y_pred_tmp = []
for x_test_batch, y_test_batch in test:
y_true_tmp.append(y_test_batch)
predictions = model.predict(x_test_batch, verbose=0)
y_pred_tmp.append(np.argmax(predictions, axis=1))
y_true = tf.concat(y_true_tmp, axis=0)
y_pred = tf.concat(y_pred_tmp, axis=0)
# ## Classification report
print(classification_report(y_true, y_pred, target_names=classes, digits=4))
# ## Confusion matrix
ConfusionMatrixDisplay.from_predictions(
y_true, y_pred, display_labels=classes, xticks_rotation=45
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
res = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
filepath = os.path.join(dirname, filename)
print(filepath)
res.append(
pd.read_csv(
filepath,
names=["id", "date", "time", "consumption"],
parse_dates={"datetime": ["date", "time"]},
index_col="id",
header=0,
date_parser=lambda x, y: pd.to_datetime(
x + " " + y, format="%d %b %Y %H:%M:%S"
),
)
)
df = pd.concat(res, ignore_index=True).sort_values("datetime")
df.head()
print(f"min date: {df['datetime'].min()}, max date: {df['datetime'].max()}")
print(f"range: {df['datetime'].max()-df['datetime'].min()}")
# daily energy consumption
df.sort_values("datetime").rolling("1D", on="datetime").sum().head()
import matplotlib.pyplot as plt
df.rolling("1H", on="datetime").sum().loc[df["consumption"] > 0.5].plot(
x="datetime", y="consumption", title="Hourly energy consumption", figsize=(20, 6)
)
# Monthly total consumption and daily average consumption for a month
daily_cons = df.rolling("1d", on="datetime").sum()
daily_cons.groupby(daily_cons["datetime"].dt.month)["consumption"].apply(
lambda x: {"sum": x.sum(), "average": x.mean(), "count": x.count()}
)
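# As a cross-check, calendar-month totals and the average daily total per month can be
# computed directly with resample (a sketch; assumes the df built above with its
# "datetime" and "consumption" columns).
daily_totals = df.set_index("datetime")["consumption"].resample("1D").sum()
monthly_summary = daily_totals.resample("1M").agg(["sum", "mean"])
print(monthly_summary)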
daily_cons.plot(
x="datetime", y="consumption", title="Daily energy consumption", figsize=(10, 6)
)
plt.show()
daily_cons.describe()
# Separating day and night consumption time
day_consumption = df.loc[
(pd.Timestamp("06:00").time() <= df["datetime"].dt.time)
& (df["datetime"].dt.time < pd.Timestamp("18:00").time())
& (df["consumption"] > 0.5)
]
night_consumption = df.loc[set(df.index) - set(day_consumption.index)].sort_values(
"datetime"
)
night_consumption = night_consumption.loc[night_consumption["consumption"] > 0.5]
fig, ax = plt.subplots(1, 1, figsize=(20, 6))
day_hourly_cons = day_consumption.rolling("1H", on="datetime").sum()
night_hourly_cons = night_consumption.rolling("1H", on="datetime").sum()
ax.plot(
day_hourly_cons["datetime"],
day_hourly_cons["consumption"],
"r-",
label="Day Consumption",
)
ax.plot(
night_hourly_cons["datetime"],
night_hourly_cons["consumption"],
"b-",
label="Night Consumption",
)
ax.legend()
ax.set_title("Hourly Energy Consumption")
ax.set_xlabel("Date")
ax.set_ylabel("Energy Consumption")
plt.show()
daily_day_cons = day_consumption.rolling("1d", on="datetime").sum()
daily_night_cons = night_consumption.rolling("1d", on="datetime").sum()
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.plot(
daily_day_cons["datetime"],
daily_day_cons["consumption"],
"r-",
label="Day Consumption",
)
ax.plot(
daily_night_cons["datetime"],
daily_night_cons["consumption"],
"b-",
label="Night Consumption",
)
ax.legend()
ax.set_title("Daily Energy Consumption")
ax.set_xlabel("Date")
ax.set_ylabel("Energy Consumption")
plt.show()
# # **TimeSeries Forecasting**
# Aggregating to day
df_copy = df.set_index("datetime").sort_index()
df_copy = df_copy.resample("1D").sum()
# **Additive Decompostion**
# TimeSeries Additive Decomposition
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(df_copy, model="additive")
plt.rc("figure", figsize=(15, 6))
result.plot()
plt.show()
# **Multiplicative Decomposition**
# TimeSeries Multiplicative Decomposition
from statsmodels.tsa.seasonal import seasonal_decompose
result = seasonal_decompose(df_copy, model="multiplicative")
plt.rc("figure", figsize=(15, 6))
result.plot()
plt.show()
# # AutoRegression Forecasting
# AutoRegression Forecasting
from statsmodels.tsa.ar_model import AutoReg
model = AutoReg(df_copy, lags=15)
model_fit = model.fit()
next_one_m = model_fit.predict("2022-03-01", "2022-03-31")
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.plot(df_copy["consumption"], "r-", label="Daily total Consumption")
ax.plot(next_one_m, "b-", label="Daily predicted total Consumption")
ax.legend()
ax.set_title("Daily Energy Predicted Consumption using Auto Regression")
ax.set_xlabel("Date")
ax.set_ylabel("Energy Consumption")
plt.show()
# # **Moving Average Forecasting**
from statsmodels.tsa.arima.model import ARIMA
model = ARIMA(df_copy, order=(0, 0, 1))
model_fit = model.fit()
next_one_m = model_fit.predict("2022-03-01", "2022-03-31")
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.plot(df_copy["consumption"], "r-", label="Daily total Consumption")
ax.plot(next_one_m, "b-", label="Daily predicted total Consumption")
ax.legend()
ax.set_title("Daily Energy Predicted Consumption using MA")
ax.set_xlabel("Date")
ax.set_ylabel("Energy Consumption")
plt.show()
# # **Autoregressive Moving Average (ARMA)**
model = ARIMA(df_copy, order=(9, 0, 1))
model_fit = model.fit()
next_one_m = model_fit.predict("2022-03-01", "2022-03-31")
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.plot(df_copy["consumption"], "r-", label="Daily total Consumption")
ax.plot(next_one_m, "b-", label="Daily predicted total Consumption")
ax.legend()
ax.set_title("Daily Energy Predicted Consumption using ARMA")
ax.set_xlabel("Date")
ax.set_ylabel("Energy Consumption")
plt.show()
# # **Autoregressive Integrated Moving Average (ARIMA)**
model = ARIMA(df_copy, order=(10, 1, 1))
model_fit = model.fit()
next_one_m = model_fit.predict("2022-03-01", "2022-03-31")
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.plot(df_copy["consumption"], "r-", label="Daily total Consumption")
ax.plot(next_one_m, "b-", label="Daily predicted total Consumption")
ax.legend()
ax.set_title("Daily Energy Predicted Consumption using ARIMA")
ax.set_xlabel("Date")
ax.set_ylabel("Energy Consumption")
plt.show()
# # **Seasonal Autoregressive Integrated Moving Average (SARIMA)**
model = ARIMA(df_copy, order=(5, 1, 1), seasonal_order=(1, 0, 1, 7))
model_fit = model.fit()
next_one_m = model_fit.predict("2022-03-01", "2022-03-31")
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.plot(df_copy["consumption"], "r-", label="Daily total Consumption")
ax.plot(next_one_m, "b-", label="Daily predicted total Consumption")
ax.legend()
ax.set_title("Daily Energy Predicted Consumption using ARIMA")
ax.set_xlabel("Date")
ax.set_ylabel("Energy Consumption")
plt.show()
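# To compare these specifications on a common footing, one simple option is to inspect the
# information criteria of each fitted model (a sketch; lower AIC/BIC generally indicates a
# better trade-off between fit and complexity).
for order, seasonal in [
    ((0, 0, 1), (0, 0, 0, 0)),
    ((9, 0, 1), (0, 0, 0, 0)),
    ((10, 1, 1), (0, 0, 0, 0)),
    ((5, 1, 1), (1, 0, 1, 7)),
]:
    fit = ARIMA(df_copy, order=order, seasonal_order=seasonal).fit()
    print(f"order={order}, seasonal_order={seasonal}: AIC={fit.aic:.1f}, BIC={fit.bic:.1f}")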
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
file_path = "/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
file = pd.read_csv(file_path)
file.head()
file.columns
null_val = file.isnull()
null_count = null_val.sum()
columns = null_count.sort_values(ascending=False)
file.set_index("Id", inplace=True)
columns.index
file[
["PoolQC", "MiscFeature", "Alley", "Fence", "FireplaceQu", "LotFrontage"]
].isnull().sum()
file.drop_duplicates(inplace=True)
# These 4 columns have a very high share of missing values (roughly 1,200-1,450 of the 1,460 rows), so they are dropped
file.drop(columns=["PoolQC", "MiscFeature", "Alley", "Fence"], inplace=True)
column = file.isnull().sum().sort_values(ascending=False)
file.info()
file[
[
"FireplaceQu",
"LotFrontage",
"GarageYrBlt",
"GarageCond",
"GarageType",
"GarageFinish",
"GarageQual",
"BsmtFinType2",
"BsmtExposure",
"BsmtQual",
"BsmtCond",
"BsmtFinType1",
"MasVnrArea",
"MasVnrType",
"Electrical",
]
].isnull().sum()
file["FireplaceQu"].describe()
# LotFrontage should be numeric; replace any string entries with NaN before casting to float
file["LotFrontage"] = file["LotFrontage"].replace(
to_replace=r"^.*$", value=np.nan, regex=True
)
file["LotFrontage"] = file.LotFrontage.astype("float64")
file.describe()
# missing values in lotfrontage replaced by median
file.LotFrontage.fillna(file.LotFrontage.median(), inplace=True)
file.isnull().sum().sort_values(ascending=False)
file.FireplaceQu.fillna(file.FireplaceQu.mode()[0], inplace=True)
file.isnull().sum().sort_values(ascending=False)
file.GarageYrBlt.fillna(file.GarageYrBlt.median(), inplace=True)
file.isnull().sum().sort_values(ascending=False)
# Fill remaining missing values with each column's most frequent value
file.fillna(file.mode().iloc[0], inplace=True)
file.isnull().sum()
file.corr()
# The correlation between SalePrice and MSSubClass is very low (-0.084284). Many other features similarly have little effect on price, so they are dropped below
# Set the threshold for correlation
corr_threshold = 0.5
# Identify variables with low correlation to the dependent variable
corr_matrix = file.corr()
low_corr_vars = []
for var in corr_matrix.columns:
if abs(corr_matrix[var]["SalePrice"]) < corr_threshold:
low_corr_vars.append(var)
file = file.drop(low_corr_vars, axis=1)
file.corr()
file.head()
columns.index
file.select_dtypes(include="object").columns.tolist()
file[
[
"MSZoning",
"Street",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PavedDrive",
"SaleType",
"SaleCondition",
]
]
# let's encode the data
from sklearn.preprocessing import LabelEncoder
encode = LabelEncoder()
file["MSZoning"] = encode.fit_transform(file["MSZoning"])
file["Street"] = encode.fit_transform(file["Street"])
file["LotShape"] = encode.fit_transform(file["LotShape"])
file["LandContour"] = encode.fit_transform(file["LandContour"])
file["LotConfig"] = encode.fit_transform(file["LotConfig"])
file["LandSlope"] = encode.fit_transform(file["LandSlope"])
file["Neighborhood"] = encode.fit_transform(file["Neighborhood"])
# let us find correlation again
file.corr()
imp = [
"OverallQual",
"YearBuilt",
"YearRemodAdd",
"TotalBsmtSF",
"1stFlrSF",
"GrLivArea",
"FullBath",
"TotRmsAbvGrd",
"GarageCars",
"GarageArea",
]
y = file.SalePrice
X = file[
[
"OverallQual",
"YearBuilt",
"YearRemodAdd",
"TotalBsmtSF",
"1stFlrSF",
"GrLivArea",
"FullBath",
"TotRmsAbvGrd",
"GarageCars",
"GarageArea",
]
]
from sklearn.preprocessing import StandardScaler
# standard scaler
scaler = StandardScaler()
x = scaler.fit_transform(X)
scaled_x = pd.DataFrame(
x,
columns=[
"OverallQual",
"YearBuilt",
"YearRemodAdd",
"TotalBsmtSF",
"1stFlrSF",
"GrLivArea",
"FullBath",
"TotRmsAbvGrd",
"GarageCars",
"GarageArea",
],
)
scaled_x, y
# # **Train Test Split**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
scaled_x, y, test_size=0.9, random_state=42
)
X_train, X_test, y_train, y_test
models = pd.DataFrame(
columns=["Model", "MAE", "MSE", "RMSE", "R2 Score", "RMSE (Cross-Validation)"]
)
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from xgboost import XGBRegressor
from sklearn.preprocessing import PolynomialFeatures
def rmse_cv(model):
    # Cross-validate on the scaled features so the CV estimate matches the train/test pipeline
    rmse = np.sqrt(
        -cross_val_score(model, scaled_x, y, scoring="neg_mean_squared_error", cv=5)
    ).mean()
    return rmse
def evaluation(y, predictions):
mae = mean_absolute_error(y, predictions)
mse = mean_squared_error(y, predictions)
rmse = np.sqrt(mean_squared_error(y, predictions))
r_squared = r2_score(y, predictions)
return mae, mse, rmse, r_squared
# # linear Regression
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
predictions = lin_reg.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(lin_reg)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "LinearRegression",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
# # Ridge Regression
ridge = Ridge()
ridge.fit(X_train, y_train)
predictions = ridge.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(ridge)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "Ridge",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
# # *Lasso Regression*
lasso = Lasso()
lasso.fit(X_train, y_train)
predictions = lasso.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(lasso)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "Lasso",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
# # *Elastic Net* #
elastic_net = ElasticNet()
elastic_net.fit(X_train, y_train)
predictions = elastic_net.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(elastic_net)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "ElasticNet",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
# # **Support Vector Machines**
svr = SVR(C=100000)
svr.fit(X_train, y_train)
predictions = svr.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(svr)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "SVR",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
# # *Random Forest*
random_forest = RandomForestRegressor(n_estimators=100)
random_forest.fit(X_train, y_train)
predictions = random_forest.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(random_forest)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "RandomForestRegressor",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
# # *XGB Regressor*
xgb = XGBRegressor(n_estimators=1000, learning_rate=0.01)
xgb.fit(X_train, y_train)
predictions = xgb.predict(X_test)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(xgb)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "XGBRegressor",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
# # *Polynomial Regression*
poly_reg = PolynomialFeatures(degree=2)
X_train_2d = poly_reg.fit_transform(X_train)
X_test_2d = poly_reg.transform(X_test)
lin_reg = LinearRegression()
lin_reg.fit(X_train_2d, y_train)
predictions = lin_reg.predict(X_test_2d)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(lin_reg)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "Polynomial Regression (degree=2)",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
poly_reg = PolynomialFeatures(degree=3)
X_train_2d = poly_reg.fit_transform(X_train)
X_test_2d = poly_reg.transform(X_test)
lin_reg = LinearRegression()
lin_reg.fit(X_train_2d, y_train)
predictions = lin_reg.predict(X_test_2d)
mae, mse, rmse, r_squared = evaluation(y_test, predictions)
print("MAE:", mae)
print("MSE:", mse)
print("RMSE:", rmse)
print("R2 Score:", r_squared)
print("-" * 30)
rmse_cross_val = rmse_cv(lin_reg)
print("RMSE Cross-Validation:", rmse_cross_val)
new_row = {
"Model": "Polynomial Regression (degree=2)",
"MAE": mae,
"MSE": mse,
"RMSE": rmse,
"R2 Score": r_squared,
"RMSE (Cross-Validation)": rmse_cross_val,
}
models = models.append(new_row, ignore_index=True)
models.sort_values(by="RMSE (Cross-Validation)")
sns.barplot(x=models["Model"], y=models["RMSE (Cross-Validation)"])
plt.xticks(rotation=90, size=12)
plt.show()
# # **XGb regressor is best here.**
import pickle
pickle.dump(xgb, open("xgb.pickle", "wb"))
pickle.dump(scaler, open("scaler.pickle", "wb"))
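# To reuse the saved artifacts later, a minimal loading sketch (assumes xgb.pickle and
# scaler.pickle are in the working directory and X is the unscaled feature frame from above).
loaded_scaler = pickle.load(open("scaler.pickle", "rb"))
loaded_model = pickle.load(open("xgb.pickle", "rb"))
sample = loaded_scaler.transform(X.iloc[:5])
print(loaded_model.predict(sample))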
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm, metrics
import warnings
warnings.filterwarnings("ignore")
# Read the csv file and with the head() function, preview the first 5 rows
adult_df = pd.read_csv("/kaggle/input/adult-census-income/adult.csv")
adult_df.head()
# Shuffle, then carve out a disjoint 3,000-row holdout before modelling
adult_df = adult_df.sample(frac=1, random_state=10)
holdout = adult_df[:3000]
adult_df = adult_df[3000:]
print(len(adult_df))
print(len(holdout))
adult_df["income"].unique()
import seaborn as sns
from matplotlib import pyplot
sns.set(style="whitegrid", font_scale=1.2)
a4_dims = (20, 10)
fig, ax = pyplot.subplots(figsize=a4_dims)
g = sns.countplot(y="education", hue="income", data=adult_df, palette="muted")
sns.set(style="whitegrid", font_scale=1.2)
# Countplot of income by marital status
a4_dims = (20, 10)
fig, ax = pyplot.subplots(figsize=a4_dims)
g = sns.countplot(y="marital.status", hue="income", data=adult_df, palette="muted")
import seaborn as sns
adult_df = pd.get_dummies(adult_df, columns=["income"])
adult_df = adult_df.drop(["income_<=50K"], axis=1)
corr = adult_df.corr()
a4_dims = (20, 10)
fig, ax = pyplot.subplots(figsize=a4_dims)
sns.heatmap(corr, annot=True)
adult_df2 = adult_df.copy()
adult_df = pd.get_dummies(
adult_df,
columns=["occupation", "race", "workclass", "marital.status", "relationship"],
)
adult_df["income"].unique()
adult_df.corr().unstack().sort_values().drop_duplicates()
adult_df.info()
import seaborn as sns
from matplotlib import pyplot
# "income" is already one-hot encoded above (only the "income_>50K" indicator remains), so it is not re-encoded here
corr = adult_df.corr()
a4_dims = (20, 10)
fig, ax = pyplot.subplots(figsize=a4_dims)
sns.heatmap(corr, annot=True)
adult_df = adult_df2
adult_df = pd.get_dummies(
adult_df, columns=["occupation", "race", "workclass", "marital.status"]
)
adult_df[">50K"].corr(adult_df["education.num"])
|
# 
# 
# # Logistic Regression
# ### About Dataset
# Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image.
# The linear program used to obtain the separating plane in the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].
# This database is also available through the UW CS ftp server:
# ftp ftp.cs.wisc.edu
# cd math-prog/cpo-dataset/machine-learn/WDBC/
# Also can be found on UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
# #### Attribute Information:
# * 1) ID number
# * 2) Diagnosis (M = malignant, B = benign)
# * 3-32)
# * Ten real-valued features are computed for each cell nucleus:
# * a) radius (mean of distances from center to points on the perimeter)
# * b) texture (standard deviation of gray-scale values)
# * c) perimeter
# * d) area
# * e) smoothness (local variation in radius lengths)
# * f) compactness (perimeter^2 / area - 1.0)
# * g) concavity (severity of concave portions of the contour)
# * h) concave points (number of concave portions of the contour)
# * i) symmetry
# * j) fractal dimension ("coastline approximation" - 1)
# The mean, standard error and "worst" or largest (mean of the three
# largest values) of these features were computed for each image,
# resulting in 30 features. For instance, field 3 is Mean Radius, field
# 13 is Radius SE, field 23 is Worst Radius.
# * All feature values are recoded with four significant digits.
# * Missing attribute values: none
# * Class distribution: 357 benign, 212 malignant
# ## Analysis Content
# 1. [Import the necessary libraries:](#1)
# 1. [Load the data into a pandas DataFrame:](#2)
# 1. [Explore & Clean the data](#3)
# 1. [Building a Model:](#4)
# ## Import the necessary libraries:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#
# ## Load the data into a pandas DataFrame:
df = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
df.head()
df.info()
df.describe().T
#
# ## Explore & Clean the data:
df.drop(["id", "Unnamed: 32"], axis=1, inplace=True)
df.diagnosis = [1 if each == "M" else 0 for each in df.diagnosis]
df.info()
df.iloc[:, 1:]
y = df.diagnosis.values.reshape(-1, 1)
X = df.iloc[:, 1:].values
#
# ## Feature Scaling
# 
# ### Normalization
# Min-max normalize each feature to the [0, 1] range
X = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
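# For comparison, the same per-feature min-max scaling can be done with scikit-learn;
# X_minmax is a hypothetical name used only for this illustration.
from sklearn.preprocessing import MinMaxScaler
X_minmax = MinMaxScaler().fit_transform(df.iloc[:, 1:].values)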
#
# ## Train & Test Split
# 
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train = X_train.T
X_test = X_test.T
y_train = y_train.T
y_test = y_test.T
print(f"X_train: {X_train.shape}")
print(f"X_test: {X_test.shape}")
print(f"y_train: {y_train.shape}")
print(f"y_test: {y_test.shape}")
#
# ## Building a Model
# 
# #### Activation function: Sigmoid
# 
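# The sigmoid maps any real-valued z to the interval (0, 1): sigmoid(z) = 1 / (1 + exp(-z)); outputs above 0.5 are classified as 1.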
# Parameter initialize and Activation function
def initialize_weights_and_bias(dimension):
w = np.full((dimension, 1), 0.01)
b = 0.0
return w, b
def sigmoid(z):
y_head = 1 / (1 + np.exp(-z))
return y_head
def forward_backward_propagation(w, b, X_train, y_train):
# forward propagation
z = np.dot(w.T, X_train) + b
y_head = sigmoid(z)
loss = -y_train * np.log(y_head) - (1 - y_train) * np.log(1 - y_head)
cost = (np.sum(loss)) / X_train.shape[1] # X_train.shape[1] is for scaling
# backward propagation
derivative_weight = (np.dot(X_train, ((y_head - y_train).T))) / X_train.shape[
1
] # X_train.shape[1] is for scaling
derivative_bias = (
np.sum(y_head - y_train) / X_train.shape[1]
) # X_train.shape[1] is for scaling
gradients = {
"derivative_weight": derivative_weight,
"derivative_bias": derivative_bias,
}
return cost, gradients
# %% Updating(learning) parameters
def update(w, b, X_train, y_train, learning_rate, number_of_iterarion):
cost_list = []
cost_list2 = []
index = []
# updating(learning) parameters is number_of_iterarion times
for i in range(number_of_iterarion):
# make forward and backward propagation and find cost and gradients
cost, gradients = forward_backward_propagation(w, b, X_train, y_train)
cost_list.append(cost)
# lets update
w = w - learning_rate * gradients["derivative_weight"]
b = b - learning_rate * gradients["derivative_bias"]
if i % 10 == 0:
cost_list2.append(cost)
index.append(i)
print("Cost after iteration %i: %f" % (i, cost))
# we update(learn) parameters weights and bias
parameters = {"weight": w, "bias": b}
plt.plot(index, cost_list2)
plt.xticks(index, rotation="vertical")
plt.xlabel("Number of Iterarion")
plt.ylabel("Cost")
plt.show()
return parameters, gradients, cost_list
# %% # prediction
def predict(w, b, X_test):
# X_test is a input for forward propagation
z = sigmoid(np.dot(w.T, X_test) + b)
Y_prediction = np.zeros((1, X_test.shape[1]))
# if z is bigger than 0.5, our prediction is sign one (y_head=1),
# if z is smaller than 0.5, our prediction is sign zero (y_head=0),
for i in range(z.shape[1]):
if z[0, i] <= 0.5:
Y_prediction[0, i] = 0
else:
Y_prediction[0, i] = 1
return Y_prediction
def logistic_regression(
X_train, y_train, X_test, y_test, learning_rate, num_iterations
):
# initialize
dimension = X_train.shape[0] # that is 30
w, b = initialize_weights_and_bias(dimension)
# do not change learning rate
parameters, gradients, cost_list = update(
w, b, X_train, y_train, learning_rate, num_iterations
)
y_prediction_test = predict(parameters["weight"], parameters["bias"], X_test)
# Print test Errors
print(
"test accuracy: {} %".format(
100 - np.mean(np.abs(y_prediction_test - y_test)) * 100
)
)
logistic_regression(
X_train, y_train, X_test, y_test, learning_rate=1, num_iterations=300
)
# %% sklearn with LR
lr = LogisticRegression()
lr.fit(X_train.T, y_train.T)
print(f"Test Accuracy {lr.score(X_test.T,y_test.T)}")
|