| file_id | content | local_path | kaggle_dataset_name | kaggle_dataset_owner | kversion | kversion_datasetsources | dataset_versions | datasets | users | script | df_info | has_data_info | nb_filenames | retreived_data_description | script_nb_tokens | upvotes | tokens_description | tokens_script |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5-9 | stringlengths 100-5.25M | stringlengths 66-70 | stringlengths 3-50 ⌀ | stringlengths 3-20 ⌀ | stringlengths 497-763 ⌀ | stringlengths 71-5.46k ⌀ | stringlengths 338-235k ⌀ | stringlengths 334-371 ⌀ | stringlengths 111-264 ⌀ | stringlengths 100-5.25M | stringlengths 0-4.87M | bool 2 classes | int64 0-370 | stringlengths 0-4.44M | int64 25-663k | int64 0-1.65k | int64 25-663k | int64 25-663k |
69003341
|
# Hi, this is my first attempt at solving machine learning and data-analysis problems with Python, so I'll stick to basic techniques.
# **Translated from Spanish**: Hi, this is my first approach to solving ML problems with Python, so don't expect very advanced code. I'm simply going to apply the basics I've learned over the last few days so I can improve little by little. For some of the reasoning I've also drawn inspiration from analyses shared by people more experienced than I am.
# **Steps**:
# **0.** Initial imports and load data
# **1.** Data engineering -> Analyze feature by feature
# **2.** Apply different models
# **3.** Choose the best model
# # 0. Initial imports and load Data
# Initial imports
import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
# train_data.head()
# # 1. Analyze and categorize the data
# We'll work with a dataset that combines the training and test data, since we want both in the same format:
train["Survived"] = train["Survived"].astype(int)
train_test = pd.concat([train, test], sort=True).reset_index(drop=True)
train_test.isna().sum()
# We can see that both 'Age' and 'Cabin' have many missing values, while almost all the other columns are filled in (which doesn't mean every 'filled' value is relevant). A first step is to work out reasonable values to fill those columns with.
# Then analyze each variable one by one to see its values and decide whether it should be taken into account for our model. In short:
# **1.** Fill in missing data
#
# 1.1 Find which variables influence the columns with missing values
# 1.2 Fill them with the typical values found
#
# **2.** Categorize variables into numeric ranges
# -----------------------------------------------------
# Let's look at the correlation matrix of each feature:
sns.heatmap(train_test.iloc[:, 0:12].corr(), annot=True, fmt=".0%")
# This matrix only correlates numeric data: we need to check whether other columns that currently provide little information (because of their format) can be transformed into numeric ranges that do.
train_test.info()
# Fields that are not int/float must be categorized (if we consider them important for the model).
# # Name: Categorize values
# The name is perhaps the column that contributes the least in its raw form. We need to extract the "title" from every name and categorize the value ranges to get information out of it.
train_test["Title"] = train_test.Name.str.extract(r"([A-Za-z]+)\.", expand=False)
train_test["Title"].unique()
train_test.head()
# All these titles are probably associated with a gender and an age range. Let's look at the relationship.
pd.crosstab(train_test["Title"], train_test["Sex"])
fig, ax = plt.subplots()
ax.scatter(
x=train_test["Age"], y=train_test["Title"], alpha=0.2
) # alpha=0.2 specifies the opacity
ax.set_xlabel("Age")
ax.set_ylabel("Title")
# Looking at these relationships, and above all at how much data we have, it seems sensible to group all these values into 4 main ones:
# 1. Mrs: older women
# 2. Miss: younger women
# 3. Master: young boys
# 4. Mr: men who are not 'Master'
#
# Replace values
train_test["Title"] = train_test["Title"].replace("Dona", "Mrs")
train_test["Title"] = train_test["Title"].replace("Countess", "Mrs")
train_test["Title"] = train_test["Title"].replace("Lady", "Mrs")
train_test["Title"] = train_test["Title"].replace("Mlle", "Miss")
train_test["Title"] = train_test["Title"].replace("Ms", "Miss")
train_test["Title"] = train_test["Title"].replace("Mme", "Miss")
train_test["Title"] = train_test["Title"].replace("Capt", "Mr")
train_test["Title"] = train_test["Title"].replace("Col", "Mr")
train_test["Title"] = train_test["Title"].replace("Don", "Mr")
train_test["Title"] = train_test["Title"].replace("Dr", "Mr")
train_test["Title"] = train_test["Title"].replace("Jonkheer", "Mr")
train_test["Title"] = train_test["Title"].replace("Major", "Mr")
train_test["Title"] = train_test["Title"].replace("Rev", "Mr")
train_test["Title"] = train_test["Title"].replace("Sir", "Mr")
# Map data
train_test["Title"] = (
train_test["Title"].map({"Mrs": 1, "Miss": 2, "Master": 3, "Mr": 4}).astype(int)
)
# Now we seem to have better information from the passengers' names. Since gender and age were used to decide these values, we can use the 'Title' to fill the missing ages with the median age of the passengers sharing that title:
round(train_test.groupby("Title")["Age"].median())
np.unique(train_test[train_test["Age"].isna()]["Title"], return_counts=True)
# We can see that the vast majority of the missing ages belong to 'Mr' passengers.
# Now, is it too simplistic to estimate the age as the median of the other 'Mr' passengers? Or is there another feature strongly related to the title that would let us be more precise?
# In this solution we go ahead with the simpler approach.
train_test["Age"] = train_test.groupby(["Title"])["Age"].apply(
lambda x: x.fillna(round(x.median()))
)
train_test["Age"] = train_test["Age"].astype(int)
# OK, so **the ages are now filled in**.
# However, we could group ages into ranges to simplify the model. How many bins? We choose 3 for simplicity: the 'qcut' function does the rest for us:
age_ranges = 3
pd.qcut(train_test["Age"], age_ranges)
train_test["Age_Range"] = pd.qcut(train_test["Age"], age_ranges, labels=False)
train_test.groupby(["Age_Range"]).mean()["Survived"]
train_test.isna().sum()
# Both 'Embarked' and 'Fare' have very few missing records. Since there are so few, we won't overthink it:
train_test["Embarked"].hist()
# We fill the 2 records without 'Embarked' with 'S', since most passengers embarked at that port.
train_test["Embarked"] = train_test["Embarked"].fillna("S")
train_test["Embarked"] = train_test["Embarked"].map({"C": 1, "Q": 2, "S": 3})
# For 'Fare', let's see which of the remaining features is most strongly correlated with it:
sns.heatmap(train_test.iloc[:, 0:14].corr(), annot=True, fmt=".0%")
# The simplest possible approach is to use the median 'Fare' of the passengers of the corresponding class (Pclass), since that feature is strongly (inversely) related to 'Fare'.
# Why the median and not the mean? Because the 'Fare' values are not distributed very uniformly.
train_test["Fare"].hist()
train_test.groupby(["Pclass"]).Fare.median()
train_test["Fare"] = train_test["Fare"].fillna(
train_test.groupby(["Pclass"]).Fare.median()[3]
)
train_test.isna().sum()
# And what about 'Cabin'? A priori, in my very inexpert opinion, it shouldn't be relevant to a passenger's survival, so it won't be taken into account.
train_test["Cabin"].unique()
# Let's clean up the dataset a bit: drop the variables we no longer need
train_test.drop(columns=["Age", "Cabin", "Name", "Ticket"], inplace=True)
train_test.head()
# Convert the columns that are not yet numeric:
train_test["Sex"] = train_test["Sex"].map({"male": 1, "female": 2})
train_test["Sex"] = train_test["Sex"].astype(int)
train_test.head()
sns.heatmap(train_test.iloc[:, 0:10].corr(), annot=True, fmt=".0%")
#
# As a first attempt at analyzing the data this should do: the 'Parch' and 'SibSp' fields could be interesting, but they won't be considered this time. Also, since the fare is related to the class, it will be left out.
train_test.drop(columns=["Fare", "Parch", "SibSp", "Embarked"], inplace=True)
train_test.head()
# # ----------------------------------------------
# # Apply different models
# Extract the training and test data in the new format:
pd.options.mode.chained_assignment = None
train = train_test[train_test["Survived"].notna()]
train["Survived"] = train["Survived"].astype(int)
test = train_test.drop(train_test[train_test.Survived >= 0].index)
train.head()
test.head()
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
y = train["Survived"]
features = ["Pclass", "Title", "Sex", "Age_Range"]
X = pd.get_dummies(train[features])
X_test = pd.get_dummies(test[features])
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=2)
tree_depth = [2, 3, 4, 5, 6]
trees_num = [2, 5, 7, 10, 15]
for depth in tree_depth:
print("-------------------------------------------")
for tree_num in trees_num:
model = RandomForestClassifier(
n_estimators=tree_num, max_depth=depth, random_state=10
)
model.fit(train_X, train_y)
predictions = model.predict(val_X)
        acc = accuracy_score(val_y, predictions)
        print("Val. for {} depth and {} trees: {:.6f}".format(depth, tree_num, acc))
# The configuration that worked best (81.16%) in this small grid was depth 3 with 2 trees, so that's what we'll use for the final model:
model = RandomForestClassifier(n_estimators=2, max_depth=3, random_state=10)
model.fit(X, y)
predictions = model.predict(X_test)
predictions
output = pd.DataFrame({"PassengerId": test.PassengerId, "Survived": predictions})
output.to_csv("my_submission_3.csv", index=False)
print("Your submission was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003341.ipynb
| null | null |
[{"Id": 69003341, "ScriptId": 18655756, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7918363, "CreationDate": "07/25/2021 17:51:37", "VersionNumber": 2.0, "Title": "Pyth-tanic", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 254.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 248.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 3,192 | 0 | 3,192 | 3,192 |
||
69003082
|
<jupyter_start><jupyter_text>QMNIST - The Extended MNIST Dataset (120k images)
### Context
The exact preprocessing steps used to construct the MNIST dataset have long been lost. This leaves us with no reliable way to associate its characters with the ID of the writer and little hope to recover the full MNIST testing set that had 60K images but was never released. The official MNIST testing set only contains 10K randomly sampled images and is often considered too small to provide meaningful confidence intervals.
The QMNIST dataset was generated from the original data found in the NIST Special Database 19 with the goal to match the MNIST preprocessing as closely as possible.
### Content
The simplest way to use the QMNIST extended dataset is to download the unique file below (MNIST-120k). This pickle file has the same format as the standard MNIST data files but contains 120000 examples.
You can use the following lines of code to load the data:
```
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
```
`qmnist = unpickle("MNIST-120k")`
The data comes in a dictionary format, you can get the data and the labels separately by extracting the content from the dictionary:
```
data = qmnist['data']
labels = qmnist['labels']
```
### Source
The original QMNIST dataset was uploaded by Chhavi Yadav and Léon Bottou. Citation:
> Yadav, C. and Bottou, L., “Cold Case: The Lost MNIST Digits”, <i>arXiv e-prints</i>, 2019.
Link to the original paper: [https://arxiv.org/pdf/1905.10498.pdf](https://arxiv.org/pdf/1905.10498.pdf)
Link to the GitHub repository: [https://github.com/facebookresearch/qmnist](https://github.com/facebookresearch/qmnist)
My contribution was to collect all the images and labels into the same file and convert it into a pickle file so it is easier to load. Please consider mentioning the author if you use this dataset instead of the original version.
Kaggle dataset identifier: qmnist-the-extended-mnist-dataset-120k-images
<jupyter_script># # *Mnist Autoencoder*
# ## *Importing the libraries*
import tensorflow as tf
import tensorflow.keras as ks
import matplotlib.pyplot as plt
import numpy as np
import pickle
# ## *unpickling the data from the file*
def unpickle(file):
import pickle
with open(file, "rb") as fo:
dicty = pickle.load(fo, encoding="bytes")
return dicty
qmnist = unpickle("../input/qmnist-the-extended-mnist-dataset-120k-images/MNIST-120k")
# ## *After extraction we get the labels and the pixel data of all 120k images*
data = qmnist["data"]
labels = qmnist["labels"]
# ### *Let's visualize a single image to confirm that it is an MNIST-style digit*
img = plt.imshow(data[0])
plt.axis("off")
plt.show()
# ## *The extracted data consists of 120,000 28x28 images*
data.shape
# ## *Prepare the data for the autoencoder, which does not need the labels: we preprocess the images alone by normalizing them and reshaping each one to a single dimension*
def map_image(image):
image = image / 255.0
image = tf.reshape(image, shape=(784,))
return image
# Convert each image explicitly; wrapping the bare `map` object in np.array would give a
# 0-d object array that cannot be sliced later.
new_x = np.array([map_image(image).numpy() for image in data])
# ## *Here a simple autoencoder is built with only two Dense layers, one for encoding and one for decoding. We train the model with both x and y set to new_x, the preprocessed images*
def simple_autoencoder(inputs):
encoder = ks.layers.Dense(units=32, activation="relu")(inputs)
decoder = ks.layers.Dense(units=784, activation="sigmoid")(encoder)
return encoder, decoder
inputs = ks.layers.Input(shape=(784,))
encoder_output, decoder_output = simple_autoencoder(inputs)
encoder_model = ks.Model(inputs=inputs, outputs=encoder_output)
autoencoder_model = ks.Model(inputs=inputs, outputs=decoder_output)
autoencoder_model.compile(optimizer=ks.optimizers.Adam(), loss="binary_crossentropy")
batch_size = 128
train_steps = 110000 // batch_size  # steps per epoch over the 110k training images
autoencoder_history = autoencoder_model.fit(
    x=new_x[:110000],
    y=new_x[:110000],
    batch_size=batch_size,
    steps_per_epoch=train_steps,
    epochs=50,
    verbose=0,
)
# ## *After 50 epochs the autoencoder has trained well, which we can judge from its loss curve*
loss = autoencoder_history.history["loss"]
plt.figure(figsize=(6, 5))
plt.plot(range(1, 51), loss, color="red", label="training loss")
plt.title("Training loss")
plt.legend()
plt.show()
# ## *It's time to evaluate the model visually*
def display_one_row(disp_images, offset, shape=(28, 28)):
for idx, test_image in enumerate(disp_images):
plt.subplot(3, 10, offset + idx + 1)
plt.xticks([])
plt.yticks([])
test_img = np.reshape(test_image, shape)
plt.imshow(test_img, cmap="gray")
def display_results(disp_input_images, disp_encoded, disp_predicted, enc_shape=(8, 4)):
plt.figure(figsize=(15, 5))
display_one_row(disp_input_images, 0, shape=(28, 28))
display_one_row(disp_encoded, 10, shape=enc_shape)
display_one_row(disp_predicted, 20, shape=(28, 28))
# ## *Below you can see each input image -> its encoded representation -> the decoded reconstruction*
test_images = new_x[110000:]
output_samples = test_images
encoded_predicted = encoder_model.predict(test_images)
decoded_predicted = autoencoder_model.predict(test_images)
display_results(output_samples[:10], encoded_predicted[:10], decoded_predicted[:10])
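# A quick quantitative check to complement the visuals (a sketch, not in the original
# notebook): the reconstruction loss on the held-out images.
test_loss = autoencoder_model.evaluate(test_images, test_images, verbose=0)
print("Held-out reconstruction loss (binary cross-entropy):", test_loss)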
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003082.ipynb
|
qmnist-the-extended-mnist-dataset-120k-images
|
fedesoriano
|
[{"Id": 69003082, "ScriptId": 18824536, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5474165, "CreationDate": "07/25/2021 17:47:12", "VersionNumber": 1.0, "Title": "QMnist Autoencoder", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 107.0, "LinesInsertedFromPrevious": 107.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 91688289, "KernelVersionId": 69003082, "SourceDatasetVersionId": 2458532}]
|
[{"Id": 2458532, "DatasetId": 1488071, "DatasourceVersionId": 2500942, "CreatorUserId": 6402661, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "07/24/2021 15:31:01", "VersionNumber": 3.0, "Title": "QMNIST - The Extended MNIST Dataset (120k images)", "Slug": "qmnist-the-extended-mnist-dataset-120k-images", "Subtitle": "Improve the computer vision performance with the expanded version of MNIST data", "Description": "### Context\n\nThe exact preprocessing steps used to construct the MNIST dataset have long been lost. This leaves us with no reliable way to associate its characters with the ID of the writer and little hope to recover the full MNIST testing set that had 60K images but was never released. The official MNIST testing set only contains 10K randomly sampled images and is often considered too small to provide meaningful confidence intervals.\n\nThe QMNIST dataset was generated from the original data found in the NIST Special Database 19 with the goal to match the MNIST preprocessing as closely as possible.\n\n\n### Content\n\nThe simplest way to use the QMNIST extended dataset is to download the unique file below (MNIST-120k). This pickle file has the same format as the standard MNIST data files but contains 120000 examples.\n\nYou can use the following lines of code to load the data:\n```\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n```\n`qmnist = unpickle(\"MNIST-120k\")`\n\nThe data comes in a dictionary format, you can get the data and the labels separately by extracting the content from the dictionary:\n```\ndata = qmnist['data']\nlabels = qmnist['labels']\n```\n\n\n### Source\n\nThe original QMNIST dataset was uploaded by Chhavi Yadav and L\u00e9on Bottou. Citation:\n> Yadav, C. and Bottou, L., \u201cCold Case: The Lost MNIST Digits\u201d, <i>arXiv e-prints</i>, 2019.\n\nLink to the original paper: [https://arxiv.org/pdf/1905.10498.pdf](https://arxiv.org/pdf/1905.10498.pdf)\nLink to the GitHub repository: [https://github.com/facebookresearch/qmnist](https://github.com/facebookresearch/qmnist)\n\nMy contribution was to collect all the images and labels into the same file and convert it into a pickle file so it is easier to load. Please consider mentioning the author if you use this dataset instead of the original version.", "VersionNotes": "Data Update 2021/07/24", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1488071, "CreatorUserId": 6402661, "OwnerUserId": 6402661.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2458532.0, "CurrentDatasourceVersionId": 2500942.0, "ForumId": 1507762, "Type": 2, "CreationDate": "07/24/2021 15:04:30", "LastActivityDate": "07/24/2021", "TotalViews": 9691, "TotalDownloads": 859, "TotalVotes": 29, "TotalKernels": 20}]
|
[{"Id": 6402661, "UserName": "fedesoriano", "DisplayName": "fedesoriano", "RegisterDate": "12/18/2020", "PerformanceTier": 4}]
|
| false | 0 | 1,059 | 2 | 1,616 | 1,059 |
||
69003513
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
df
df.info()
df.groupby("YrSold")["SalePrice"].median().plot()
plt.xlabel("Year Sold")
plt.ylabel("Median House Price")
plt.title("House Price Per Year")
# #### In this year-wise view of prices, we see that the median house price falls over the years sold
# ### Missing Values handling
def percent_missing_data(frame):
    # Count and percentage of missing values per column (only columns with any missing data)
    missingcount = frame.isna().sum().sort_values(ascending=False)
    missingpercent = 100 * frame.isna().sum().sort_values(ascending=False) / len(frame)
    missingcount = pd.DataFrame(missingcount[missingcount > 0])
    missingpercent = pd.DataFrame(missingpercent[missingpercent > 0])
    missingtable = pd.concat([missingcount, missingpercent], axis=1)
    missingtable.columns = ["missingcount", "missingpercent"]
    return missingtable
missingvalues = percent_missing_data(df)
missingvalues
df = df.drop(["PoolQC", "MiscFeature", "Alley", "Fence"], axis=1)
df["MasVnrArea"] = df["MasVnrArea"].fillna(0)
df["MasVnrType"] = df["MasVnrType"].fillna("None")
bsmt_str_cols = ["BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"]
df[bsmt_str_cols] = df[bsmt_str_cols].fillna("None")
# basement numeric features ==> fill with 0
bsmt_num_cols = [
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
]
df[bsmt_num_cols] = df[bsmt_num_cols].fillna(0)
df["GarageType"] = df["GarageType"].fillna("Attached")
df["GarageCond"] = df["GarageCond"].fillna("NA")
df["GarageFinish"] = df["GarageFinish"].fillna("INC")
df["GarageQual"] = df["GarageQual"].fillna("NA")
df["GarageYrBlt"] = df["GarageYrBlt"].fillna(df.GarageYrBlt.mean())
df["Electrical"] = df["Electrical"].fillna("SBrkr")
df["Electrical"].isna().sum()
df["FireplaceQu"] = df["FireplaceQu"].fillna("None")
df["LotFrontage"] = df["LotFrontage"].fillna(df.LotFrontage.median())
missing_values = percent_missing_data(df)
missing_values
# print("No missing values")
# ### Numerical & categorical features
num_feat = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea", "SalePrice"]
for i in num_feat:
df[i] = np.log(df[i])
for j in df.select_dtypes(include="object"):
labels_ordered = df.groupby([j])["SalePrice"].mean().sort_values().index
labels_ordered = {k: i for i, k in enumerate(labels_ordered, 0)}
df[j] = df[j].map(labels_ordered)
df.head()
# ### Model
X = df.drop(["Id", "SalePrice"], axis=1)
Y = df["SalePrice"]
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.3, random_state=101
)
from sklearn.preprocessing import StandardScaler
scale = StandardScaler()
scale.fit(X_train)
X_train = scale.transform(X_train)
X_test = scale.transform(X_test)
# ### Ridge Model
from sklearn.linear_model import Ridge
rid_reg = Ridge(alpha=8.01)
rid_reg.fit(X_train, Y_train)
Y_pred_ridge = rid_reg.predict(X_test)
# Fit a second ridge model with much stronger regularization (alpha=100) for comparison
rid_reg_100 = Ridge(alpha=100)
rid_reg_100.fit(X_train, Y_train)
Y_pred = rid_reg_100.predict(X_test)
# testing the model
from sklearn.metrics import r2_score, mean_absolute_error
ridge_mae = mean_absolute_error(Y_test, Y_pred_ridge)
ridge_r2_score = r2_score(Y_test, Y_pred_ridge)
print("MAE for Ridge : ", ridge_mae)
print("R2 for Ridge: ", ridge_r2_score)
Y_pred.min()
plt.figure()
sns.regplot(x=Y_pred_ridge, y=Y_test)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003513.ipynb
| null | null |
[{"Id": 69003513, "ScriptId": 18830030, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7618519, "CreationDate": "07/25/2021 17:54:52", "VersionNumber": 2.0, "Title": "notebook5c11cbcc36", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 148.0, "LinesInsertedFromPrevious": 112.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 36.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
| false | 0 | 1,396 | 2 | 1,396 | 1,396 |
||
69003248
|
# Importing necessary packages
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import LabelEncoder
# Importing necessary datasets
hom_data = pd.read_csv("train.csv")
hom_data
hom_data.isnull().sum()
na_data = [data for data in hom_data.columns if hom_data[data].isnull().any() == True]
na_data
# Plot the median SalePrice for rows where each feature is missing (1) vs present (0)
for data in na_data:
df_copy = hom_data.copy()
df_copy[data] = np.where(df_copy[data].isnull(), 1, 0)
df_copy.groupby(data)["SalePrice"].median().plot.bar(color=["red", "green"])
print(df_copy.groupby(data)["SalePrice"].median())
plt.show()
# Extracting all the numerical columns from the dataset
num_data = [data for data in hom_data.columns if hom_data[data].dtypes != "O"]
print("Number of columns with numerical data:", len(num_data))
hom_data[num_data].head(7)
# Extracting datetime columns from the dataset
dt_data = [data for data in num_data if "Year" in data or "Yr" in data]
print("Number of columns with datetime data:", len(dt_data))
hom_data[dt_data].head(7)
# Analyzing yearly features with SalePrice
for data in dt_data:
df_copy = hom_data.copy()
df_copy.groupby(data)["SalePrice"].median().plot()
plt.show()
# The numerical columns split into discrete and continuous data; we extract each in turn
# Extracting discrete data
disc_data = [
data
for data in num_data
if len(hom_data[data].unique()) < 69 and data not in dt_data + ["Id"]
]
print("Number of columns with discrete data", len(disc_data))
hom_data[disc_data].head(7)
# Bar graphs between discrete data columns and sales price
for data in disc_data:
df_copy = hom_data.copy()
df_copy.groupby(data)["SalePrice"].median().plot.bar(
color=["red", "blue", "green", "yellow", "violet", "indigo", "orange", "black"]
)
plt.ylabel("SalePrice")
plt.show()
# Extracting continuous data
cont_data = [data for data in num_data if data not in disc_data + dt_data + ["Id"]]
print("Number of columns with continuous data: ", len(cont_data))
hom_data[cont_data].head(7)
# Histograms in between continuous data columns and sales price
for data in cont_data:
df_copy = hom_data.copy()
df_copy[data].hist(bins=16)
plt.ylabel("Count")
plt.xlabel(data)
plt.show()
# Box plot in between continuous data columns and sales price
for data in cont_data:
df_copy = hom_data.copy()
df_copy[data] = np.log1p(df_copy[data])
df_copy.boxplot(column=data)
plt.ylabel(data)
plt.show()
# Extracting categorical data columns
categ_data = [data for data in hom_data.columns if hom_data[data].dtypes == "O"]
print("Number of columns with categorical data:", len(categ_data))
hom_data[categ_data].head(7)
# Bar graphs in between categorical data columns and sales pricing
for data in categ_data:
df_copy = hom_data.copy()
df_copy.groupby(data)["SalePrice"].median().plot.bar(
color=["red", "blue", "green", "yellow", "violet", "indigo", "orange", "black"]
)
plt.show()
cat_data = [data for data in hom_data.columns if hom_data[data].dtypes == "O"]
hom_data[cat_data].head(7)
# Finding out percentage of missing values in categorical features
prct_na = (
hom_data[[data for data in hom_data.columns if hom_data[data].dtypes == "O"]]
.isnull()
.sum()
/ len(hom_data)
* 100
)
prct_na
# Removing categorical data columns where more than 50% of the values are missing
na_feat = prct_na[prct_na > 50]
na_feat
for data in na_feat.index:
hom_data.drop([data], axis=1, inplace=True)
na_data = [
data for data in hom_data.columns if hom_data[data].isnull().sum().any() == True
]
na_data
num_data = [data for data in na_data if hom_data[data].dtypes != "O"]
hom_data[num_data].isnull().sum() / len(hom_data) * 100
# Now we start doing feature engineering
X = hom_data.drop(["SalePrice"], axis=1)
y = hom_data["SalePrice"]
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=0)
X_train.shape, X_test.shape
train_df = pd.concat([X_train, y_train], axis=1)
test_df = pd.concat([X_test, y_test], axis=1)
# Features with null values in training set
na_data = [data for data in train_df.columns if train_df[data].isnull().any() == True]
na_data
# Now let us deal with numerical data
na_num = [data for data in na_data if train_df[data].dtypes != "O"]
print("Number of columns with null numerical data:", len(na_num))
train_df[na_num].head(7)
train_df[na_num].isnull().sum()
for data in na_num:
train_df[data].fillna(train_df[data].median(), inplace=True)
train_df[na_num].isnull().sum()
train_df[na_num].head(7)
train_df.head(7)
skew_num_data = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea", "SalePrice"]
for data in skew_num_data:
train_df[data] = np.log(train_df[data])
train_df.head(7)
# Analysing datetime data columns
train_df[dt_data].isnull().sum()
train_df[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head(7)
for data in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]:
train_df[data] = train_df["YrSold"] - train_df[data]
train_df[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head(7)
# Categorical data features
na_categ_data = [data for data in na_data if train_df[data].dtypes == "O"]
print("Number of columns with categorical data:", len(na_categ_data))
train_df[na_categ_data].head(7)
train_df[na_categ_data].isnull().sum() / len(train_df)
# Fill missing values in categorical data columns with the most frequent label (mode)
for data in na_categ_data:
val_mode = train_df[data].mode()[0]
train_df[data].fillna(val_mode, inplace=True)
train_df[na_categ_data].isnull().sum()
categ_data = [data for data in train_df.columns if train_df[data].dtypes == "O"]
for data in categ_data:
labels_order = train_df.groupby(data)["SalePrice"].mean().sort_values().index
labels_order = {k: i for i, k in enumerate(labels_order, 0)}
train_df[data] = train_df[data].map(labels_order)
train_df.head(7)
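# Sketch (not what this notebook does): the train-derived target-mean ordering could be stored
# per feature and reused on other splits instead of being re-derived there. Illustrated on a
# tiny hypothetical frame ('Street' is a real column; the '_demo' names are made up) so the
# pipeline above is untouched.
_demo = pd.DataFrame({"Street": ["Pave", "Grvl", "Pave"], "SalePrice": [12.0, 11.0, 12.5]})
_order = _demo.groupby("Street")["SalePrice"].mean().sort_values().index
_street_map = {k: i for i, k in enumerate(_order, 0)}
print(_street_map)  # apply elsewhere with other_frame["Street"].map(_street_map)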
# Performing feature engineering on test data keeping in mind data leakage
na_data = [data for data in test_df.columns if test_df[data].isnull().any() == True]
na_data
# **Numerical Feature** for test data
na_num = [data for data in na_data if test_df[data].dtypes != "O"]
print("Number of columns with null numerical data:", len(na_num))
test_df[na_num].isnull().sum()
for data in na_num:
test_df[data].fillna(test_df[data].median(), inplace=True)
test_df[na_num].isnull().sum()
skew_num_data = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea", "SalePrice"]
for data in skew_num_data:
test_df[data] = np.log(test_df[data])
test_df.head(7)
for feature in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]:
train_df[feature] = train_df["YrSold"] - train_df[feature]
train_df[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head()
# **Categorical features**
na_categ_data = [data for data in na_data if test_df[data].dtypes == "O"]
print("number of columns with null categorical data:", len(na_categ_data))
test_df[na_categ_data].head()
for feature in na_categ_data:
    mode_value = test_df[feature].mode()[0]
    test_df[feature].fillna(mode_value, inplace=True)
test_df[na_categ_data].isnull().sum()
categ_data = [data for data in test_df.columns if test_df[data].dtypes == "O"]
for feature in categ_data:
labels_order = test_df.groupby(feature)["SalePrice"].mean().sort_values().index
labels_order = {k: i for i, k in enumerate(labels_order, 0)}
test_df[feature] = test_df[feature].map(labels_order)
pd.set_option("display.max_columns", None)
test_df.head(5)
test_df.isnull().sum()
# Performing feature scaling on train and test data
scale_feature = [
feature for feature in train_df.columns if feature not in ["Id", "SalePrice"]
]
scaler = StandardScaler()
train_scaled = scaler.fit_transform(train_df[scale_feature])
test_scaled = scaler.transform(test_df[scale_feature])
X = pd.DataFrame(train_scaled, columns=scale_feature)
train_df = pd.concat([X, train_df["SalePrice"].reset_index(drop=True)], axis=1)
train_df.head(5)
train_df.isnull().sum()
X1 = pd.DataFrame(test_scaled, columns=scale_feature)
test_df = pd.concat([X1, test_df["SalePrice"].reset_index(drop=True)], axis=1)
test_df.head(5)
test_df.isnull().sum()
# # Feature Selection
X_train = train_df.drop(["SalePrice"], axis=1)
y_train = train_df["SalePrice"]
feature_sel_model = SelectFromModel(Lasso(alpha=0.01, random_state=0))
feature_sel_model.fit(X_train, y_train)
feature_sel_model.get_support()
selected_feat = X_train.columns[(feature_sel_model.get_support())]
print("selected features:", len(selected_feat))
selected_feat
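# For reference (a sketch, not in the original notebook): which columns the L1 selector dropped.
print("Dropped features:", [c for c in X_train.columns if c not in set(selected_feat)])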
X_train = X_train[selected_feat]
X_train.head(5)
X_test = test_df[selected_feat]
y_test = test_df["SalePrice"]
X_test.head(5)
X_test.isnull().sum()
# # Fitting model to dataset
rf_reg = RandomForestRegressor()
rf_reg.fit(X_train, y_train)
prediction = rf_reg.predict(X_test)
print("MAE:", mean_absolute_error(y_test, prediction))
print("MSE:", mean_squared_error(y_test, prediction))
print("RMSE:", np.sqrt(mean_squared_error(y_test, prediction)))
house_test = pd.read_csv("test.csv")
house_test.head(5)
null_features = [
features
for features in house_test.columns
if house_test[features].isnull().any() == True
]
null_features
null_numerical = [
feature for feature in null_features if house_test[feature].dtypes != "O"
]
print("Number of null numerical feature:", len(null_numerical))
house_test[null_numerical].isnull().sum()
for feature in null_numerical:
house_test[feature].fillna(house_test[feature].median(), inplace=True)
house_test[null_numerical].isnull().sum()
skew_num_features = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea"]
for feature in skew_num_features:
house_test[feature] = np.log(house_test[feature])
# year features
for feature in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]:
house_test[feature] = house_test["YrSold"] - house_test[feature]
house_test[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head()
null_categorical_feature = [
feature for feature in null_features if house_test[feature].dtypes == "O"
]
print("number of null categorical features:", len(null_categorical_feature))
null_categorical_feature
pct = house_test[null_categorical_feature].isnull().sum() / len(house_test)
miss_feature = pct[pct > 0.7]
miss_feature.index
for feature in miss_feature.index:
house_test.drop([feature], inplace=True, axis=1)
house_test.head()
null_feature = [
feature
for feature in house_test.columns
if house_test[feature].isnull().sum().any() == True
]
null_feature
null_categorical_feature = [
feature for feature in null_feature if house_test[feature].dtypes == "O"
]
for feature in null_categorical_feature:
mode_value = house_test[feature].mode()[0]
house_test[feature] = house_test[feature].fillna(mode_value)
house_test.isnull().sum()
house_test.head()
categorical_features = [
feature for feature in house_test.columns if house_test[feature].dtypes == "O"
]
# Note: LabelEncoder assigns alphabetical integer codes, which differs from the target-mean
# ordering used to encode the categorical columns of the training data above.
le = LabelEncoder()
for feature in categorical_features:
    house_test[feature] = le.fit_transform(house_test[feature])
house_test.head(5)
# performing feature scaling in house test data
house_test_scaled = scaler.transform(house_test[scale_feature])
X_house = pd.DataFrame(house_test_scaled, columns=scale_feature)
X_house.head(5)
X_house = X_house[selected_feat]
X_house.head(5)
price_prediction = rf_reg.predict(X_house)
price_prediction
np.exp(price_prediction)
# Prediction Metrics
sample = pd.read_csv("sample_submission.csv")
y_test = sample["SalePrice"]
print("MAE:", mean_absolute_error(np.log(y_test), price_prediction))
print("MSE:", mean_squared_error(np.log(y_test), price_prediction))
print("RMSE:", np.sqrt(mean_squared_error(np.log(y_test), price_prediction)))
house_test["SalePrice"] = np.exp(price_prediction)
submission = house_test[["Id", "SalePrice"]]
submission.to_csv("./final_submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003248.ipynb
| null | null |
[{"Id": 69003248, "ScriptId": 18830692, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7855089, "CreationDate": "07/25/2021 17:49:50", "VersionNumber": 1.0, "Title": "Aditya_Hriday_Sahu_House_Proj", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 412.0, "LinesInsertedFromPrevious": 412.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Importing necessary packages
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import LabelEncoder
# Importing necessary datasets
hom_data = pd.read_csv("train.csv")
hom_data
hom_data.isnull().sum()
na_data = [data for data in hom_data.columns if hom_data[data].isnull().any() == True]
na_data
# Plotting the features with Median Sales Price for 0 and 1 values in features
for data in na_data:
df_copy = hom_data.copy()
df_copy[data] = np.where(df_copy[data].isnull(), 1, 0)
df_copy.groupby(data)["SalePrice"].median().plot.bar(color=["red", "green"])
print(df_copy.groupby(data)["SalePrice"].median())
plt.show()
# Extracting all the numerical columns from the dastaset
num_data = [data for data in hom_data.columns if hom_data[data].dtypes != "O"]
print("Number of columns with numerical data:", len(num_data))
hom_data[num_data].head(7)
# Extracting datetime columns from the dataset
dt_data = [data for data in num_data if "Year" in data or "Yr" in data]
print("Number of columns with datetime data:", len(dt_data))
hom_data[dt_data].head(7)
# Analyzing yearly features with SalePrice
for data in dt_data:
df_copy = hom_data.copy()
df_copy.groupby(data)["SalePrice"].median().plot()
plt.show()
# Now we will be extracting numerical data which will be consisting of discrete data and numerical data
# Extracting discrete data
disc_data = [
data
for data in num_data
if len(hom_data[data].unique()) < 69 and data not in dt_data + ["Id"]
]
print("Number of columns with discrete data", len(disc_data))
hom_data[disc_data].head(7)
# Bar graphs between discrete data columns and sales price
for data in disc_data:
df_copy = hom_data.copy()
df_copy.groupby(data)["SalePrice"].median().plot.bar(
color=["red", "blue", "green", "yellow", "violet", "indigo", "orange", "black"]
)
plt.ylabel("SalePrice")
plt.show()
# Extracting continuous data
cont_data = [data for data in num_data if data not in disc_data + dt_data + ["Id"]]
print("Number of columns with continuous data: ", len(cont_data))
hom_data[cont_data].head(7)
# Histograms in between continuous data columns and sales price
for data in cont_data:
df_copy = hom_data.copy()
df_copy[data].hist(bins=16)
plt.ylabel("Count")
plt.xlabel(data)
plt.show()
# Box plot in between continuous data columns and sales price
for data in cont_data:
df_copy = hom_data.copy()
df_copy[data] = np.log1p(df_copy[data])
df_copy.boxplot(column=data)
plt.ylabel(data)
plt.show()
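# A numeric check of skewness (illustrative) to back up what the histograms and box
# plots suggest about which continuous columns may benefit from a log transform.
print(hom_data[cont_data].skew().sort_values(ascending=False))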
# Extracting categorical data columns
categ_data = [data for data in hom_data.columns if hom_data[data].dtypes == "O"]
print("Number of columns with categorical data:", len(categ_data))
hom_data[categ_data].head(7)
# Bar graphs of median sale price for each categorical data column
for data in categ_data:
df_copy = hom_data.copy()
df_copy.groupby(data)["SalePrice"].median().plot.bar(
color=["red", "blue", "green", "yellow", "violet", "indigo", "orange", "black"]
)
plt.show()
cat_data = [data for data in hom_data.columns if hom_data[data].dtypes == "O"]
hom_data[cat_data].head(7)
# Finding out percentage of missing values in categorical features
prct_na = (
hom_data[[data for data in hom_data.columns if hom_data[data].dtypes == "O"]]
.isnull()
.sum()
/ len(hom_data)
* 100
)
prct_na
# Removing categorical data columns where more than 50% of the values are missing
na_feat = prct_na[prct_na > 50]
na_feat
for data in na_feat.index:
hom_data.drop([data], axis=1, inplace=True)
na_data = [
data for data in hom_data.columns if hom_data[data].isnull().sum().any() == True
]
na_data
num_data = [data for data in na_data if hom_data[data].dtypes != "O"]
hom_data[num_data].isnull().sum() / len(hom_data) * 100
# Now we start doing feature engineering
X = hom_data.drop(["SalePrice"], axis=1)
y = hom_data["SalePrice"]
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=0)
X_train.shape, X_test.shape
train_df = pd.concat([X_train, y_train], axis=1)
test_df = pd.concat([X_test, y_test], axis=1)
# Features with null values in training set
na_data = [data for data in train_df.columns if train_df[data].isnull().any() == True]
na_data
# Now let us deal with numerical data
na_num = [data for data in na_data if train_df[data].dtypes != "O"]
print("Number of columns with null numerical data:", len(na_num))
train_df[na_num].head(7)
train_df[na_num].isnull().sum()
for data in na_num:
train_df[data].fillna(train_df[data].median(), inplace=True)
train_df[na_num].isnull().sum()
train_df[na_num].head(7)
train_df.head(7)
skew_num_data = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea", "SalePrice"]
for data in skew_num_data:
train_df[data] = np.log(train_df[data])
train_df.head(7)
# Analysing datetime data columns
train_df[dt_data].isnull().sum()
train_df[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head(7)
for data in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]:
train_df[data] = train_df["YrSold"] - train_df[data]
train_df[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head(7)
# Categorical data features
na_categ_data = [data for data in na_data if train_df[data].dtypes == "O"]
print("Number of columns with categorical data:", len(na_categ_data))
train_df[na_categ_data].head(7)
train_df[na_categ_data].isnull().sum() / len(train_df)
# Replacing null values in categorical data columns with the most frequent label (mode)
for data in na_categ_data:
val_mode = train_df[data].mode()[0]
train_df[data].fillna(val_mode, inplace=True)
train_df[na_categ_data].isnull().sum()
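# Illustrative preview of the target-guided encoding applied below, using "MSZoning"
# as an example column (guarded in case it was dropped earlier): categories are
# ordered from lowest to highest mean (log) SalePrice.
if "MSZoning" in train_df.columns:
    print(train_df.groupby("MSZoning")["SalePrice"].mean().sort_values())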
categ_data = [data for data in train_df.columns if train_df[data].dtypes == "O"]
for data in categ_data:
labels_order = train_df.groupby(data)["SalePrice"].mean().sort_values().index
labels_order = {k: i for i, k in enumerate(labels_order, 0)}
train_df[data] = train_df[data].map(labels_order)
train_df.head(7)
# Performing feature engineering on test data keeping in mind data leakage
na_data = [data for data in test_df.columns if test_df[data].isnull().any() == True]
na_data
# **Numerical Feature** for test data
na_num = [data for data in na_data if test_df[data].dtypes != "O"]
print("Number of columns with null numerical data:", len(na_num))
test_df[na_num].isnull().sum()
for data in na_num:
test_df[data].fillna(test_df[data].median(), inplace=True)
test_df[na_num].isnull().sum()
skew_num_data = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea", "SalePrice"]
for data in skew_num_data:
test_df[data] = np.log(test_df[data])
test_df.head(7)
for feature in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]:
train_df[feature] = train_df["YrSold"] - train_df[feature]
train_df[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head()
# **Categorical features**
na_categ_data = [data for data in na_data if test_df[data].dtypes == "O"]
print("number of columns with null categorical data:", len(na_categ_data))
test_df[na_categ_data].head()
for feature in na_categ_data:
    mode_value = test_df[feature].mode()[0]
    test_df[feature] = test_df[feature].fillna(mode_value)
test_df[na_categ_data].isnull().sum()
# Target-guided encoding of the test split (note: the ordering is recomputed from the
# test data itself; reusing the ordering learned on the train split would keep the
# two encodings consistent)
categ_data = [data for data in test_df.columns if test_df[data].dtypes == "O"]
for feature in categ_data:
labels_order = test_df.groupby(feature)["SalePrice"].mean().sort_values().index
labels_order = {k: i for i, k in enumerate(labels_order, 0)}
test_df[feature] = test_df[feature].map(labels_order)
pd.set_option("display.max_columns", None)
test_df.head(5)
test_df.isnull().sum()
# Performing feature scaling on train and test data
scale_feature = [
feature for feature in train_df.columns if feature not in ["Id", "SalePrice"]
]
scaler = StandardScaler()
train_scaled = scaler.fit_transform(train_df[scale_feature])
test_scaled = scaler.transform(test_df[scale_feature])
X = pd.DataFrame(train_scaled, columns=scale_feature)
train_df = pd.concat([X, train_df["SalePrice"].reset_index(drop=True)], axis=1)
train_df.head(5)
train_df.isnull().sum()
X1 = pd.DataFrame(test_scaled, columns=scale_feature)
test_df = pd.concat([X1, test_df["SalePrice"].reset_index(drop=True)], axis=1)
test_df.head(5)
test_df.isnull().sum()
# # Feature Selection
X_train = train_df.drop(["SalePrice"], axis=1)
y_train = train_df["SalePrice"]
feature_sel_model = SelectFromModel(Lasso(alpha=0.01, random_state=0))
feature_sel_model.fit(X_train, y_train)
feature_sel_model.get_support()
selected_feat = X_train.columns[(feature_sel_model.get_support())]
print("selected features:", len(selected_feat))
selected_feat
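# Optional, illustrative: inspect the Lasso coefficients behind the selection above;
# features whose coefficients were shrunk to (near) zero are the ones dropped.
lasso_coefs = pd.Series(feature_sel_model.estimator_.coef_, index=X_train.columns)
print(lasso_coefs[lasso_coefs != 0].sort_values())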
X_train = X_train[selected_feat]
X_train.head(5)
X_test = test_df[selected_feat]
y_test = test_df["SalePrice"]
X_test.head(5)
X_test.isnull().sum()
# # Fitting model to dataset
rf_reg = RandomForestRegressor()
rf_reg.fit(X_train, y_train)
prediction = rf_reg.predict(X_test)
print("MAE:", mean_absolute_error(y_test, prediction))
print("MSE:", mean_squared_error(y_test, prediction))
print("RMSE:", np.sqrt(mean_squared_error(y_test, prediction)))
house_test = pd.read_csv("test.csv")
house_test.head(5)
null_features = [
features
for features in house_test.columns
if house_test[features].isnull().any() == True
]
null_features
null_numerical = [
feature for feature in null_features if house_test[feature].dtypes != "O"
]
print("Number of null numerical feature:", len(null_numerical))
house_test[null_numerical].isnull().sum()
for feature in null_numerical:
house_test[feature].fillna(house_test[feature].median(), inplace=True)
house_test[null_numerical].isnull().sum()
skew_num_features = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea"]
for feature in skew_num_features:
house_test[feature] = np.log(house_test[feature])
# year features
for feature in ["YearBuilt", "YearRemodAdd", "GarageYrBlt"]:
house_test[feature] = house_test["YrSold"] - house_test[feature]
house_test[["YearBuilt", "YearRemodAdd", "GarageYrBlt", "YrSold"]].head()
null_categorical_feature = [
feature for feature in null_features if house_test[feature].dtypes == "O"
]
print("number of null categorical features:", len(null_categorical_feature))
null_categorical_feature
pct = house_test[null_categorical_feature].isnull().sum() / len(house_test)
miss_feature = pct[pct > 0.7]
miss_feature.index
for feature in miss_feature.index:
house_test.drop([feature], inplace=True, axis=1)
house_test.head()
null_feature = [
feature
for feature in house_test.columns
if house_test[feature].isnull().sum().any() == True
]
null_feature
null_categorical_feature = [
feature for feature in null_feature if house_test[feature].dtypes == "O"
]
for feature in null_categorical_feature:
mode_value = house_test[feature].mode()[0]
house_test[feature] = house_test[feature].fillna(mode_value)
house_test.isnull().sum()
house_test.head()
categorical_features = [
    feature for feature in house_test.columns if house_test[feature].dtypes == "O"
]
# Note: LabelEncoder assigns alphabetical codes, which do not match the target-guided
# ordering used on the training data, so the encodings of train and final test differ.
le = LabelEncoder()
for feature in categorical_features:
    house_test[feature] = le.fit_transform(house_test[feature])
house_test.head(5)
# performing feature scaling in house test data
house_test_scaled = scaler.transform(house_test[scale_feature])
X_house = pd.DataFrame(house_test_scaled, columns=scale_feature)
X_house.head(5)
X_house = X_house[selected_feat]
X_house.head(5)
price_prediction = rf_reg.predict(X_house)
price_prediction
np.exp(price_prediction)
# Prediction metrics against sample_submission.csv (the sample file does not contain
# the true sale prices, so these numbers are only a rough sanity check)
sample = pd.read_csv("sample_submission.csv")
y_test = sample["SalePrice"]
print("MAE:", mean_absolute_error(np.log(y_test), price_prediction))
print("MSE:", mean_squared_error(np.log(y_test), price_prediction))
print("RMSE:", np.sqrt(mean_squared_error(np.log(y_test), price_prediction)))
house_test["SalePrice"] = np.exp(price_prediction)
submission = house_test[["Id", "SalePrice"]]
submission.to_csv("./final_submission.csv", index=False)
Please refer to the official [Github page](https://github.com/allisonhorst/penguins/blob/master/README.md) for details and license information. The details below have also been taken from there.
Artwork: @allison_horst
# Palmer Archipelago (Antarctica) penguin data
Data were collected and made available by [Dr. Kristen Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) and the [Palmer Station, Antarctica LTER](https://pal.lternet.edu/), a member of the [Long Term Ecological Research Network](https://lternet.edu/).
**Thank you** to Dr. Gorman, Palmer Station LTER and the LTER Network! Special thanks to Marty Downs (Director, LTER Network Office) for help regarding the data license & use.
## License & citation
- **Data are available by** [CC-0](https://creativecommons.org/share-your-work/public-domain/cc0/) license in accordance with the [Palmer Station LTER Data Policy](http://pal.lternet.edu/data/policies) and the [LTER Data Access Policy for Type I data](https://lternet.edu/data-access-policy/).
- **Please cite this data using:** Gorman KB, Williams TD, Fraser WR (2014) Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus *Pygoscelis*). PLoS ONE 9(3): e90081. doi:10.1371/journal.pone.0090081
## Summary:
The data folder contains two CSV files. For intro courses/examples, you probably want to use the first one (penguins_size.csv).
- **penguins_size.csv**: Simplified data from original penguin data sets. Contains variables:
- `species`: penguin species (Chinstrap, Adélie, or Gentoo)
- `culmen_length_mm`: culmen length (mm)
- `culmen_depth_mm`: culmen depth (mm)
- `flipper_length_mm`: flipper length (mm)
- `body_mass_g`: body mass (g)
- `island`: island name (Dream, Torgersen, or Biscoe) in the Palmer Archipelago (Antarctica)
- `sex`: penguin sex
- **penguins_lter.csv**: Original combined data for 3 penguin species (aggregated from individual links below)
#### Meet the penguins:

#### What are culmen length & depth?
The culmen is "the upper ridge of a bird's beak" (definition from Oxford Languages).
For this penguin data, the culmen length and culmen depth are measured as shown below (thanks Kristen Gorman for clarifying!):

<hr>
## Data:
These data are originally published in:
[**Gorman KB, Williams TD, Fraser WR** (2014) Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus *Pygoscelis*). PLoS ONE 9(3): e90081. doi:10.1371/journal.pone.0090081](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0090081)
Anyone interested in publishing the data should contact [Dr. Kristen Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) about analysis and working together on any final products.
From Gorman et al. (2014): "Data reported here are publicly available within the PAL-LTER data system (datasets #219, 220, and 221): http://oceaninformatics.ucsd.edu/datazoo/data/pallter/datasets. These data are additionally archived within the United States (US) LTER Network’s Information System Data Portal: https://portal.lternet.edu/. Individuals interested in using these data are therefore expected to follow the US LTER Network’s Data Access Policy, Requirements and Use Agreement: https://lternet.edu/data-access-policy/."
From the LTER data access policy: "The consumer of these data (“Data User” herein) has an ethical obligation to cite it appropriately in any publication that results from its use. The Data User should realize that these data may be actively used by others for ongoing research and that coordination may be necessary to prevent duplicate publication. The Data User is urged to contact the authors of these data if any questions about methodology or results occur. Where appropriate, the Data User is encouraged to consider collaboration or coauthorship with the authors. The Data User should realize that misinterpretation of data may occur if used out of context of the original study. While substantial efforts are made to ensure the accuracy of data and associated documentation, complete accuracy of data sets cannot be guaranteed. All data are made available “as is.” The Data User should be aware, however, that data are updated periodically and it is the responsibility of the Data User to check for new versions of the data. The data authors and the repository where these data were obtained shall not be liable for damages resulting from any use or misinterpretation of the data. Thank you."
## Links to original data & metadata:
Original data accessed via the [Environmental Data Initiative](https://environmentaldatainitiative.org/):
**Adélie penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Adélie penguins (*Pygoscelis adeliae*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 5. Environmental Data Initiative. https://doi.org/10.6073/pasta/98b16d7d563f265cb52372c8ca99e60f (Accessed 2020-06-08).
**Gentoo penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Gentoo penguin (*Pygoscelis papua*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 5. Environmental Data Initiative. https://doi.org/10.6073/pasta/7fca67fb28d56ee2ffa3d9370ebda689 (Accessed 2020-06-08).
**Chinstrap penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Chinstrap penguin (*Pygoscelis antarcticus*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 6. Environmental Data Initiative. https://doi.org/10.6073/pasta/c14dfcfada8ea13a17536e73eb6fbe9e (Accessed 2020-06-08).
Kaggle dataset identifier: palmer-archipelago-antarctica-penguin-data
import pandas as pd
df = pd.read_csv('palmer-archipelago-antarctica-penguin-data/penguins_size.csv')
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 344 entries, 0 to 343
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 species 344 non-null object
1 island 344 non-null object
2 culmen_length_mm 342 non-null float64
3 culmen_depth_mm 342 non-null float64
4 flipper_length_mm 342 non-null float64
5 body_mass_g 342 non-null float64
6 sex 334 non-null object
dtypes: float64(4), object(3)
memory usage: 18.9+ KB
Examples:
{
"species": "Adelie",
"island": "Torgersen",
"culmen_length_mm": 39.1,
"culmen_depth_mm": 18.7,
"flipper_length_mm": 181.0,
"body_mass_g": 3750.0,
"sex": "MALE"
}
{
"species": "Adelie",
"island": "Torgersen",
"culmen_length_mm": 39.5,
"culmen_depth_mm": 17.4,
"flipper_length_mm": 186.0,
"body_mass_g": 3800.0,
"sex": "FEMALE"
}
{
"species": "Adelie",
"island": "Torgersen",
"culmen_length_mm": 40.3,
"culmen_depth_mm": 18.0,
"flipper_length_mm": 195.0,
"body_mass_g": 3250.0,
"sex": "FEMALE"
}
{
"species": "Adelie",
"island": "Torgersen",
"culmen_length_mm": NaN,
"culmen_depth_mm": NaN,
"flipper_length_mm": NaN,
"body_mass_g": NaN,
"sex": null
}
import numpy as np  # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
pen = pd.read_csv(
"../input/palmer-archipelago-antarctica-penguin-data/penguins_size.csv"
)
pen.head()
pen.describe()
pen.info()
pen.isna().sum()
pen = pen.drop([3, 339], axis=0)
# rows 3 and 339 have null values in every measurement column, so we drop them
pen.head()
pen.isnull().sum()
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="most_frequent")
pen.iloc[:, :] = imputer.fit_transform(pen)
pen.isnull().sum()
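# An alternative worth considering (a sketch only; the frame above is already imputed,
# so running it here changes nothing): median for numeric columns, mode for the rest.
num_cols = pen.select_dtypes(include="number").columns
cat_cols = [c for c in pen.columns if c not in num_cols]
if len(num_cols) > 0:
    pen[num_cols] = SimpleImputer(strategy="median").fit_transform(pen[num_cols])
if len(cat_cols) > 0:
    pen[cat_cols] = SimpleImputer(strategy="most_frequent").fit_transform(pen[cat_cols])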
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 9))
plt.subplot(2, 2, 1)
plt.boxplot(pen["culmen_length_mm"])
plt.title("Boxplot_culmen_length_mm")
plt.subplot(2, 2, 2)
plt.boxplot(pen["culmen_depth_mm"])
plt.title("Boxplot_culmen_depth_mm")
plt.subplot(2, 2, 3)
plt.boxplot(pen["flipper_length_mm"])
plt.title("Boxplot_flipper_length_mm")
plt.subplot(2, 2, 4)
plt.boxplot(pen["body_mass_g"])
plt.title("Boxplot_body_mass_g")
plt.show()
# no outliers in the dataset
plt.figure(figsize=(9, 9))
plt.subplot(2, 2, 1)
plt.hist(pen["culmen_length_mm"])
plt.title("Histogram_culmen_length_mm")
plt.subplot(2, 2, 2)
plt.hist(pen["culmen_depth_mm"])
plt.title("Histogram_culmen_depth_mm")
plt.subplot(2, 2, 3)
plt.hist(pen["flipper_length_mm"])
plt.title("Histogram_flipper_length_mm")
plt.subplot(2, 2, 4)
plt.hist(pen["body_mass_g"])
plt.title("Histogram_body_mass_g")
plt.show()
for i in pen.columns:
print(i, " = ", pen[i].nunique())
pen["sex"].unique()
pen[pen["sex"] == "."]
pen = pen.drop([336], axis=0)  # drop the single row whose sex is recorded as "."
print("species =", pen.species.unique())
print("island =", pen.island.unique())
print("sex =", pen.sex.unique())
pen["species"].value_counts()
plt.scatter(pen["species"], pen["island"])
# Adelie lives on all three islands, whereas Gentoo is found only on Biscoe and Chinstrap only on Dream.
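# A crosstab (illustrative) makes the species/island relationship easier to read than
# the scatter plot above.
print(pd.crosstab(pen["species"], pen["island"]))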
pen_bm = pen["body_mass_g"].value_counts()
pen_bm.head()
pen[pen["body_mass_g"] == 3800.0]
pen[pen["body_mass_g"] == 3700.0]
pen.cov()
pen.corr()
plt.figure(figsize=(9, 5))
plt.subplot(1, 2, 1)
plt.plot(pen.culmen_length_mm, pen.culmen_depth_mm)
plt.title("pen.culmen_length_mm vs pen.culmen_depth_mm")
plt.subplot(1, 2, 2)
plt.plot(pen.flipper_length_mm, pen.body_mass_g)
plt.title("pen.flipper_length_mm,pen.body_mass_g")
plt.show()
plt.figure(figsize=(9, 9))
plt.subplot(2, 2, 1)
plt.scatter(pen["flipper_length_mm"], pen["species"])
plt.title("pen[flipper_length_mm] vs pen[species]")
plt.subplot(2, 2, 2)
plt.scatter(pen["body_mass_g"], pen["species"])
plt.title("pen[body_mass_g] vs pen[species]")
plt.subplot(2, 2, 3)
plt.scatter(pen["culmen_depth_mm"], pen["species"])
plt.title("pen[culmen_depth_mm] vs pen[species]")
plt.subplot(2, 2, 4)
plt.scatter(pen["culmen_length_mm"], pen["species"])
plt.title("pen[culmen_length_mm] vs pen[species]")
plt.show()
pen_flipper_length = pen[pen["flipper_length_mm"] >= 215]
pen_flipper_length["species"].unique()
# we can conclude that a flipper length above 215 mm occurs only in Gentoo (in this dataset)
pen_body_mass = pen[pen["body_mass_g"] >= 5000]
pen_body_mass["species"].unique()
# only Gentoo reach a body mass above 5000 grams in this dataset.
pen_culmen_depth_mm = pen[pen["culmen_depth_mm"] <= 15]
pen_culmen_depth_mm["species"].unique()
# only Gentoo have a culmen depth of 15 mm or less
pen_culmen_length_mm = pen[pen["culmen_length_mm"] <= 40]
pen_culmen_length_mm["species"].unique()
# only Adelie have a culmen length of 40 mm or less
import seaborn as sns
sns.pairplot(pen)
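# The same pairplot coloured by species (illustrative) is usually easier to interpret:
sns.pairplot(pen, hue="species")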
# conclusions:
# Adelie lives on all three islands, whereas Gentoo is found only on Biscoe and Chinstrap only on Dream.
# Gentoo is largely separable from the other species by flipper length, body mass and culmen depth.
# Most Adelie have a culmen length below 40 mm.
# # Tabular Playground Series - July 2021
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# print all the outputs in a cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_train = pd.read_csv(
"/kaggle/input/tabular-playground-series-jul-2021/train.csv", index_col=0
)
df_train.head()
df_train.shape
df_test = pd.read_csv(
"/kaggle/input/tabular-playground-series-jul-2021/test.csv", index_col=0
)
df_test.head()
df_test.shape
df_submission = pd.read_csv(
"/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv",
index_col=0,
)
df_submission.head()
df_submission.shape
df_test_full = df_test.merge(df_submission, how="inner", on="date_time")
df_test_full.head()
df_test_full.shape
# #### Visualization Train Data
df_train.index = pd.to_datetime(df_train.index)
df_train.index
df_train["deg_C"].plot(linewidth=0.5)
df_train["relative_humidity"].plot(linewidth=0.5)
cols_plot = [
"deg_C",
"relative_humidity",
"absolute_humidity",
"sensor_1",
"sensor_2",
"sensor_3",
"sensor_4",
]
axes = df_train[cols_plot].plot(
marker=".", alpha=0.5, linestyle="None", figsize=(11, 9), subplots=True
)
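# Illustrative: assuming the date_time index is hourly, a daily mean resample can make
# the seasonal trend easier to see than the raw series.
df_train["deg_C"].resample("D").mean().plot(linewidth=0.8)
plt.show()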
# create datasets
X, y = (
df_train.drop(
columns=["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"]
),
df_train[["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"]],
)
from sklearn.linear_model import LinearRegression
# define model
model = LinearRegression()
# fit model
model.fit(X, y)
model.score(X, y)
from sklearn.neighbors import KNeighborsRegressor
# define model
model = KNeighborsRegressor()
# fit model
model.fit(X, y)
model.score(X, y)
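# Note: the scores above are computed on the same data used for fitting, so they are
# optimistic. A rough sketch of a holdout check (X_tr/X_val/y_tr/y_val are new names):
from sklearn.model_selection import train_test_split

X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
val_model = KNeighborsRegressor().fit(X_tr, y_tr)
print("Holdout R^2:", val_model.score(X_val, y_val))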
Y_pred = model.predict(df_test)
Y_pred.shape
# Write the three target predictions into the submission frame in one step
df_submission.iloc[:, :] = Y_pred
df_submission
df_submission.to_csv("/kaggle/working/submission.csv")  # keep the date_time index so it is written as the id column
# Hacker News
### Context
This dataset contains all stories and comments from Hacker News from its launch in 2006. Each story contains a story id, the author that made the post, when it was written, and the number of points the story received. Hacker News is a social news website focusing on computer science and entrepreneurship. It is run by Paul Graham's investment fund and startup incubator, Y Combinator. In general, content that can be submitted is defined as "anything that gratifies one's intellectual curiosity".
### Content
Each story contains a story ID, the author that made the post, when it was written, and the number of points the story received.
Please note that the text field includes profanity. All texts are the author’s own, do not necessarily reflect the positions of Kaggle or Hacker News, and are presented without endorsement.
## Querying BigQuery tables
You can use the BigQuery Python client library to query tables in this dataset in Kernels. Note that methods available in Kernels are limited to querying data. Tables are at `bigquery-public-data.hacker_news.[TABLENAME]`. **Fork [this kernel][1] to get started**.
Kaggle dataset identifier: hacker-news
# # Introduction
# Now that you know how to access and examine a dataset, you're ready to write your first SQL query! As you'll soon see, SQL queries will help you sort through a massive dataset, to retrieve only the information that you need.
# We'll begin by using the keywords **SELECT**, **FROM**, and **WHERE** to get data from specific columns based on conditions you specify.
# For clarity, we'll work with a small imaginary dataset `pet_records` which contains just one table, called `pets`.
# 
# # SELECT ... FROM
# The most basic SQL query selects a single column from a single table. To do this,
# - specify the column you want after the word **SELECT**, and then
# - specify the table after the word **FROM**.
# For instance, to select the `Name` column (from the `pets` table in the `pet_records` database in the `bigquery-public-data` project), our query would appear as follows:
# 
# Note that when writing an SQL query, the argument we pass to **FROM** is *not* in single or double quotation marks (' or "). It is in backticks (\`).
# # WHERE ...
# BigQuery datasets are large, so you'll usually want to return only the rows meeting specific conditions. You can do this using the **WHERE** clause.
# The query below returns the entries from the `Name` column that are in rows where the `Animal` column has the text `'Cat'`.
# 
# # Example: What are all the U.S. cities in the OpenAQ dataset?
# Now that you've got the basics down, let's work through an example with a real dataset. We'll use an [OpenAQ](https://openaq.org) dataset about air quality.
# First, we'll set up everything we need to run queries and take a quick peek at what tables are in our database. (_Since you learned how to do this in the previous tutorial, we have hidden the code. But if you'd like to take a peek, you need only click on the "Code" button below._)
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "openaq" dataset
dataset_ref = client.dataset("openaq", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# List all the tables in the "openaq" dataset
tables = list(client.list_tables(dataset))
# Print names of all tables in the dataset (there's only one!)
for table in tables:
print(table.table_id)
# The dataset contains only one table, called `global_air_quality`. We'll fetch the table and take a peek at the first few rows to see what sort of data it contains. (_Again, we have hidden the code. To take a peek, click on the "Code" button below._)
# Construct a reference to the "global_air_quality" table
table_ref = dataset_ref.table("global_air_quality")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "global_air_quality" table
client.list_rows(table, max_results=5).to_dataframe()
# Everything looks good! So, let's put together a query. Say we want to select all the values from the `city` column that are in rows where the `country` column is `'US'` (for "United States").
# Query to select all the items from the "city" column where the "country" column is 'US'
query = """
SELECT city
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US'
"""
# Take the time now to ensure that this query lines up with what you learned above.
# # Submitting the query to the dataset
# We're ready to use this query to get information from the OpenAQ dataset. As in the previous tutorial, the first step is to create a [`Client`](https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.html#google.cloud.bigquery.client.Client) object.
# Create a "Client" object
client = bigquery.Client()
# We begin by setting up the query with the [`query()`](https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.query.html#google.cloud.bigquery.client.Client.query) method. We run the method with the default parameters, but this method also allows us to specify more complicated settings that you can read about in [the documentation](https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.query.html#google.cloud.bigquery.client.Client.query). We'll revisit this later.
# Set up the query
query_job = client.query(query)
# Next, we run the query and convert the results to a pandas DataFrame.
# API request - run the query, and return a pandas DataFrame
us_cities = query_job.to_dataframe()
# Now we've got a pandas DataFrame called `us_cities`, which we can use like any other DataFrame.
# What five cities have the most measurements?
us_cities.city.value_counts().head()
# # More queries
# If you want multiple columns, you can select them with a comma between the names:
query = """
SELECT city, country
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US'
"""
# You can select all columns with a `*` like this:
query = """
SELECT *
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US'
"""
# # Q&A: Notes on formatting
# The formatting of the SQL query might feel unfamiliar. If you have any questions, you can ask in the comments section at the bottom of this page. Here are answers to two common questions:
# ### **Question: What's up with the triple quotation marks (""")?**
# _Answer_: These tell Python that everything inside them is a single string, even though we have line breaks in it. The line breaks aren't necessary, but they make it easier to read your query.
# ### **Question: Do you need to capitalize SELECT and FROM?**
# _Answer_: No, SQL doesn't care about capitalization. However, it's customary to capitalize your SQL commands, and it makes your queries a bit easier to read.
# # Working with big datasets
# BigQuery datasets can be huge. We allow you to do a lot of computation for free, but everyone has some limit.
# **Each Kaggle user can scan 5TB every 30 days for free. Once you hit that limit, you'll have to wait for it to reset.**
# The [biggest dataset currently on Kaggle](https://www.kaggle.com/github/github-repos) is 3TB, so you can go through your 30-day limit in a couple queries if you aren't careful.
# Don't worry though: we'll teach you how to avoid scanning too much data at once, so that you don't run over your limit.
# To begin, you can estimate the size of any query before running it. Here is an example using the (*very large!*) Hacker News dataset. To see how much data a query will scan, we create a `QueryJobConfig` object and set the `dry_run` parameter to `True`.
# Query to get the score column from every row where the type column has value "job"
query = """
SELECT score, title
FROM `bigquery-public-data.hacker_news.full`
WHERE type = "job"
"""
# Create a QueryJobConfig object to estimate size of query without running it
dry_run_config = bigquery.QueryJobConfig(dry_run=True)
# API request - dry run query to estimate costs
dry_run_query_job = client.query(query, job_config=dry_run_config)
print(
"This query will process {} bytes.".format(dry_run_query_job.total_bytes_processed)
)
# You can also specify a parameter when running the query to limit how much data you are willing to scan. Here's an example with a low limit.
# Only run the query if it's less than 1 MB
ONE_MB = 1000 * 1000
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=ONE_MB)
# Set up the query (will only run if it's less than 1 MB)
safe_query_job = client.query(query, job_config=safe_config)
# API request - try to run the query, and return a pandas DataFrame
safe_query_job.to_dataframe()
# In this case, the query was cancelled, because the limit of 1 MB was exceeded. However, we can increase the limit to run the query successfully!
# Only run the query if it's less than 1 GB
ONE_GB = 1000 * 1000 * 1000
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=ONE_GB)
# Set up the query (will only run if it's less than 1 GB)
safe_query_job = client.query(query, job_config=safe_config)
# API request - try to run the query, and return a pandas DataFrame
job_post_scores = safe_query_job.to_dataframe()
# Print average score for job posts
job_post_scores.score.mean()
# # Introduction
# Now that you know how to access and examine a dataset, you're ready to write your first SQL query! As you'll soon see, SQL queries will help you sort through a massive dataset, to retrieve only the information that you need.
# We'll begin by using the keywords **SELECT**, **FROM**, and **WHERE** to get data from specific columns based on conditions you specify.
# For clarity, we'll work with a small imaginary dataset `pet_records` which contains just one table, called `pets`.
# 
# # SELECT ... FROM
# The most basic SQL query selects a single column from a single table. To do this,
# - specify the column you want after the word **SELECT**, and then
# - specify the table after the word **FROM**.
# For instance, to select the `Name` column (from the `pets` table in the `pet_records` database in the `bigquery-public-data` project), our query would appear as follows:
# 
# Note that when writing an SQL query, the argument we pass to **FROM** is *not* in single or double quotation marks (' or "). It is in backticks (\`).
# # WHERE ...
# BigQuery datasets are large, so you'll usually want to return only the rows meeting specific conditions. You can do this using the **WHERE** clause.
# The query below returns the entries from the `Name` column that are in rows where the `Animal` column has the text `'Cat'`.
# 
# # Example: What are all the U.S. cities in the OpenAQ dataset?
# Now that you've got the basics down, let's work through an example with a real dataset. We'll use an [OpenAQ](https://openaq.org) dataset about air quality.
# First, we'll set up everything we need to run queries and take a quick peek at what tables are in our database. (_Since you learned how to do this in the previous tutorial, we have hidden the code. But if you'd like to take a peek, you need only click on the "Code" button below._)
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "openaq" dataset
dataset_ref = client.dataset("openaq", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# List all the tables in the "openaq" dataset
tables = list(client.list_tables(dataset))
# Print names of all tables in the dataset (there's only one!)
for table in tables:
print(table.table_id)
# The dataset contains only one table, called `global_air_quality`. We'll fetch the table and take a peek at the first few rows to see what sort of data it contains. (_Again, we have hidden the code. To take a peek, click on the "Code" button below._)
# Construct a reference to the "global_air_quality" table
table_ref = dataset_ref.table("global_air_quality")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "global_air_quality" table
client.list_rows(table, max_results=5).to_dataframe()
# Everything looks good! So, let's put together a query. Say we want to select all the values from the `city` column that are in rows where the `country` column is `'US'` (for "United States").
# Query to select all the items from the "city" column where the "country" column is 'US'
query = """
SELECT city
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US'
"""
# Take the time now to ensure that this query lines up with what you learned above.
# # Submitting the query to the dataset
# We're ready to use this query to get information from the OpenAQ dataset. As in the previous tutorial, the first step is to create a [`Client`](https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.html#google.cloud.bigquery.client.Client) object.
# Create a "Client" object
client = bigquery.Client()
# We begin by setting up the query with the [`query()`](https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.query.html#google.cloud.bigquery.client.Client.query) method. We run the method with the default parameters, but this method also allows us to specify more complicated settings that you can read about in [the documentation](https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.query.html#google.cloud.bigquery.client.Client.query). We'll revisit this later.
# Set up the query
query_job = client.query(query)
# Next, we run the query and convert the results to a pandas DataFrame.
# API request - run the query, and return a pandas DataFrame
us_cities = query_job.to_dataframe()
# Now we've got a pandas DataFrame called `us_cities`, which we can use like any other DataFrame.
# What five cities have the most measurements?
us_cities.city.value_counts().head()
# # More queries
# If you want multiple columns, you can select them with a comma between the names:
query = """
SELECT city, country
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US'
"""
# You can select all columns with a `*` like this:
query = """
SELECT *
FROM `bigquery-public-data.openaq.global_air_quality`
WHERE country = 'US'
"""
# # Q&A: Notes on formatting
# The formatting of the SQL query might feel unfamiliar. If you have any questions, you can ask in the comments section at the bottom of this page. Here are answers to two common questions:
# ### **Question: What's up with the triple quotation marks (""")?**
# _Answer_: These tell Python that everything inside them is a single string, even though we have line breaks in it. The line breaks aren't necessary, but they make it easier to read your query.
# ### **Question: Do you need to capitalize SELECT and FROM?**
# _Answer_: No, SQL doesn't care about capitalization. However, it's customary to capitalize your SQL commands, and it makes your queries a bit easier to read.
# # Working with big datasets
# BigQuery datasets can be huge. We allow you to do a lot of computation for free, but everyone has some limit.
# **Each Kaggle user can scan 5TB every 30 days for free. Once you hit that limit, you'll have to wait for it to reset.**
# The [biggest dataset currently on Kaggle](https://www.kaggle.com/github/github-repos) is 3TB, so you can go through your 30-day limit in a couple queries if you aren't careful.
# Don't worry though: we'll teach you how to avoid scanning too much data at once, so that you don't run over your limit.
# To begin, you can estimate the size of any query before running it. Here is an example using the (*very large!*) Hacker News dataset. To see how much data a query will scan, we create a `QueryJobConfig` object and set the `dry_run` parameter to `True`.
# Query to get the score and title columns from every row where the type column has value "job"
query = """
SELECT score, title
FROM `bigquery-public-data.hacker_news.full`
WHERE type = "job"
"""
# Create a QueryJobConfig object to estimate size of query without running it
dry_run_config = bigquery.QueryJobConfig(dry_run=True)
# API request - dry run query to estimate costs
dry_run_query_job = client.query(query, job_config=dry_run_config)
print(
"This query will process {} bytes.".format(dry_run_query_job.total_bytes_processed)
)
# You can also specify a parameter when running the query to limit how much data you are willing to scan. Here's an example with a low limit.
# Only run the query if it's less than 1 MB
ONE_MB = 1000 * 1000
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=ONE_MB)
# Set up the query (will only run if it's less than 1 MB)
safe_query_job = client.query(query, job_config=safe_config)
# API request - try to run the query, and return a pandas DataFrame
safe_query_job.to_dataframe()
# In this case, the query was cancelled, because the limit of 1 MB was exceeded. However, we can increase the limit to run the query successfully!
# Only run the query if it's less than 1 GB
ONE_GB = 1000 * 1000 * 1000
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=ONE_GB)
# Set up the query (will only run if it's less than 1 GB)
safe_query_job = client.query(query, job_config=safe_config)
# API request - try to run the query, and return a pandas DataFrame
job_post_scores = safe_query_job.to_dataframe()
# Print average score for job posts
job_post_scores.score.mean()
<jupyter_start><jupyter_text>EfficientNet-PytTorch-3D
# "EfficientNet-PyTorch-3D"
* License and other information : https://github.com/shijianjian/EfficientNet-PyTorch-3D
# Usage
```python
import sys
sys.path.append('../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D')
```
```python
from efficientnet_pytorch_3d import EfficientNet3D
```
**PLEASE UPVOTE** IF this dataset is helpful to you
Kaggle dataset identifier: efficientnetpyttorch3d
<jupyter_script># ## Use stacked images (3D) and Efficentnet3D model
# Acknowledgements:
# - https://www.kaggle.com/ihelon/brain-tumor-eda-with-animations-and-modeling
# - https://www.kaggle.com/furcifer/torch-efficientnet3d-for-mri-no-train
# - https://github.com/shijianjian/EfficientNet-PyTorch-3D
#
#
# Use only one MRI type
# Variable number of images
#
import os
import sys
import json
import glob
import random
import collections
import time
import numpy as np
import pandas as pd
import pydicom
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch import nn
from torch.utils import data as torch_data
from sklearn import model_selection as sk_model_selection
from torch.nn import functional as torch_functional
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
if os.path.exists("../input/rsna-miccai-brain-tumor-radiogenomic-classification"):
data_directory = "../input/rsna-miccai-brain-tumor-radiogenomic-classification"
pytorch3dpath = "../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D"
else:
data_directory = (
"/media/roland/data/kaggle/rsna-miccai-brain-tumor-radiogenomic-classification"
)
pytorch3dpath = "EfficientNet-PyTorch-3D"
mri_types = ["FLAIR", "T1w", "T1wCE", "T2w"]
SIZE = 256
NUM_IMAGES = 128
sys.path.append(pytorch3dpath)
from efficientnet_pytorch_3d import EfficientNet3D
# ## Functions to load images
def load_dicom_image(path, img_size=SIZE):
dicom = pydicom.read_file(path)
data = dicom.pixel_array
if np.min(data) == np.max(data):
data = np.zeros((img_size, img_size))
return data
data = data - np.min(data)
if np.max(data) != 0:
data = data / np.max(data)
# data = (data * 255).astype(np.uint8)
data = cv2.resize(data, (img_size, img_size))
return data
def load_dicom_images_3d(
scan_id, num_imgs=NUM_IMAGES, img_size=SIZE, mri_type="FLAIR", split="train"
):
files = sorted(glob.glob(f"{data_directory}/{split}/{scan_id}/{mri_type}/*.dcm"))
middle = len(files) // 2
num_imgs2 = num_imgs // 2
img3d = np.stack(
[load_dicom_image(f) for f in files[middle - num_imgs2 : middle + num_imgs2]]
).T
if img3d.shape[-1] < num_imgs:
n_zero = np.zeros((img_size, img_size, num_imgs - img3d.shape[-1]))
img3d = np.concatenate((img3d, n_zero), axis=-1)
return np.expand_dims(img3d, 0)
load_dicom_images_3d("00000").shape
def set_seed(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
set_seed(42)
# ## train / test splits
train_df = pd.read_csv(f"{data_directory}/train_labels.csv")
display(train_df)
df_train, df_valid = sk_model_selection.train_test_split(
train_df,
test_size=0.2,
random_state=42,
stratify=train_df["MGMT_value"],
)
print(df_train.shape, df_valid.shape)
class Dataset(torch_data.Dataset):
def __init__(self, paths, targets=None, mri_type="FLAIR"):
self.paths = paths
self.targets = targets
self.mri_type = mri_type
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
scan_id = self.paths[index]
if self.targets is None:
data = load_dicom_images_3d(
str(scan_id).zfill(5), mri_type=self.mri_type, split="test"
)
else:
data = load_dicom_images_3d(
str(scan_id).zfill(5), mri_type=self.mri_type, split="train"
)
if self.targets is None:
return {"X": torch.tensor(data).float(), "id": scan_id}
else:
y = torch.tensor(self.targets[index], dtype=torch.float)
return {"X": torch.tensor(data).float(), "y": y}
train_data_retriever = Dataset(
df_train["BraTS21ID"].values,
df_train["MGMT_value"].values,
)
valid_data_retriever = Dataset(
df_valid["BraTS21ID"].values,
df_valid["MGMT_value"].values,
)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.net = EfficientNet3D.from_name(
"efficientnet-b0", override_params={"num_classes": 2}, in_channels=1
)
# checkpoint = torch.load(f"{pytorch3dpath}/best_roc_0.29_loss_1826.83.pt")
# self.net.load_state_dict(checkpoint)
n_features = self.net._fc.in_features
self.net._fc = nn.Linear(in_features=n_features, out_features=1, bias=True)
def forward(self, x):
out = self.net(x)
return out
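# A note on the head above: the backbone is created with num_classes=2, but its _fc
# layer is immediately replaced by a single-output linear layer, so the model emits
# one logit per scan to pair with binary_cross_entropy_with_logits further down.
# A quick shape check (an illustrative sketch only, kept commented out because pushing
# a full-size 3D volume through the network is expensive):
# sanity_model = Model()
# dummy = torch.zeros(1, 1, SIZE, SIZE, NUM_IMAGES)  # (batch, channel, H, W, depth)
# print(sanity_model(dummy).shape)  # expected: torch.Size([1, 1])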
class LossMeter:
def __init__(self):
self.avg = 0
self.n = 0
def update(self, val):
self.n += 1
# incremental update
self.avg = val / self.n + (self.n - 1) / self.n * self.avg
class AccMeter:
def __init__(self):
self.avg = 0
self.n = 0
def update(self, y_true, y_pred):
y_true = y_true.cpu().numpy().astype(int)
y_pred = y_pred.cpu().numpy() >= 0
last_n = self.n
self.n += len(y_true)
true_count = np.sum(y_true == y_pred)
# incremental update
self.avg = true_count / self.n + last_n / self.n * self.avg
class Trainer:
def __init__(self, model, device, optimizer, criterion, loss_meter, score_meter):
self.model = model
self.device = device
self.optimizer = optimizer
self.criterion = criterion
self.loss_meter = loss_meter
self.score_meter = score_meter
self.best_valid_score = -np.inf
self.n_patience = 0
def fit(self, epochs, train_loader, valid_loader, save_path, patience):
for n_epoch in range(1, epochs + 1):
self.info_message("EPOCH: {}", n_epoch)
train_loss, train_score, train_time = self.train_epoch(train_loader)
valid_loss, valid_score, valid_roc, valid_time = self.valid_epoch(
valid_loader
)
self.info_message(
"[Epoch Train: {}] loss: {:.4f}, score: {:.4f}, time: {:.2f} s",
n_epoch,
train_loss,
train_score,
train_time,
)
self.info_message(
"[Epoch Valid: {}] loss: {:.4f}, score: {:.4f}, auc: {:.4f}, time: {:.2f} s",
n_epoch,
valid_loss,
valid_score,
valid_roc,
valid_time,
)
# if True:
if self.best_valid_score < valid_roc: # < valid_score:
self.info_message(
"The auc score improved from {:.4f} to {:.4f}. Save model to '{}'",
self.best_valid_score,
valid_roc,
save_path,
)
self.best_valid_score = valid_roc
self.save_model(n_epoch, save_path)
self.n_patience = 0
else:
self.n_patience += 1
if self.n_patience >= patience:
self.info_message(
"\nValid score didn't improve last {} epochs.", patience
)
break
def train_epoch(self, train_loader):
self.model.train()
t = time.time()
train_loss = self.loss_meter()
train_score = self.score_meter()
for step, batch in enumerate(train_loader, 1):
X = batch["X"].to(self.device)
targets = batch["y"].to(self.device)
self.optimizer.zero_grad()
outputs = self.model(X).squeeze(1)
loss = self.criterion(outputs, targets)
loss.backward()
train_loss.update(loss.detach().item())
train_score.update(targets, outputs.detach())
self.optimizer.step()
_loss, _score = train_loss.avg, train_score.avg
message = "Train Step {}/{}, train_loss: {:.5f}, train_score: {:.5f}"
self.info_message(message, step, len(train_loader), _loss, _score, end="\r")
return train_loss.avg, train_score.avg, int(time.time() - t)
def valid_epoch(self, valid_loader):
self.model.eval()
t = time.time()
valid_loss = self.loss_meter()
valid_score = self.score_meter()
y_all = []
outputs_all = []
for step, batch in enumerate(valid_loader, 1):
with torch.no_grad():
X = batch["X"].to(self.device)
targets = batch["y"].to(self.device)
outputs = self.model(X).squeeze(1)
loss = self.criterion(outputs, targets)
valid_loss.update(loss.detach().item())
valid_score.update(targets, outputs)
y_all.extend(batch["y"].tolist())
outputs_all.extend(outputs.tolist())
_loss, _score = valid_loss.avg, valid_score.avg
roc = roc_auc_score(y_all, outputs_all)
message = "Valid Step {}/{}, valid_loss: {:.4f}, valid_score: {:.4f}, valid_auc: {:.4f}"
self.info_message(
message, step, len(valid_loader), _loss, _score, roc, end="\r"
)
return valid_loss.avg, valid_score.avg, roc, int(time.time() - t)
def save_model(self, n_epoch, save_path):
torch.save(
{
"model_state_dict": self.model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
"best_valid_score": self.best_valid_score,
"n_epoch": n_epoch,
},
save_path,
)
@staticmethod
def info_message(message, *args, end="\n"):
print(message.format(*args), end=end)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_data_retriever = Dataset(
df_train["BraTS21ID"].values,
df_train["MGMT_value"].values,
)
valid_data_retriever = Dataset(
df_valid["BraTS21ID"].values,
df_valid["MGMT_value"].values,
)
train_loader = torch_data.DataLoader(
train_data_retriever,
batch_size=4,
shuffle=True,
num_workers=8,
)
valid_loader = torch_data.DataLoader(
valid_data_retriever,
batch_size=4,
shuffle=False,
num_workers=8,
)
model = Model()
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.002)
criterion = torch_functional.binary_cross_entropy_with_logits
trainer = Trainer(model, device, optimizer, criterion, LossMeter, AccMeter)
history = trainer.fit(
20,
train_loader,
valid_loader,
f"best-model-0.pth",
10,
)
models = []
for i in range(1):
model = Model()
model.to(device)
checkpoint = torch.load(f"best-model-{i}.pth")
model.load_state_dict(checkpoint["model_state_dict"])
model.eval()
models.append(model)
submission = pd.read_csv(f"{data_directory}/sample_submission.csv")
test_data_retriever = Dataset(
submission["BraTS21ID"].values,
)
test_loader = torch_data.DataLoader(
test_data_retriever,
batch_size=4,
shuffle=False,
num_workers=8,
)
y_pred = []
ids = []
for e, batch in enumerate(test_loader):
print(f"{e}/{len(test_loader)}", end="\r")
with torch.no_grad():
tmp_pred = np.zeros((batch["X"].shape[0],))
for model in models:
tmp_res = (
torch.sigmoid(model(batch["X"].to(device))).cpu().numpy().squeeze()
)
tmp_pred += tmp_res
y_pred.extend(tmp_pred)
ids.extend(batch["id"].numpy().tolist())
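# Note: tmp_pred is a *sum* of the per-model sigmoid outputs. With the single model
# loaded above it is already a probability; if several checkpoints were ensembled,
# dividing tmp_pred by len(models) would keep the predictions in [0, 1].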
submission = pd.DataFrame({"BraTS21ID": ids, "MGMT_value": y_pred})
submission.to_csv("submission.csv", index=False)
submission
<jupyter_start><jupyter_text>US Accidents (2016 - 2023)
### Description
This is a countrywide car accident dataset, which covers __49 states of the USA__. The accident data are collected from __February 2016 to Dec 2020__, using multiple APIs that provide streaming traffic incident (or event) data. These APIs broadcast traffic data captured by a variety of entities, such as the US and state departments of transportation, law enforcement agencies, traffic cameras, and traffic sensors within the road-networks. Currently, there are about __3 million__ accident records in this dataset. Check [here](https://smoosavi.org/datasets/us_accidents) to learn more about this dataset.
Kaggle dataset identifier: us-accidents
<jupyter_script># # An Exploratory Data Analysis of US Accidents Dataset using Python Visualization Libraries
# ### The idea is to use various popular libraries - Seaborn, Plotly, Folium, Lux - for data visualization
# ### In this example we shall do the following
# * Use the US Accidents Dataset
# * Build Folium maps for a subset of the data
# * Look at city level - use Denver as an example
# * Use some of the **Folium** features such as **Choropleth**, **FeatureGroup**, **MarkerCluster**
# * Look at state level - use New York as an example
# * Build Seaborn pairplots
# * Build examples using Folium
# * Refer to Lux as an example (please note: this notebook may need to be downloaded to see the Lux widget, as it does not render inline in the Kaggle output)
# * Build examples using Plotly (a brief sketch appears at the end of this notebook)
# ### Standard Kaggle environment settings
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Initial imports and data loading
# Lux may not be installed by default and hence you need to use PIP install
# Uncomment this in your local notebook
#!pip install lux
# Import libraries
import pandas as pd
# import lux # Uncomment this in your local notebook
import warnings
import folium
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Ignore the import and install warnings
warnings.filterwarnings("ignore")
# ### Load the dataset into a Pandas dataframe - This is 1GB+ in size and hence it may take a while
# #### Check the dataframe
# Load the dataset - This is 1GB+ in size and hence it may take a while
accidents_db_mas = pd.read_csv("../input/us-accidents/US_Accidents_Dec20_Updated.csv")
accidents_db_mas.head(10)
accidents_db_mas.describe()
# ## Given that we have millions of rows, let us choose only 100000 rows
accidents_db = accidents_db_mas.iloc[1:100000]
# ### The following cell renders the Lux widget in a local Jupyter Notebook - but not in hosted viewers such as Kaggle or Google Colab
# #### You can play around with Lux by toggling between the pandas view and the Lux visualizations
accidents_db
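# A minimal sketch of how the Lux widget would be used in a local notebook (kept
# commented out so this cell still runs on Kaggle; the `intent` line reflects typical
# Lux usage and is an illustrative assumption, not code from the original notebook):
# import lux
# accidents_db.intent = ["Severity"]  # steer Lux's recommended visualizations
# accidents_db  # displaying the frame then toggles between the pandas and Lux views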
# ### Use Folium to build a map
# #### Load the State details
# #### Load the standard GEO JSON Data
# #### Build a Choropleth using Folium Map
plt.style.use("seaborn")
# Load the shape of the zone (US states)
# Find the original file here: https://github.com/python-visualization/folium/tree/master/examples/data
state_geo = "../input/us-crashes-json/us-states.json"
# state_crashes = 'https://raw.githubusercontent.com/kalilurrahman/datasets/main/car_crashes.csv'
state_data = accidents_db # pd.read_csv(state_crashes)
# Initialize the map:
US_state_map = folium.Map(location=[37, -102], zoom_start=5)
# Add the color for the choropleth:
US_state_map.choropleth(
geo_data=state_geo,
name="choropleth",
data=state_data,
columns=["State", "Severity"],
key_on="feature.id",
fill_color="YlGn",
fill_opacity=0.7,
line_opacity=0.2,
legend_name="Accident Severity ",
)
folium.LayerControl().add_to(US_state_map)
US_state_map
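# Folium's choropleth effectively keeps one value per state key, so passing the raw
# accident rows means the last "Severity" value seen for each state is what gets drawn.
# An optional refinement (an assumption, not the author's original code) is to
# aggregate to one value per state first and pass that frame with the same columns:
state_severity = state_data.groupby("State", as_index=False)["Severity"].mean()
state_severity.head()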
# ## Let us correlate and generate a heatmap
# #### Use Seaborn.corr() function
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(20, 20))
corr = state_data.corr()
print(corr)
sns.heatmap(corr, annot=False, linewidths=0.5, cmap="YlGnBu", ax=ax)
# #### Let us see a distplot for severity on state data
sns.distplot(state_data["Severity"])
# #### Let us see a distplot for other key fields on state data
sns.distplot(state_data["Crossing"])
sns.distplot(state_data["Bump"])
sns.distplot(state_data["Junction"])
sns.distplot(state_data["Roundabout"])
sns.distplot(state_data["Station"])
sns.distplot(state_data["Stop"])
sns.distplot(state_data["Traffic_Calming"])
sns.distplot(state_data["Traffic_Signal"])
# ## Let us take a city and see how it shapes up.
# ### Let us take the Mile High City of Denver
# #### Let us take the first 2000 accident data
## get the first 2000 Accidents from the dataset
# co_data = state_data.loc[state_data['State'] == 'CO']
co_data = accidents_db_mas.loc[accidents_db_mas["State"] == "CO"]
limit = 2000
co_accidents = co_data.iloc[0:limit, :]
# #### Define the Latitude and Longitude for Denver
# Denver latitude and longitude values
den_lat = 39.7392
den_long = -104.9903
# ### Display the map of Denver first
# create map and display it
den_map = folium.Map(location=[den_lat, den_long], zoom_start=12)
# display the map of Denver
den_map
# ### Display the FeatureGroup based on the accidents dataset (2,000 records) across various latitudes and longitudes
# instantiate a feature group for the accidents in the dataframe
accidents = folium.map.FeatureGroup()
# loop through the 2000 accidents and add to feature group
for (
lat,
lng,
) in zip(co_accidents.Start_Lat.dropna(), co_accidents.Start_Lng.dropna()):
accidents.add_child(
folium.CircleMarker(
[lat, lng],
radius=5, # define how big you want the circle markers to be
color="Green",
fill=True,
fill_color="Red",
fill_opacity=0.2,
)
)
# add incidents to map
den_map.add_child(accidents)
# ### Display the MarkerCluster based on the accidents dataset (2,000 records) across various latitudes and longitudes, grouped on accident severity
from folium import plugins
# let's start again with a clean copy of the map of Denver
den_map = folium.Map(location=[den_lat, den_long], zoom_start=12)
# instantiate a mark cluster object for the incidents in the dataframe
accidents = plugins.MarkerCluster().add_to(den_map)
# loop through the dataframe and add each data point to the mark cluster
for lat, lng, label in zip(
co_accidents.Start_Lat.dropna(),
co_accidents.Start_Lng.dropna(),
co_accidents.Severity.dropna(),
):
folium.Marker(
location=[lat, lng],
icon=None,
popup=label,
).add_to(accidents)
# display map
den_map
# ### Let us compare the states by accident severity and by the number of recorded accidents
state_data_sorted = state_data.sort_values("State", ascending=True).reset_index()
state_data_count = state_data.value_counts(state_data["State"])
# print(state_data_count.head(50))
f, ax = plt.subplots(figsize=(20, 12))
g = sns.barplot(x="State", y="Severity", data=state_data_sorted, orient="v")
plt.xticks(rotation=90)
sns.despine(left=True)
g = sns.factorplot(
"State", data=state_data_sorted, aspect=2, kind="count", color="steelblue"
)
plt.xticks(rotation=90)
sns.despine(left=True)
# ### Let us collect data for New York state but set the zoom on New York City
NY_LAT = 40.730610 # 40.73° N
NY_LNG = -73.935242 # 73.93 W
# ### Extract the New York dataset
# #### The cell below keeps the first 50,000 records; to use the full state data instead, uncomment `ny_accidents = ny_data` and skip the limit
## get the first 50000 Accidents
# ny_data = state_data.loc[state_data['State'] == 'NY']
ny_data = accidents_db_mas.loc[accidents_db_mas["State"] == "NY"]
## Using the first 50,000 records; to use the full dataset, uncomment the next line and comment out the two lines after it
# ny_accidents = ny_data
limit = 50000
ny_accidents = ny_data.iloc[0:limit, :]
# ### Let us see the New York map first
# create map and display it
ny_map = folium.Map(location=[NY_LAT, NY_LNG], zoom_start=7)
# display the map of New York
ny_map
# ### Display the MarkerCluster based on the accidents dataset across various latitudes and longitudes, grouped on accident severity
from folium import plugins
# let's start again with a clean copy of the map of New York
ny_map = folium.Map(location=[NY_LAT, NY_LNG], zoom_start=6)
# instantiate a mark cluster object for the incidents in the dataframe
accidents = plugins.MarkerCluster().add_to(ny_map)
# loop through the dataframe and add each data point to the mark cluster
for lat, lng, label in zip(
ny_accidents.Start_Lat.dropna(),
ny_accidents.Start_Lng.dropna(),
ny_accidents.Severity.dropna(),
):
folium.Marker(
location=[lat, lng],
icon=None,
popup=label,
).add_to(accidents)
# display map
ny_map
# ### Let us plot PairPlot for the first 10 columns against Severity for New York
plt.figure()
cols_to_plot = ny_accidents.columns[1:10].tolist() ##
sns.pairplot(ny_accidents[cols_to_plot], hue="Severity", palette="Accent")
plt.show()
# ### Let us check for the full dataset
plt.figure()
cols_to_plot = accidents_db.columns[1:10].tolist() ##
sns.pairplot(accidents_db[cols_to_plot], hue="Severity", palette="Accent")
plt.show()
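# ### A quick Plotly example
# The introduction lists Plotly among the libraries to showcase, but no Plotly chart
# appears above. A minimal sketch (the column names follow the dataset; the choice of
# chart is an assumption, not the author's original code):
import plotly.express as px

severity_counts = (
    accidents_db.groupby(["State", "Severity"]).size().reset_index(name="Count")
)
fig = px.bar(
    severity_counts,
    x="State",
    y="Count",
    color="Severity",
    title="Accident counts by state and severity (first ~100,000 records)",
)
fig.show()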
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt # for making plots
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
sample_submission = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
test
train
sns.displot(train["SalePrice"], color="r")
sns.displot(np.log1p(train["SalePrice"]), color="r") # For uniform distribution
train.describe().T # Just an overview of TRAIN DATASET
test.describe().T # Just an overview of the TEST DATASET
sns.jointplot(
x=train["GrLivArea"], y=train["SalePrice"], kind="reg"
)  # jointplot with a regression fit to visualize the relationship between GrLivArea and SalePrice
final_data = pd.concat((train, test)).reset_index(drop=True)
final_data.drop(["SalePrice"], axis=1, inplace=True)
final_data # merging the CSVs for data cleaning
final_data.isna().sum().nlargest(
40
) # Now we would count top 40 columns having NaN Values
some_missing_columns = [
"PoolQC",
"MiscFeature",
"Alley",
"Fence",
"FireplaceQu",
"LotFrontage",
"GarageYrBlt",
"GarageFinish",
"GarageQual",
"GarageCond",
"BsmtCond",
"BsmtExposure",
"BsmtQual",
"BsmtFinType1",
"BsmtFinType2",
"MasVnrType",
"MasVnrArea",
"GarageType",
]
for i in some_missing_columns:
final_data[i].fillna(0, inplace=True)
final_data # Converting NaN data into 0
final_data["Functional"] = final_data["Functional"].fillna("Typ")
final_data.isna().sum().nlargest(
3
)  # Most of the NaN values have now been filled.
Nm = ["MSSubClass", "MoSold", "YrSold"]
for col in Nm:
final_data[col] = final_data[col].astype(str)
train["SalePrice"] = np.log1p(train["SalePrice"])
y = train.SalePrice.values
y[:5] # Logarithmic Transformation
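# The cells below reference X_train, X_test, test_id, rmsle, model_xgb and model_gbm,
# but their definitions are missing from this excerpt. A minimal sketch of what they
# could look like (the encoding choice, hyperparameters, and the use of XGBoost plus
# scikit-learn's GradientBoostingRegressor are assumptions, not the author's original code):
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
import xgboost as xgb

test_id = test["Id"]  # kept aside for the submission file

# One-hot encode the combined frame, fill any remaining numeric NaNs, then split back
all_features = pd.get_dummies(final_data.drop("Id", axis=1)).fillna(0)
X_train = all_features[: len(train)]
X_test = all_features[len(train) :]


def rmsle(y_true, y_pred):
    # y is already log1p-transformed, so RMSE on it behaves like an RMSLE on raw prices
    return np.sqrt(mean_squared_error(y_true, y_pred))


model_xgb = xgb.XGBRegressor(n_estimators=1000, learning_rate=0.05)
model_gbm = GradientBoostingRegressor(n_estimators=1000, learning_rate=0.05)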
model_xgb.fit(X_train, y)
xgb_train_pred = model_xgb.predict(X_train)
xgb_pred = np.expm1(model_xgb.predict(X_test))
print(rmsle(y, xgb_train_pred))
xgb_pred[:5]
model_gbm.fit(X_train, y)
gbm_train_pred = model_gbm.predict(X_train)
gbm_pred = np.expm1(model_gbm.predict(X_test.values))
print(rmsle(y, gbm_train_pred))
gbm_pred[:5]
trybest = (0.5 * xgb_pred) + (0.5 * gbm_pred)
submission = pd.DataFrame({"Id": test_id, "SalePrice": trybest})
submission.head(5)
submission.to_csv("submission.csv", index=False)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
data.head()
# Null check
data.isnull().sum() / len(data) * 100
data.drop(["url_legal", "license"], axis=1, inplace=True)  # drop the mostly-empty columns
from gensim.parsing.preprocessing import remove_stopwords
docs = data["excerpt"].str.lower().str.replace(r"[^a-z\s]", "", regex=True)  # keep letters and whitespace only
docs = docs.apply(remove_stopwords)
docs[:10]
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer()
tokenizer.fit_on_texts(docs)
vocab = list(tokenizer.word_index)
print("Total number of unique tokens in corpus: %d" % len(vocab))
zip_path = "/kaggle/input/quora-insincere-questions-classification/embeddings.zip"
from zipfile import ZipFile
zf = ZipFile(zip_path)
zf.filelist
# ### Glove Embedding Layer
glove_path = "glove.840B.300d/glove.840B.300d.txt"
count = 0
with zf.open(glove_path) as file:
embeddings_glove = {}
for line in file:
line = line.decode("utf-8").replace("\n", "").split(" ")
curr_word = line[0]
if curr_word in vocab:
vector = line[1:]
vector = np.array(vector).astype(float)
embeddings_glove[curr_word] = vector
vocab_size = len(vocab) + 1
embedding_dim = 300
words_not_available = []
embedding_matrix = np.zeros((vocab_size, embedding_dim))
for word, wid in tokenizer.word_index.items():
if word in embeddings_glove:
embedding_matrix[wid] = embeddings_glove[word]
else:
words_not_available.append(word)
print(
    "Percentage of words not available %.2f%%"
    % (len(words_not_available) / len(vocab) * 100)
)
print(
    "Percentage of words available %.2f%%"
    % (100 - len(words_not_available) / len(vocab) * 100)
)
train_x_seq = tokenizer.texts_to_sequences(docs)
max_doc_len = 115
train_x_padded = pad_sequences(train_x_seq, padding="post", maxlen=max_doc_len)
# ### Using the word embeddings with the maximum word coverage, build a regressor with a simple neural network
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
model = Sequential()
model.add(
layers.Embedding(
vocab_size,
embedding_dim,
weights=[embedding_matrix],
input_length=max_doc_len,
trainable=False,
)
)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(1))
model.compile(optimizer="sgd", loss="mse", metrics=["mae"])
history = model.fit(train_x_padded, data["target"], epochs=10, verbose=1)
test_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv")
test_docs = test_df["excerpt"].str.lower().str.replace(r"[^a-z\s]", "", regex=True)
test_docs = test_docs.apply(remove_stopwords)
test_x_seq = tokenizer.texts_to_sequences(test_docs)
test_x_padded = pad_sequences(test_x_seq, padding="post", maxlen=max_doc_len)
test_y_pred = model.predict(test_x_padded)
model.summary()
submission_df = pd.read_csv(
"/kaggle/input/commonlitreadabilityprize/sample_submission.csv"
)
submission_df["target"] = test_y_pred
submission_df.to_csv("submission_glove_embedding.csv", index=False)
# ### Google News Embedding Layer
from gensim.models import KeyedVectors
embedding_file = "GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin"
embeddings = KeyedVectors.load_word2vec_format(zf.open(embedding_file), binary=True)
vocab_size = len(vocab) + 1
embedding_dim = 300
words_not_available = []
embedding_matrix = np.zeros((vocab_size, embedding_dim))
for word, wid in tokenizer.word_index.items():
if word in embeddings:
embedding_matrix[wid] = embeddings[word]
else:
words_not_available.append(word)
print(
    "Percentage of words not available %.2f%%"
    % (len(words_not_available) / len(vocab) * 100)
)
print(
    "Percentage of words available %.2f%%"
    % (100 - len(words_not_available) / len(vocab) * 100)
)
model = Sequential()
model.add(
layers.Embedding(
vocab_size,
embedding_dim,
weights=[embedding_matrix],
input_length=max_doc_len,
trainable=False,
)
)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(1))
model.compile(optimizer="sgd", loss="mse", metrics=["mae"])
history = model.fit(train_x_padded, data["target"], epochs=10, verbose=1)
test_y_pred = model.predict(test_x_padded)
model.summary()
submission_df["target"] = test_y_pred
submission_df.to_csv("submission_google_embedding.csv", index=False)
# ### Build custom word embeddings using the gensim word2vec model (with window size=5) and retrain the neural network
from gensim.models import word2vec
docs_words = [doc.split(" ") for doc in docs]
len(docs_words)
embedding_dim = 100
model = word2vec.Word2Vec(
sentences=docs_words, vector_size=embedding_dim, min_count=50, window=5, sg=1
)
w2v_vocab = model.wv.index_to_key  # keep a separate name so the tokenizer `vocab` (used for vocab_size below) is not overwritten
df_embedding_matrix = pd.DataFrame(model.wv[w2v_vocab], index=w2v_vocab)
df_embedding_matrix.shape
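# NOTE (editor's sketch): as written, the Embedding layer in the next cell is randomly
# initialised, so the word2vec vectors computed above are never actually used. One way
# to wire them in (an assumption, not the author's code) is to build a matrix aligned
# with the tokenizer indices and pass it as the layer weights:
w2v_dim = 100  # matches the embedding_dim used for Word2Vec above
w2v_matrix = np.zeros((len(tokenizer.word_index) + 1, w2v_dim))
for word, wid in tokenizer.word_index.items():
    if word in model.wv:  # `model` is still the Word2Vec instance at this point
        w2v_matrix[wid] = model.wv[word]
# layers.Embedding(w2v_matrix.shape[0], w2v_dim, weights=[w2v_matrix],
#                  input_length=max_doc_len, trainable=True)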
model = Sequential()
model.add(
layers.Embedding(
vocab_size, embedding_dim, input_length=max_doc_len, trainable=True
)
)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(1))
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
history = model.fit(train_x_padded, data["target"], epochs=25, verbose=1)
test_y_pred = model.predict(test_x_padded)
model.summary()
# ### Keras Embedding Layer
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
vocab_size = len(vocab) + 1
embedding_dim = 300
model = Sequential()
model.add(
layers.Embedding(
vocab_size, embedding_dim, input_length=max_doc_len, trainable=True
)
)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(1))
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
history = model.fit(train_x_padded, data["target"], epochs=10, verbose=1)
test_y_pred = model.predict(test_x_padded)
model.summary()
submission_df["target"] = test_y_pred
submission_df.to_csv("submission_keras_embedding.csv", index=False)
train_x_seq = tokenizer.texts_to_sequences(docs)
docs_size = []
for doc in train_x_seq:
size = len(doc)
docs_size.append(size)
pd.Series(docs_size).plot.box()
max_doc_len = 115
train_x_padded = pad_sequences(train_x_seq, padding="post", maxlen=max_doc_len)
docs[:5]
pd.DataFrame(train_x_padded[:5])
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
vocab_size = len(vocab) + 1
embedding_dim = 300
model = Sequential()
model.add(
layers.Embedding(
vocab_size, embedding_dim, input_length=max_doc_len, trainable=True
)
)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(1))
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
history = model.fit(train_x_padded, data["target"], epochs=25, verbose=1)
# sgd optimizer
# loss: 0.3717 - mae: 0.5002
# rmsprop
# loss: 0.0658 - mae: 0.2122
# adam
# loss: 0.0594 - mae: 0.1468 # Finalized
test_data = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv")
test_data = test_data[["id", "excerpt"]]
test_docs = test_data["excerpt"].str.lower().str.replace(r"[^a-z\s]", "", regex=True)
test_docs = test_docs.apply(remove_stopwords)
test_x_seq = tokenizer.texts_to_sequences(test_docs)
test_x_padded = pad_sequences(test_x_seq, padding="post", maxlen=max_doc_len)
test_docs
pd.DataFrame(test_x_padded)
test_y_pred = model.predict(test_x_padded)
model.summary()
submission_df = pd.read_csv(
"/kaggle/input/commonlitreadabilityprize/sample_submission.csv"
)
submission_df
submission_df["target"] = test_y_pred
submission_df.to_csv("submission.csv", index=False)
<jupyter_start><jupyter_text>EfficientNet Keras Weights B0-B5
## Credits
All credits are due to https://github.com/qubvel/efficientnet
Thanks so much for your contribution!
## Usage:
Use with this utility script:
https://www.kaggle.com/ratthachat/efficientnet/
Add this utility script to your kernel, and you will be able to
use all models just like standard Keras pretrained models. For details see
https://www.kaggle.com/c/aptos2019-blindness-detection/discussion/100186
Kaggle dataset identifier: efficientnet-keras-weights-b0b5
<jupyter_script>import numpy as np
import pandas as pd
import os
print(os.listdir("../input"))
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import (
Activation,
Dropout,
Flatten,
Dense,
GlobalMaxPooling2D,
BatchNormalization,
Input,
Conv2D,
GlobalAveragePooling2D,
concatenate,
Concatenate,
multiply,
LocallyConnected2D,
Lambda,
)
from keras.callbacks import ModelCheckpoint
from keras import metrics
from keras.optimizers import Adam
from keras import backend as K
import keras
from keras.models import Model
import matplotlib.pyplot as plt
from efficientnet.keras import EfficientNetB3, EfficientNetB4, EfficientNetB5
import skimage.io
from skimage.transform import resize
import imgaug as aug
from imgaug import augmenters as iaa
from tqdm import tqdm
import PIL
from PIL import Image, ImageOps
import cv2
from sklearn.utils import class_weight, shuffle
from keras.losses import binary_crossentropy, categorical_crossentropy
# from keras.applications.resnet50 import preprocess_input
from keras.applications.densenet import DenseNet121, DenseNet169, preprocess_input
import keras.backend as K
import tensorflow as tf
from sklearn.metrics import f1_score, fbeta_score, cohen_kappa_score
from keras.utils import Sequence
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import imgaug as ia
import keras.callbacks as callbacks
from keras.callbacks import Callback
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import matplotlib.pyplot as plt
from skimage.io import MultiImage, imsave, imread
from skimage.transform import resize, rescale
from skimage.color import rgb2gray
from keras.layers import (
Input,
Cropping2D,
GlobalAveragePooling2D,
Concatenate,
Dense,
Conv2D,
)
from keras.models import Model, load_model
import keras.applications as kl
from keras.backend import name_scope
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score
from tqdm import tqdm
import tensorflow as tf
from keras.utils import Sequence
from keras.optimizers import Adam, Adamax
from sklearn.utils import shuffle, class_weight
from keras.utils import to_categorical
import efficientnet.keras as efn
from albumentations import (
HorizontalFlip,
IAAPerspective,
ShiftScaleRotate,
CLAHE,
RandomRotate90,
Transpose,
ShiftScaleRotate,
Blur,
OpticalDistortion,
GridDistortion,
HueSaturationValue,
IAAAdditiveGaussianNoise,
GaussNoise,
MotionBlur,
MedianBlur,
RandomBrightnessContrast,
IAAPiecewiseAffine,
IAASharpen,
IAAEmboss,
Flip,
OneOf,
Compose,
)
main_path = "../input/prostate-data/"
image_dim = (224, 224, 3)
BATCH_SIZE = 16
EPOCHS = 10
train_df = pd.read_csv(os.path.join(main_path, "train_data.csv"))
test_df = pd.read_csv(os.path.join(main_path, "test2.csv"))
train_df.head()
train_df["Gleason Score"].value_counts()
# strip the ".png" suffix and remap Gleason sums 6-10 to classes 1-5 (0 stays 0)
for i in range(len(train_df)):
    train_df.loc[i, "file_name"] = train_df.loc[i, "file_name"].replace(".png", "")
    if train_df.loc[i, "Gleason Score"] > 5:
        train_df.loc[i, "Gleason Score"] = train_df.loc[i, "Gleason Score"] - 5
train_df.head()
train_df["Gleason Score"].value_counts()
rows, cols = 3, 3
fig = plt.figure(figsize=(10, 10))
for i in range(1, rows * cols + 1):
img = MultiImage(
os.path.join(
main_path,
"Gleason_train/Gleason_train",
train_df.loc[i - 1, "file_name"] + ".jpg",
)
)
img = resize(img[-1], (512, 512))
fig.add_subplot(rows, cols, i)
plt.imshow(img)
plt.title("Gleason Score: " + str(train_df.loc[i - 1, "Gleason Score"]))
plt.show()
columns = ["file_name", "Gleason Score"]
train_df, val_df = train_test_split(train_df[columns], test_size=0.15)
print("Train shape: {}".format(train_df.shape))
print("Validation shape: {}".format(val_df.shape))
class Generator(Sequence):
def __init__(
self,
input_data,
batch_size=BATCH_SIZE,
dims=image_dim,
is_shuffle=True,
n_classes=6,
is_train=True,
):
self.image_ids = input_data[0]
self.labels = input_data[1]
self.batch_size = batch_size
        self.dims = dims  # use the constructor argument (previously hard-coded to image_dim)
self.shuffle = is_shuffle
self.n_classes = n_classes
self.is_train = is_train
self.on_epoch_end()
def __len__(self):
return int(np.floor(len(self.image_ids) / self.batch_size))
def on_epoch_end(self):
self.indexes = np.arange(len(self.image_ids))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
image_ids_temp = [self.image_ids[k] for k in indexes]
labels_temp = [self.labels[k] for k in indexes]
# Generate data
X, y = self.__data_generation(image_ids_temp, labels_temp)
return X, y
def augment_flips_color(self, p=0.5):
return Compose(
[
Flip(),
RandomRotate90(),
Transpose(),
HorizontalFlip(),
ShiftScaleRotate(
shift_limit=0.0625, scale_limit=0.50, rotate_limit=45, p=0.75
),
Blur(blur_limit=3),
],
p=p,
)
def __data_generation(self, list_IDs_temp, lbls):
X = np.zeros((self.batch_size, *self.dims))
y = np.zeros((self.batch_size), dtype=int)
# Generate data
for i, ID in enumerate(list_IDs_temp):
# Store sample
img = MultiImage(
os.path.join(main_path, "Gleason_train/Gleason_train", ID + ".jpg")
)
img = resize(img[-1], (self.dims[0], self.dims[1]))
# Augmentation
if self.is_train:
aug = self.augment_flips_color(p=1)
img = aug(image=img)["image"]
X[i] = img
# Store class
y[i] = lbls[i]
return X, to_categorical(y, num_classes=self.n_classes)
train_gen = Generator([train_df["file_name"].values, train_df["Gleason Score"].values])
val_gen = Generator(
[val_df["file_name"].values, val_df["Gleason Score"].values],
is_shuffle=False,
is_train=False,
)
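# Quick sanity check (editor's addition): one batch from the generator should be
# BATCH_SIZE images of shape image_dim and BATCH_SIZE one-hot labels over the 6 classes.
Xb, yb = train_gen[0]
print(Xb.shape, yb.shape)  # expected: (16, 224, 224, 3) (16, 6)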
class QWKEvaluation(Callback):
def __init__(self, validation_data=(), batch_size=BATCH_SIZE, interval=1):
        super(QWKEvaluation, self).__init__()
self.interval = interval
self.batch_size = batch_size
self.valid_generator, self.y_val = validation_data
self.history = []
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict_generator(
generator=self.valid_generator,
steps=np.ceil(float(len(self.y_val)) / float(self.batch_size)),
workers=1,
use_multiprocessing=False,
verbose=1,
)
def flatten(y):
return np.argmax(y, axis=1).reshape(-1)
score = cohen_kappa_score(
self.y_val,
flatten(y_pred),
labels=[0, 1, 2, 3, 4, 5],
weights="quadratic",
)
print("\n epoch: %d - QWK_score: %.6f \n" % (epoch + 1, score))
self.history.append(score)
if score >= max(self.history):
print("saving checkpoint: ", score)
self.model.save("classifier.h5")
qwk = QWKEvaluation(
validation_data=(
val_gen,
np.asarray(val_df["Gleason Score"][: val_gen.__len__() * BATCH_SIZE]),
),
batch_size=BATCH_SIZE,
interval=1,
)
in_lay = Input(shape=image_dim)
base_model = EfficientNetB5(weights=None, input_tensor=in_lay, include_top=False)
base_model.load_weights(
"../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5"
)
pt_features = base_model(in_lay)
pt_depth = base_model.get_output_shape_at(0)[-1]
bn_features = BatchNormalization()(pt_features)
# %% [markdown]
# ## Attention model
# %%
# here we use an attention mechanism to turn pixels in the GAP on and off
attn_layer = Conv2D(64, kernel_size=(1, 1), padding="same", activation="relu")(
Dropout(0.5)(bn_features)
)
attn_layer = Conv2D(16, kernel_size=(1, 1), padding="same", activation="relu")(
attn_layer
)
attn_layer = Conv2D(8, kernel_size=(1, 1), padding="same", activation="relu")(
attn_layer
)
attn_layer = Conv2D(1, kernel_size=(1, 1), padding="valid", activation="sigmoid")(
attn_layer
)
# fan it out to all of the channels
up_c2_w = np.ones((1, 1, 1, pt_depth))
up_c2 = Conv2D(
pt_depth,
kernel_size=(1, 1),
padding="same",
activation="linear",
use_bias=False,
weights=[up_c2_w],
)
up_c2.trainable = False
attn_layer = up_c2(attn_layer)
mask_features = multiply([attn_layer, bn_features])
gap_features = GlobalAveragePooling2D()(mask_features)
gap_mask = GlobalAveragePooling2D()(attn_layer)
# to account for missing values from the attention model
gap = Lambda(lambda x: x[0] / x[1], name="RescaleGAP")([gap_features, gap_mask])
gap_dr = Dropout(0.25)(gap)
dr_steps = Dropout(0.25)(Dense(128, activation="relu")(gap_dr))
out_layer = Dense(6, activation="softmax")(dr_steps)
retina_model = Model(inputs=[in_lay], outputs=[out_layer])
retina_model.summary()
from keras.callbacks import (
ModelCheckpoint,
LearningRateScheduler,
EarlyStopping,
ReduceLROnPlateau,
CSVLogger,
)
epochs = 10
batch_size = 16
checkpoint = ModelCheckpoint(
"../working/model_.h5",
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="min",
save_weights_only=True,
)
reduceLROnPlat = ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=4, verbose=1, mode="auto", epsilon=0.0001
)
early = EarlyStopping(monitor="val_loss", mode="min", patience=4)
csv_logger = CSVLogger(
filename="../working/training_log.csv", separator=",", append=True
)
def kappa_loss(y_true, y_pred, y_pow=2, eps=1e-12, N=5, bsize=32, name="kappa"):
with tf.name_scope(name):
y_true = tf.to_float(y_true)
repeat_op = tf.to_float(tf.tile(tf.reshape(tf.range(0, N), [N, 1]), [1, N]))
repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
weights = repeat_op_sq / tf.to_float((N - 1) ** 2)
pred_ = y_pred**y_pow
try:
pred_norm = pred_ / (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
except Exception:
pred_norm = pred_ / (eps + tf.reshape(tf.reduce_sum(pred_, 1), [bsize, 1]))
hist_rater_a = tf.reduce_sum(pred_norm, 0)
hist_rater_b = tf.reduce_sum(y_true, 0)
conf_mat = tf.matmul(tf.transpose(pred_norm), y_true)
nom = tf.reduce_sum(weights * conf_mat)
denom = tf.reduce_sum(
weights
* tf.matmul(
tf.reshape(hist_rater_a, [N, 1]), tf.reshape(hist_rater_b, [1, N])
)
/ tf.to_float(bsize)
)
return (
nom * 0.5 / (denom + eps) + categorical_crossentropy(y_true, y_pred) * 0.5
)
# %%
from keras.callbacks import Callback
class QWKEvaluation(Callback):
def __init__(self, validation_data=(), batch_size=BATCH_SIZE, interval=1):
        super(QWKEvaluation, self).__init__()
self.interval = interval
self.batch_size = batch_size
self.valid_generator, self.y_val = validation_data
self.history = []
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict_generator(
generator=self.valid_generator,
steps=np.ceil(float(len(self.y_val)) / float(self.batch_size)),
workers=1,
use_multiprocessing=False,
verbose=1,
)
def flatten(y):
return np.argmax(y, axis=1).reshape(-1)
score = cohen_kappa_score(
self.y_val,
flatten(y_pred),
labels=[0, 1, 2, 3, 4, 5],
weights="quadratic",
)
print("\n epoch: %d - QWK_score: %.6f \n" % (epoch + 1, score))
self.history.append(score)
if score >= max(self.history):
print("saving checkpoint: ", score)
self.model.save("classifier.h5")
qwk = QWKEvaluation(
validation_data=(
val_gen,
np.asarray(val_df["Gleason Score"][: val_gen.__len__() * BATCH_SIZE]),
),
batch_size=batch_size,
interval=1,
)
for layer in retina_model.layers:
layer.trainable = True
callbacks_list = [checkpoint, csv_logger, reduceLROnPlat, early, qwk]
retina_model.compile(
loss="categorical_crossentropy",
# loss=kappa_loss,
optimizer=Adam(lr=1e-4),
metrics=["accuracy"],
)
retina_model.fit_generator(
train_gen,
# steps_per_epoch=np.ceil(float(len(train_x)) / float(batch_size)),
validation_data=val_gen,
# validation_steps=np.ceil(float(len(valid_x)) / float(batch_size)),
epochs=epochs,
verbose=1,
workers=1,
use_multiprocessing=False,
callbacks=callbacks_list,
)
loss = retina_model.history.history["loss"]
val_loss = retina_model.history.history["val_loss"]
acc = retina_model.history.history["accuracy"]
val_acc = retina_model.history.history["val_accuracy"]
score = qwk.history
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "b", color="red", label="Training Loss")
plt.plot(epochs, val_loss, "b", color="blue", label="Validation Loss")
plt.title("Training and Validation Loss")
plt.legend()
plt.figure()
plt.plot(epochs, acc, "b", color="red", label="Training Accuracy")
plt.plot(epochs, val_acc, "b", color="blue", label="Validation Accuracy")
plt.title("Training and Validation Accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, score, "b", color="red", label="Validation Kappa")
plt.legend()
plt.figure()
plt.show()
test_df.head()
test_dir = "../input/prostate-data/Test_data/Test_data/Test_images"
if os.path.exists(test_dir):
model = load_model("./classifier.h5")
predicted = []
for ID in test_df["file_name"]:
file = ID.replace("mask2_", "")
file = file.replace(".png", "")
# print(file)
img = MultiImage(os.path.join(test_dir, file + ".jpg"))
img = resize(img[-1], (image_dim[0], image_dim[1]))
preds = model.predict(np.expand_dims(img, 0))
preds = np.argmax(preds)
predicted.append(preds)
submission = pd.DataFrame(
{"file_name": test_df["file_name"], "Gleason Score": predicted}
)
submission.to_csv("submission1.csv", index=False)
res = pd.read_csv("./submission1.csv")
test = pd.read_csv("../input/prostate-data/test2.csv")
# apply the same remap (Gleason sums 6-10 -> classes 1-5) to the ground-truth test labels
for i in range(len(test)):
    if test.loc[i, "Gleason Score"] > 5:
        test.loc[i, "Gleason Score"] = test.loc[i, "Gleason Score"] - 5
res.head()
X_actual = test["Gleason Score"]
X_pred = res["Gleason Score"]
res["Gleason Score"].value_counts()
test["Gleason Score"].value_counts()
for i in range(245):
if test_df["Gleason Score"][i] == 0:
test_df["Gleason Score"][i] = 1
test_df["Gleason Score"].value_counts()
for i in range(245):
if res["Gleason Score"][i] == 0:
res["Gleason Score"][i] = 1
res["Gleason Score"].value_counts()
X_actual.fillna(1, inplace=True)
X_actual.isnull().sum()
from sklearn.metrics import confusion_matrix
x = confusion_matrix(X_actual, X_pred)
x
N = len(X_actual)
N
w = np.zeros((6, 6))
w
for i in range(len(w)):
    for j in range(len(w)):
        # standard QWK weights divide by (number_of_classes - 1)^2; the original divided by
        # (N - 1)^2 with N = len(X_actual), but the constant factor cancels in the Num / Den
        # ratio below, so the final QWK value is unchanged
        w[i][j] = float(((i - j) ** 2) / ((len(w) - 1) ** 2))
w
N = 6
# actual class histogram, hard-coded (it should match the value_counts shown above)
X_actual_hist = np.zeros([N])
X_actual_hist[0] = 10
X_actual_hist[1] = 29
X_actual_hist[2] = 81
X_actual_hist[3] = 86
X_actual_hist[4] = 26
X_actual_hist[5] = 13
print("Actuals value counts : {}".format(X_actual_hist))
# predicted class histogram, hard-coded (it should match the value_counts shown above)
X_pred_hist = np.zeros([N])
X_pred_hist[0] = 9
X_pred_hist[1] = 153
X_pred_hist[2] = 32
X_pred_hist[3] = 34
X_pred_hist[4] = 6
X_pred_hist[5] = 11
E = np.outer(X_actual_hist, X_pred_hist)
E
E = E / E.sum()
E.sum()
x = x / x.sum()
x.sum()
E
x
Num = 0
Den = 0
for i in range(len(w)):
for j in range(len(w)):
Num += w[i][j] * x[i][j]
Den += w[i][j] * E[i][j]
Res = Num / Den
QWK = 1 - Res
print("The QWK value is {}".format(round(QWK, 4)))
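# Cross-check (editor's addition): sklearn's cohen_kappa_score, already used by the
# training callback above, computes the same quantity directly. Small differences are
# possible because the histogram vectors above were hard-coded rather than recomputed.
print(
    "sklearn QWK: {}".format(
        round(cohen_kappa_score(X_actual, X_pred, weights="quadratic"), 4)
    )
)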
<jupyter_start><jupyter_text>World Happiness Report 2021
### Context
The World Happiness Report is a landmark survey of the state of global happiness . The report continues to gain global recognition as governments, organizations and civil society increasingly use happiness indicators to inform their policy-making decisions. Leading experts across fields – economics, psychology, survey analysis, national statistics, health, public policy and more – describe how measurements of well-being can be used effectively to assess the progress of nations. The reports review the state of happiness in the world today and show how the new science of happiness explains personal and national variations in happiness.
### Content
The happiness scores and rankings use data from the Gallup World Poll . The columns following the happiness score estimate the extent to which each of six factors – economic production, social support, life expectancy, freedom, absence of corruption, and generosity – contribute to making life evaluations higher in each country than they are in Dystopia, a hypothetical country that has values equal to the world’s lowest national averages for each of the six factors. They have no impact on the total score reported for each country, but they do explain why some countries rank higher than others.
Kaggle dataset identifier: world-happiness-report-2021
<jupyter_code>import pandas as pd
df = pd.read_csv('world-happiness-report-2021/world-happiness-report.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1949 entries, 0 to 1948
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Country name 1949 non-null object
1 year 1949 non-null int64
2 Life Ladder 1949 non-null float64
3 Log GDP per capita 1913 non-null float64
4 Social support 1936 non-null float64
5 Healthy life expectancy at birth 1894 non-null float64
6 Freedom to make life choices 1917 non-null float64
7 Generosity 1860 non-null float64
8 Perceptions of corruption 1839 non-null float64
9 Positive affect 1927 non-null float64
10 Negative affect 1933 non-null float64
dtypes: float64(9), int64(1), object(1)
memory usage: 167.6+ KB
<jupyter_text>Examples:
{
"Country name": "Afghanistan",
"year": 2008,
"Life Ladder": 3.724,
"Log GDP per capita": 7.37,
"Social support": 0.451,
"Healthy life expectancy at birth": 50.8,
"Freedom to make life choices": 0.718,
"Generosity": 0.168,
"Perceptions of corruption": 0.882,
"Positive affect": 0.518,
"Negative affect": 0.258
}
{
"Country name": "Afghanistan",
"year": 2009,
"Life Ladder": 4.402,
"Log GDP per capita": 7.54,
"Social support": 0.552,
"Healthy life expectancy at birth": 51.2,
"Freedom to make life choices": 0.679,
"Generosity": 0.19,
"Perceptions of corruption": 0.85,
"Positive affect": 0.584,
"Negative affect": 0.23700000000000002
}
{
"Country name": "Afghanistan",
"year": 2010,
"Life Ladder": 4.758,
"Log GDP per capita": 7.647,
"Social support": 0.539,
"Healthy life expectancy at birth": 51.6,
"Freedom to make life choices": 0.6000000000000001,
"Generosity": 0.121,
"Perceptions of corruption": 0.707,
"Positive affect": 0.618,
"Negative affect": 0.275
}
{
"Country name": "Afghanistan",
"year": 2011,
"Life Ladder": 3.832,
"Log GDP per capita": 7.62,
"Social support": 0.521,
"Healthy life expectancy at birth": 51.92,
"Freedom to make life choices": 0.496,
"Generosity": 0.162,
"Perceptions of corruption": 0.731,
"Positive affect": 0.611,
"Negative affect": 0.267
}
<jupyter_code>import pandas as pd
df = pd.read_csv('world-happiness-report-2021/world-happiness-report-2021.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 149 entries, 0 to 148
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Country name 149 non-null object
1 Regional indicator 149 non-null object
2 Ladder score 149 non-null float64
3 Standard error of ladder score 149 non-null float64
4 upperwhisker 149 non-null float64
5 lowerwhisker 149 non-null float64
6 Logged GDP per capita 149 non-null float64
7 Social support 149 non-null float64
8 Healthy life expectancy 149 non-null float64
9 Freedom to make life choices 149 non-null float64
10 Generosity 149 non-null float64
11 Perceptions of corruption 149 non-null float64
12 Ladder score in Dystopia 149 non-null float64
13 Explained by: Log GDP per capita 149 non-null float64
14 Explained by: Social support 149 non-null float64
15 Explained by: Healthy life expectancy 149 non-null float64
16 Explained by: Freedom to make life choices 149 non-null float64
17 Explained by: Generosity 149 non-null float64
18 Explained by: Perceptions of corruption 149 non-null float64
19 Dystopia + residual 149 non-null float64
dtypes: float64(18), object(2)
memory usage: 23.4+ KB
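# Editor's note (a quick check, not part of the original dataset description): the
# "Explained by:" columns plus "Dystopia + residual" add back up to the Ladder score,
# which is what "no impact on the total score" means in the description above.
parts = [c for c in df.columns if c.startswith("Explained by:")] + ["Dystopia + residual"]
print((df[parts].sum(axis=1) - df["Ladder score"]).abs().max())  # ~0 up to rounding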
<jupyter_text>Examples:
{
"Country name": "Finland",
"Regional indicator": "Western Europe",
"Ladder score": 7.842,
"Standard error of ladder score": 0.032,
"upperwhisker": 7.904,
"lowerwhisker": 7.78,
"Logged GDP per capita": 10.775,
"Social support": 0.9540000000000001,
"Healthy life expectancy": 72.0,
"Freedom to make life choices": 0.9490000000000001,
"Generosity": -0.098,
"Perceptions of corruption": 0.186,
"Ladder score in Dystopia": 2.43,
"Explained by: Log GDP per capita": 1.446,
"Explained by: Social support": 1.106,
"Explained by: Healthy life expectancy": 0.741,
"Explained by: Freedom to make life choices": 0.6910000000000001,
"Explained by: Generosity": 0.124,
"Explained by: Perceptions of corruption": 0.481,
"Dystopia + residual": 3.253
}
{
"Country name": "Denmark",
"Regional indicator": "Western Europe",
"Ladder score": 7.62,
"Standard error of ladder score": 0.035,
"upperwhisker": 7.687,
"lowerwhisker": 7.552,
"Logged GDP per capita": 10.933,
"Social support": 0.9540000000000001,
"Healthy life expectancy": 72.7,
"Freedom to make life choices": 0.9460000000000001,
"Generosity": 0.03,
"Perceptions of corruption": 0.179,
"Ladder score in Dystopia": 2.43,
"Explained by: Log GDP per capita": 1.502,
"Explained by: Social support": 1.108,
"Explained by: Healthy life expectancy": 0.763,
"Explained by: Freedom to make life choices": 0.686,
"Explained by: Generosity": 0.20800000000000002,
"Explained by: Perceptions of corruption": 0.485,
"Dystopia + residual": 2.868
}
{
"Country name": "Switzerland",
"Regional indicator": "Western Europe",
"Ladder score": 7.571,
"Standard error of ladder score": 0.036000000000000004,
"upperwhisker": 7.643,
"lowerwhisker": 7.5,
"Logged GDP per capita": 11.117,
"Social support": 0.9420000000000001,
"Healthy life expectancy": 74.4,
"Freedom to make life choices": 0.919,
"Generosity": 0.025,
"Perceptions of corruption": 0.292,
"Ladder score in Dystopia": 2.43,
"Explained by: Log GDP per capita": 1.566,
"Explained by: Social support": 1.079,
"Explained by: Healthy life expectancy": 0.8160000000000001,
"Explained by: Freedom to make life choices": 0.653,
"Explained by: Generosity": 0.20400000000000001,
"Explained by: Perceptions of corruption": 0.41300000000000003,
"Dystopia + residual": 2.839
}
{
"Country name": "Iceland",
"Regional indicator": "Western Europe",
"Ladder score": 7.554,
"Standard error of ladder score": 0.059000000000000004,
"upperwhisker": 7.67,
"lowerwhisker": 7.438,
"Logged GDP per capita": 10.878,
"Social support": 0.983,
"Healthy life expectancy": 73.0,
"Freedom to make life choices": 0.9550000000000001,
"Generosity": 0.16,
"Perceptions of corruption": 0.673,
"Ladder score in Dystopia": 2.43,
"Explained by: Log GDP per capita": 1.482,
"Explained by: Social support": 1.172,
"Explained by: Healthy life expectancy": 0.772,
"Explained by: Freedom to make life choices": 0.6980000000000001,
"Explained by: Generosity": 0.293,
"Explained by: Perceptions of corruption": 0.17,
"Dystopia + residual": 2.967
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# 
# # Context
# "The World Happiness Report is a landmark survey of the state of global happiness. The first report was published in 2012, the second in 2013, the third in 2015, and the fourth in the 2016 Update. The World Happiness 2017, which ranks 155 countries by their happiness levels, was released at an event celebrating International Day of Happiness on March 20th. The report continues to gain global recognition as governments, organizations and civil society increasingly use happiness indicators to inform their policy-making decisions. Leading experts across fields – economics, psychology, survey analysis, national statistics, health, public policy and more – describe how measurements of well-being can be used effectively to assess the progress of nations. The reports review the state of happiness in the world today and show how the new science of happiness explains personal and national variations in happiness."
# # Data Content
# "The happiness scores and rankings use data from the Gallup World Poll.
# The columns following the happiness score estimate the extent to which each of six factors – economic production, social support, life expectancy, freedom, absence of corruption, and generosity – contribute to making life evaluations higher in each country than they are in Dystopia, a hypothetical country that has values equal to the world’s lowest national averages for each of the six factors. They have no impact on the total score reported for each country, but they do explain why some countries rank higher than others."
# * Ladder score: Happiness score or subjective well-being. This is the national average response to the question of life evaluations.
# * Logged GDP per capita: The GDP-per-capita time series from 2019 to 2020 using countryspecific forecasts of real GDP growth in 2020.
# * Social support: Social support refers to assistance or support provided by members of social networks (like government) to an individual.
# * Healthy life expectancy: Healthy life expectancy is the average life in good health - that is to say without irreversible limitation of activity in daily life or incapacities - of a fictitious generation subject to the conditions of mortality and morbidity prevailing that year.
# * Freedom to make life choices: Freedom to make life choices is the national average of binary responses to the GWP question “Are you satisfied or dissatisfied with your freedom to choose what you do with your life?” ... It is defined as the average of laughter and enjoyment for other waves where the happiness question was not asked
# * Generosity: Generosity is the residual of regressing national average of response to the GWP question “Have you donated money to a charity in the past month?” on GDP per capita.
# * Perceptions of corruption: The measure is the national average of the survey responses to two questions in the GWP: “Is corruption widespread throughout the government or not” and “Is corruption widespread within businesses or not?”
# * Ladder score in Dystopia: It has values equal to the world’s lowest national averages. Dystopia as a benchmark against which to compare contributions from each of the six factors. Dystopia is an imaginary country that has the world's least-happy people. ... Since life would be very unpleasant in a country with the world's lowest incomes, lowest life expectancy, lowest generosity, most corruption, least freedom, and least social support, it is referred to as “Dystopia,” in contrast to Utopia.
# World Happiness Report Official Website: https://worldhappiness.report/
# # Does Money Buy Happiness?
# "For many Americans, the pursuit of happiness and the pursuit of money come to much the same thing. More money means more goods (inflation aside) and thus more of the material benefits of life. As it is for the individual, so it is for society as a whole. National economic growth - a steady upward march in average income, year after year, decade after decate - means it is supposed, greater well-being and a happier society."
# # Hypothesis:
# $H_0: \beta_1 = 0$ vs. $H_a: \beta_1 \neq 0$
# $H_0$: There is not a sagnificant statistical association between GDP per capita, and happiness score.
# $H_a$: There is a sagnificant statistical association between GDP per capita, and happiness score.
# # Importing Libraries / Reading in the Data
# Import Libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from statsmodels.formula.api import ols
import scipy.stats as st
# **Reading in data**
# Read in the data -
# Import Local CSV files sourced from - "https://www.kaggle.com/ajaypalsinghlo/world-happiness-report-2021?select=world-happiness-report.csv"
world1 = pd.read_csv("../input/world-happiness-report-2021/world-happiness-report.csv")
world2 = pd.read_csv(
"../input/world-happiness-report-2021/world-happiness-report-2021.csv"
)
# # Preview of Dataset 1 / Cleaning
# Preview Data World 1 -
# Preview the first dataset
world1
# Preview a value count of each year
print(world1["year"].value_counts())
print("-----------------------------------------------------------------------")
# Preview a value count of each country
print(world1["Country name"].value_counts())
# Preview of info on the dataframe
print(world1.info())
print("-----------------------------------------------------------------------")
# Check for total count of null vlaues
print("Is null count:", world1.isnull().sum().sum())
# Unique countries used within data set
print("Unique countries used:", world1["Country name"].unique())
print("-----------------------------------------------------------------------")
# Unique years used within data set
print("Unique years used:", world1["year"].unique())
# Clean Data from world1 so there's no NaN values
world1_clean_ = world1.dropna(axis=0)
# Clean Data from world1 so column names fit better
world1_clean = world1_clean_.rename(
columns={
"Country name": "Country_name",
"year": "Year",
"Life Ladder": "Ladder_score",
"Log GDP per capita": "GDP_per_capita",
"Social support": "Social_support",
"Healthy life expectancy at birth": "Life_expectancy",
"Freedom to make life choices": "Freedom_of_choice",
"Perceptions of corruption": "Corruption",
"Positive affect": "Positive_affect",
"Negative affect": "Negative_affect",
}
)
# Remove 2005 from dataset since there's limited to no data
# Finding the index of 2005
world1_clean.loc[world1_clean["Year"] == 2005]
# Dropping the row from the set
world1_clean_ = world1_clean.drop([0, 293])
# Organize set to present columns in a desired fassion
world1_final_ = world1_clean_[
[
"Year",
"Country_name",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
"Positive_affect",
"Negative_affect",
]
]
# drop columns to match world2
world1_final = world1_final_.drop(["Positive_affect", "Negative_affect"], axis=1)
# # Preview of Dataset 2 / Cleaning
# Preview Data World 2 -
# Preview the second data set
world2
# Preview of info on the dataframe
print(world2.info())
print("-----------------------------------------------------------------------")
# Check for total count of null vlaues
print("Null count:", world2.isnull().sum().sum())
# Cleaning world 2 Data -
# Drop columns that don't pertain to top dataset
world2_clean_ = world2.drop(
[
"Standard error of ladder score",
"upperwhisker",
"lowerwhisker",
"Ladder score in Dystopia",
"Explained by: Log GDP per capita",
"Explained by: Social support",
"Explained by: Freedom to make life choices",
"Explained by: Generosity",
"Explained by: Perceptions of corruption",
"Dystopia + residual",
"Explained by: Healthy life expectancy",
],
axis=1,
)
# Add year column to include the year this data is from is 2021
world2_clean_["Year"] = 2021
# Print columns for orginization phase
print(world2_clean_.info())
# Taking previous dataset, including 2021, then orginizing columns
world2_final_ = world2_clean_[
[
"Year",
"Country name",
"Regional indicator",
"Ladder score",
"Logged GDP per capita",
"Generosity",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Perceptions of corruption",
]
]
# Clean up the names
world2_final = world2_final_.rename(
columns={
"Country name": "Country_name",
"Regional indicator": "Region_indicator",
"Ladder score": "Ladder_score",
"Logged GDP per capita": "GDP_per_capita",
"Social support": "Social_support",
"Healthy life expectancy": "Life_expectancy",
"Freedom to make life choices": "Freedom_of_choice",
"Perceptions of corruption": "Corruption",
}
)
# # Merging Both Sets Together / Cleaning Final Set
# Merge World1_final + world 2 Final, try to use country as the identifier so region gets applied to every existing country -
world_test = pd.merge(
world1_final,
world2_final[["Country_name", "Region_indicator"]],
how="left",
on="Country_name",
)
# Preview of merged set
world_test.info()
# Is null test to see which values are missing
world_test["Missing"] = world_test["Region_indicator"].isnull()
# Assign these missing values to a new list
world_missing = pd.DataFrame(world_test.loc[world_test["Missing"] == True])
# calculate the names of the countries missing values
world_missing["Country_name"].value_counts(ascending=False)
# Create values for all nulls using a for loop function, and apply it to the df as a new column
region = []
for i in range(len(world_test)):
if world_test["Country_name"][i] == "Angola":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Belize":
region.append("Latin America and Caribbean")
elif world_test["Country_name"][i] == "Congo (Kinshasa)":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Syria":
region.append("Middle East and North Africa")
elif world_test["Country_name"][i] == "Trinidad and Tobago":
region.append("Latin America and Caribbean")
elif world_test["Country_name"][i] == "Qatar":
region.append("Middle East and North Africa")
elif world_test["Country_name"][i] == "Sudan":
region.append("Middle East and North Africa")
elif world_test["Country_name"][i] == "Central African Republic":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Djibouti":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Guyana":
region.append("Latin America and Caribbean")
elif world_test["Country_name"][i] == "Bhutan":
region.append("South Asia")
elif world_test["Country_name"][i] == "Suriname":
region.append("Latin America and Caribbea")
world_missing["Region"] = region
# **After merging the dataframes using the country value as the key, there were a couple of region identifiers that got left out. Instead of dropping the NaN values, I decided to create a for loop that automatically filled the region value that was missing. I found the missing region values by doing a NaN value counts.**
# Drop the old Region column, and the missing vlaues column
world_missing_cleaner_ = world_missing.drop("Region_indicator", axis=1)
world_missing_cleaner = world_missing_cleaner_.drop("Missing", axis=1)
# clean the missing column off, drop the rows from the origional set, and merge this set and the 2021 set.
world_missing_final_ = world_missing_cleaner.rename(
columns={"Region": "Region_indicator"}
)
# Re-order columns to match other dataframes
world_missing_final = world_missing_final_[
[
"Year",
"Country_name",
"Region_indicator",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
]
]
# Drop the missing column from the previous calculations
world_test = world_test.drop(["Missing"], axis=1)
# Organize columns to match other dataframes
world_test = world_test[
[
"Year",
"Country_name",
"Region_indicator",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
]
]
# Concat both dataframes
world_final_close = pd.DataFrame(pd.concat([world_test, world_missing_final]))
# Dropping duplicate, na values from the original set, and replacing them with the new values
world_final_ = world_final_close.dropna(axis=0)
# **I ran into a problem where the set had duplicate values after concatenating the values from the world test to the world missing final, so I dropped the NaN values that were duplicates.**
# Isnull count to see that the values have been dropped
world_final_.isnull().sum()
# Merging the cleaned dataframes to a final dataframe
world_final = pd.DataFrame(pd.concat([world_final_, world2_final]))
# Final nullcheck
world_final.isnull().sum()
# Sort year values decending
world_final.sort_values(by="Year", inplace=True)
# **I had trouble creating some visualizations, so I had to sort the year values in order to work out the kink.**
# # Preview and Analytics of Final Set
# Preview of dataframe final
world_final
# Final dataframe describe
round(world_final.describe(), 4)
# **Analytics - Data Describe Visualization**
# * The max ladder score appears to be 7.97, and the highest life expectancy age is to be 77.1.
# * The lowest score for corruption (the lower, the more corrupt) is .035.
# * The lowest average life expectancy age is 32.3.
# Pairplot to show an overview of all of the data, and their distrobutions.
sns.pairplot(
world_final[
[
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Freedom_of_choice",
]
]
)
plt.show()
# **Analytics - Pairplot Visualization**
# * Ladder score appears to have a unimodal distribution.
# * GDP per capita appears to have a non-symetric bimodeal distribution.
# * Generosity appears to have a distribution skewed to the left.
# * Social support appears to have a distribution skewed to the right.
# * Freedom of choice appears to have a distribution skewed to the right.
# Correlation of variables
world_final.corr()
# Heat map customization
plt.figure(figsize=(15, 12.5))
sns.heatmap(
world_final[
[
"Country_name",
"Region_indicator",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
]
].corr(),
annot=True,
cmap="Blues",
linewidth=0.9,
)
# Axis ticks rotated so full column is displayed
plt.yticks(rotation=45)
plt.xticks(rotation=45)
# Create title for plot, and show plot
plt.title("Relationship Between Columns")
plt.show()
# **Analytics - Correlation Between Columns, and Visualization**
# * GDP per capita, social support, and life expectancy seem to have the highest correlation to ladder score.
# Strip plot to show Generosity per regions
plt.figure(figsize=(15, 12.5))
sns.stripplot(x="Year", y="Generosity", data=world_final, hue="Region_indicator")
# Create title for plot, and show plot
plt.title("Generosity Per Region by Year")
plt.show()
# **Analytics - Strip Plot Visualization**
# * Southeast Asian countries appear to be the most generious countries consistently throughout the years.
# Strip plot to show Corruption per regions
plt.figure(figsize=(15, 12.5))
sns.stripplot(x="Year", y="Corruption", data=world_final, hue="Region_indicator")
# Create title for plot, and show plot
plt.title("Corruption Per Region by Year")
plt.show()
# **Analytics - Strip Plot Visualization**
# * Western Europe, and North america appear to be the most corrupt countries consistently throughout the years.
# OLS model statistics for GDP compaired to happiness
model = ols("Ladder_score ~ GDP_per_capita", data=world_final).fit()
# Label slope, and intercept
slope = model.params[1]
intercept = model.params[0]
# Print Slope / Intercept
print("Slope is:", slope)
print("------------------------------------------------------------------------------")
print("intercept is:", intercept)
print("==============================================================================")
# Print the model summary
print(model.summary())
# **Analytics - GDP per Capita Compared to Happiness**
# * We reject our null hypothesis from the .01 sagnificance level, and conclude there is a sagnificant statistical correlation between GDP, and happiness.
# * Now, let's see if there's a relationship between social support, and happiness after accounting for GDP.
# **$H_0: \beta_2 = 0$ vs. $H_a: \beta_2 \neq 0$**
# * $H_0$: There is not a sagnificant statistical association between Social score and happiness score after accounting for GDP per capita.
# * $H_a$: There is a sagnificant statistical association between Social score and happiness score after accounting for GDP per capita.
# OLS model statistics for GDP + Social support compaired to happiness
model = ols("Ladder_score ~ GDP_per_capita + Social_support", data=world_final).fit()
# Label slope, and intercept
slope = model.params[1]
intercept = model.params[0]
# Print Slope / Intercept
print("Slope is:", slope)
print("------------------------------------------------------------------------------")
print("intercept is:", intercept)
print("==============================================================================")
# Print the model summary
print(model.summary())
# **Analytics - GDP per Capita + Social Support Compared to Happiness**
# * We reject our null hypothesis from the .01 sagnificance level, and conclude there is a sagnificant statistical association between Social score and happiness score after accounting for GDP per capita.
# * There was an increase to the adj. r-squared value of about 5%.
# Boxplot of GDP per capita per year
plt.figure(figsize=(15, 7))
sns.boxplot(x="Year", y="GDP_per_capita", data=world_final)
# Create title for plot, and show plot
plt.title("Box Plot of GDP per Capita by Year")
plt.show()
# **Analytics - GDP per Capita Box Plot Visualization**
# * It appears the average GDP median across the years is > 9, with 2020 coming in at the highest.
# * * This might be due to the lack of information provided from that year.
# * A large majority of the interquartile range of GDP througout the years lies in range of the 8-10.
#
# Linear regression of GDP per capita vs happiness
sns.lmplot(x="GDP_per_capita", y="Ladder_score", data=world_final, ci=None)
# Create title for plot, and show plot
plt.title("Linear Regression of GDP per Capita vs Happiness")
plt.show()
# **Analytics - GDP per Capita Linear Regression Visualization**
# * The slope of the linear regression of GDP compared to happiness appears to be > 0.
# * This is a sign of a strong correlation between both variables.
# Boxplot of Social Support per year
plt.figure(figsize=(15, 7))
sns.boxplot(x="Year", y="Social_support", data=world_final)
plt.title("Box Plot of Social Support by Year")
plt.show()
# **Analytics - Social Support Box Plot Visualization**
# * It appears the average social support score median across the years is > .8, with 2006 coming in at the highest.
# * A large majority of the interquartile range of GDP througout the years lies in range of the .7-.9.
# * There appears to be a few outliers across the years, with most outliers in 2010, and 2011.
# Linear regression of Social support vs happiness
sns.lmplot(x="Social_support", y="Ladder_score", data=world_final, ci=None)
# Create title for plot, and show plot
plt.title("Linear Regression of Social Support vs Happiness")
plt.show()
# **Analytics - Social Support Linear Regression Visualization**
# * The slope of the linear regression of Social Support to happiness appears to be > 0.
# * This is a sign of a strong correlation between both variables.
# animated scatter plot to present GDP per capita in compairison to happiness rating per year
# Also plot points based on size of social score
fig = px.scatter(
world_final,
x="GDP_per_capita",
y="Ladder_score",
animation_frame="Year",
animation_group="Country_name",
template="plotly_white",
color="Region_indicator",
size="Social_support",
size_max=20,
title="GDP per Capita + Social Support per Region Compared to Happiness",
)
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003349.ipynb
|
world-happiness-report-2021
|
ajaypalsinghlo
|
[{"Id": 69003349, "ScriptId": 18813963, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6648760, "CreationDate": "07/25/2021 17:51:45", "VersionNumber": 6.0, "Title": "Does Money Buy Happiness? [Plotly&Seaborn Visuals]", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 551.0, "LinesInsertedFromPrevious": 23.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 528.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91688707, "KernelVersionId": 69003349, "SourceDatasetVersionId": 2048065}]
|
[{"Id": 2048065, "DatasetId": 1222432, "DatasourceVersionId": 2088075, "CreatorUserId": 4698936, "LicenseName": "CC0: Public Domain", "CreationDate": "03/22/2021 16:51:01", "VersionNumber": 2.0, "Title": "World Happiness Report 2021", "Slug": "world-happiness-report-2021", "Subtitle": "World Happiness Report", "Description": "### Context\n\nThe World Happiness Report is a landmark survey of the state of global happiness . The report continues to gain global recognition as governments, organizations and civil society increasingly use happiness indicators to inform their policy-making decisions. Leading experts across fields \u2013 economics, psychology, survey analysis, national statistics, health, public policy and more \u2013 describe how measurements of well-being can be used effectively to assess the progress of nations. The reports review the state of happiness in the world today and show how the new science of happiness explains personal and national variations in happiness.\n\n### Content\n\nThe happiness scores and rankings use data from the Gallup World Poll . The columns following the happiness score estimate the extent to which each of six factors \u2013 economic production, social support, life expectancy, freedom, absence of corruption, and generosity \u2013 contribute to making life evaluations higher in each country than they are in Dystopia, a hypothetical country that has values equal to the world\u2019s lowest national averages for each of the six factors. They have no impact on the total score reported for each country, but they do explain why some countries rank higher than others.", "VersionNotes": "world-happiness-report", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1222432, "CreatorUserId": 4698936, "OwnerUserId": 4698936.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2048065.0, "CurrentDatasourceVersionId": 2088075.0, "ForumId": 1240553, "Type": 2, "CreationDate": "03/20/2021 07:57:28", "LastActivityDate": "03/20/2021", "TotalViews": 325991, "TotalDownloads": 67415, "TotalVotes": 1298, "TotalKernels": 266}]
|
[{"Id": 4698936, "UserName": "ajaypalsinghlo", "DisplayName": "Ajaypal Singh", "RegisterDate": "03/19/2020", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# 
# # Context
# "The World Happiness Report is a landmark survey of the state of global happiness. The first report was published in 2012, the second in 2013, the third in 2015, and the fourth in the 2016 Update. The World Happiness 2017, which ranks 155 countries by their happiness levels, was released at an event celebrating International Day of Happiness on March 20th. The report continues to gain global recognition as governments, organizations and civil society increasingly use happiness indicators to inform their policy-making decisions. Leading experts across fields – economics, psychology, survey analysis, national statistics, health, public policy and more – describe how measurements of well-being can be used effectively to assess the progress of nations. The reports review the state of happiness in the world today and show how the new science of happiness explains personal and national variations in happiness."
# # Data Content
# "The happiness scores and rankings use data from the Gallup World Poll.
# The columns following the happiness score estimate the extent to which each of six factors – economic production, social support, life expectancy, freedom, absence of corruption, and generosity – contribute to making life evaluations higher in each country than they are in Dystopia, a hypothetical country that has values equal to the world’s lowest national averages for each of the six factors. They have no impact on the total score reported for each country, but they do explain why some countries rank higher than others."
# * Ladder score: Happiness score or subjective well-being. This is the national average response to the question of life evaluations.
# * Logged GDP per capita: The GDP-per-capita time series from 2019 to 2020 using countryspecific forecasts of real GDP growth in 2020.
# * Social support: Social support refers to assistance or support provided by members of social networks (like government) to an individual.
# * Healthy life expectancy: Healthy life expectancy is the average life in good health - that is to say without irreversible limitation of activity in daily life or incapacities - of a fictitious generation subject to the conditions of mortality and morbidity prevailing that year.
# * Freedom to make life choices: Freedom to make life choices is the national average of binary responses to the GWP question “Are you satisfied or dissatisfied with your freedom to choose what you do with your life?” ... It is defined as the average of laughter and enjoyment for other waves where the happiness question was not asked
# * Generosity: Generosity is the residual of regressing national average of response to the GWP question “Have you donated money to a charity in the past month?” on GDP per capita.
# * Perceptions of corruption: The measure is the national average of the survey responses to two questions in the GWP: “Is corruption widespread throughout the government or not” and “Is corruption widespread within businesses or not?”
# * Ladder score in Dystopia: It has values equal to the world’s lowest national averages. Dystopia as a benchmark against which to compare contributions from each of the six factors. Dystopia is an imaginary country that has the world's least-happy people. ... Since life would be very unpleasant in a country with the world's lowest incomes, lowest life expectancy, lowest generosity, most corruption, least freedom, and least social support, it is referred to as “Dystopia,” in contrast to Utopia.
# World Happiness Report Official Website: https://worldhappiness.report/
# # Does Money Buy Happiness?
# "For many Americans, the pursuit of happiness and the pursuit of money come to much the same thing. More money means more goods (inflation aside) and thus more of the material benefits of life. As it is for the individual, so it is for society as a whole. National economic growth - a steady upward march in average income, year after year, decade after decate - means it is supposed, greater well-being and a happier society."
# # Hypothesis:
# $H_0: \beta_1 = 0$ vs. $H_a: \beta_1 \neq 0$
# $H_0$: There is not a sagnificant statistical association between GDP per capita, and happiness score.
# $H_a$: There is a sagnificant statistical association between GDP per capita, and happiness score.
# # Importing Libraries / Reading in the Data
# Import Libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import plotly.express as px
import plotly.graph_objs as go
from statsmodels.formula.api import ols
import scipy.stats as st
# **Reading in data**
# Read in the data -
# Import Local CSV files sourced from - "https://www.kaggle.com/ajaypalsinghlo/world-happiness-report-2021?select=world-happiness-report.csv"
world1 = pd.read_csv("../input/world-happiness-report-2021/world-happiness-report.csv")
world2 = pd.read_csv(
"../input/world-happiness-report-2021/world-happiness-report-2021.csv"
)
# # Preview of Dataset 1 / Cleaning
# Preview Data World 1 -
# Preview the first dataset
world1
# Preview a value count of each year
print(world1["year"].value_counts())
print("-----------------------------------------------------------------------")
# Preview a value count of each country
print(world1["Country name"].value_counts())
# Preview of info on the dataframe
print(world1.info())
print("-----------------------------------------------------------------------")
# Check for total count of null vlaues
print("Is null count:", world1.isnull().sum().sum())
# Unique countries used within data set
print("Unique countries used:", world1["Country name"].unique())
print("-----------------------------------------------------------------------")
# Unique years used within data set
print("Unique years used:", world1["year"].unique())
# Clean Data from world1 so there's no NaN values
world1_clean_ = world1.dropna(axis=0)
# Clean Data from world1 so column names fit better
world1_clean = world1_clean_.rename(
columns={
"Country name": "Country_name",
"year": "Year",
"Life Ladder": "Ladder_score",
"Log GDP per capita": "GDP_per_capita",
"Social support": "Social_support",
"Healthy life expectancy at birth": "Life_expectancy",
"Freedom to make life choices": "Freedom_of_choice",
"Perceptions of corruption": "Corruption",
"Positive affect": "Positive_affect",
"Negative affect": "Negative_affect",
}
)
# Remove 2005 from dataset since there's limited to no data
# Finding the index of 2005
world1_clean.loc[world1_clean["Year"] == 2005]
# Dropping the row from the set
world1_clean_ = world1_clean.drop([0, 293])
# Organize set to present columns in a desired fassion
world1_final_ = world1_clean_[
[
"Year",
"Country_name",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
"Positive_affect",
"Negative_affect",
]
]
# drop columns to match world2
world1_final = world1_final_.drop(["Positive_affect", "Negative_affect"], axis=1)
# # Preview of Dataset 2 / Cleaning
# Preview Data World 2 -
# Preview the second data set
world2
# Preview of info on the dataframe
print(world2.info())
print("-----------------------------------------------------------------------")
# Check for total count of null vlaues
print("Null count:", world2.isnull().sum().sum())
# Cleaning world 2 Data -
# Drop columns that don't pertain to top dataset
world2_clean_ = world2.drop(
[
"Standard error of ladder score",
"upperwhisker",
"lowerwhisker",
"Ladder score in Dystopia",
"Explained by: Log GDP per capita",
"Explained by: Social support",
"Explained by: Freedom to make life choices",
"Explained by: Generosity",
"Explained by: Perceptions of corruption",
"Dystopia + residual",
"Explained by: Healthy life expectancy",
],
axis=1,
)
# Add year column to include the year this data is from is 2021
world2_clean_["Year"] = 2021
# Print columns for orginization phase
print(world2_clean_.info())
# Taking previous dataset, including 2021, then orginizing columns
world2_final_ = world2_clean_[
[
"Year",
"Country name",
"Regional indicator",
"Ladder score",
"Logged GDP per capita",
"Generosity",
"Social support",
"Healthy life expectancy",
"Freedom to make life choices",
"Perceptions of corruption",
]
]
# Clean up the names
world2_final = world2_final_.rename(
columns={
"Country name": "Country_name",
"Regional indicator": "Region_indicator",
"Ladder score": "Ladder_score",
"Logged GDP per capita": "GDP_per_capita",
"Social support": "Social_support",
"Healthy life expectancy": "Life_expectancy",
"Freedom to make life choices": "Freedom_of_choice",
"Perceptions of corruption": "Corruption",
}
)
# # Merging Both Sets Together / Cleaning Final Set
# Merge World1_final + world 2 Final, try to use country as the identifier so region gets applied to every existing country -
world_test = pd.merge(
world1_final,
world2_final[["Country_name", "Region_indicator"]],
how="left",
on="Country_name",
)
# Preview of merged set
world_test.info()
# Is null test to see which values are missing
world_test["Missing"] = world_test["Region_indicator"].isnull()
# Assign these missing values to a new list
world_missing = pd.DataFrame(world_test.loc[world_test["Missing"] == True])
# calculate the names of the countries missing values
world_missing["Country_name"].value_counts(ascending=False)
# Create values for all nulls using a for loop function, and apply it to the df as a new column
region = []
for i in range(len(world_test)):
if world_test["Country_name"][i] == "Angola":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Belize":
region.append("Latin America and Caribbean")
elif world_test["Country_name"][i] == "Congo (Kinshasa)":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Syria":
region.append("Middle East and North Africa")
elif world_test["Country_name"][i] == "Trinidad and Tobago":
region.append("Latin America and Caribbean")
elif world_test["Country_name"][i] == "Qatar":
region.append("Middle East and North Africa")
elif world_test["Country_name"][i] == "Sudan":
region.append("Middle East and North Africa")
elif world_test["Country_name"][i] == "Central African Republic":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Djibouti":
region.append("Sub-Saharan Africa")
elif world_test["Country_name"][i] == "Guyana":
region.append("Latin America and Caribbean")
elif world_test["Country_name"][i] == "Bhutan":
region.append("South Asia")
elif world_test["Country_name"][i] == "Suriname":
region.append("Latin America and Caribbea")
world_missing["Region"] = region
# **After merging the dataframes using the country value as the key, there were a couple of region identifiers that got left out. Instead of dropping the NaN values, I decided to create a for loop that automatically filled the region value that was missing. I found the missing region values by doing a NaN value counts.**
# Drop the old Region column, and the missing vlaues column
world_missing_cleaner_ = world_missing.drop("Region_indicator", axis=1)
world_missing_cleaner = world_missing_cleaner_.drop("Missing", axis=1)
# clean the missing column off, drop the rows from the origional set, and merge this set and the 2021 set.
world_missing_final_ = world_missing_cleaner.rename(
columns={"Region": "Region_indicator"}
)
# Re-order columns to match other dataframes
world_missing_final = world_missing_final_[
[
"Year",
"Country_name",
"Region_indicator",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
]
]
# Drop the missing column from the previous calculations
world_test = world_test.drop(["Missing"], axis=1)
# Organize columns to match other dataframes
world_test = world_test[
[
"Year",
"Country_name",
"Region_indicator",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
]
]
# Concat both dataframes
world_final_close = pd.DataFrame(pd.concat([world_test, world_missing_final]))
# Dropping duplicate, na values from the original set, and replacing them with the new values
world_final_ = world_final_close.dropna(axis=0)
# **I ran into a problem where the set had duplicate values after concatenating the values from the world test to the world missing final, so I dropped the NaN values that were duplicates.**
# Isnull count to see that the values have been dropped
world_final_.isnull().sum()
# Merging the cleaned dataframes to a final dataframe
world_final = pd.DataFrame(pd.concat([world_final_, world2_final]))
# Final nullcheck
world_final.isnull().sum()
# Sort year values decending
world_final.sort_values(by="Year", inplace=True)
# **I had trouble creating some visualizations, so I had to sort the year values in order to work out the kink.**
# # Preview and Analytics of Final Set
# Preview of dataframe final
world_final
# Final dataframe describe
round(world_final.describe(), 4)
# **Analytics - Data Describe Visualization**
# * The max ladder score appears to be 7.97, and the highest life expectancy age is to be 77.1.
# * The lowest score for corruption (the lower, the more corrupt) is .035.
# * The lowest average life expectancy age is 32.3.
# Pairplot to show an overview of all of the data, and their distrobutions.
sns.pairplot(
world_final[
[
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Freedom_of_choice",
]
]
)
plt.show()
# **Analytics - Pairplot Visualization**
# * Ladder score appears to have a unimodal distribution.
# * GDP per capita appears to have a non-symetric bimodeal distribution.
# * Generosity appears to have a distribution skewed to the left.
# * Social support appears to have a distribution skewed to the right.
# * Freedom of choice appears to have a distribution skewed to the right.
# Correlation of variables
world_final.corr()
# Heat map customization
plt.figure(figsize=(15, 12.5))
sns.heatmap(
world_final[
[
"Country_name",
"Region_indicator",
"Ladder_score",
"GDP_per_capita",
"Generosity",
"Social_support",
"Life_expectancy",
"Freedom_of_choice",
"Corruption",
]
].corr(),
annot=True,
cmap="Blues",
linewidth=0.9,
)
# Axis ticks rotated so full column is displayed
plt.yticks(rotation=45)
plt.xticks(rotation=45)
# Create title for plot, and show plot
plt.title("Relationship Between Columns")
plt.show()
# **Analytics - Correlation Between Columns, and Visualization**
# * GDP per capita, social support, and life expectancy seem to have the highest correlation to ladder score.
# Strip plot to show Generosity per regions
plt.figure(figsize=(15, 12.5))
sns.stripplot(x="Year", y="Generosity", data=world_final, hue="Region_indicator")
# Create title for plot, and show plot
plt.title("Generosity Per Region by Year")
plt.show()
# **Analytics - Strip Plot Visualization**
# * Southeast Asian countries appear to be the most generious countries consistently throughout the years.
# Strip plot to show Corruption per regions
plt.figure(figsize=(15, 12.5))
sns.stripplot(x="Year", y="Corruption", data=world_final, hue="Region_indicator")
# Create title for plot, and show plot
plt.title("Corruption Per Region by Year")
plt.show()
# **Analytics - Strip Plot Visualization**
# * Western Europe, and North america appear to be the most corrupt countries consistently throughout the years.
# OLS model statistics for GDP compaired to happiness
model = ols("Ladder_score ~ GDP_per_capita", data=world_final).fit()
# Label slope, and intercept
slope = model.params[1]
intercept = model.params[0]
# Print Slope / Intercept
print("Slope is:", slope)
print("------------------------------------------------------------------------------")
print("intercept is:", intercept)
print("==============================================================================")
# Print the model summary
print(model.summary())
# **Analytics - GDP per Capita Compared to Happiness**
# * We reject our null hypothesis from the .01 sagnificance level, and conclude there is a sagnificant statistical correlation between GDP, and happiness.
# * Now, let's see if there's a relationship between social support, and happiness after accounting for GDP.
# **$H_0: \beta_2 = 0$ vs. $H_a: \beta_2 \neq 0$**
# * $H_0$: There is not a sagnificant statistical association between Social score and happiness score after accounting for GDP per capita.
# * $H_a$: There is a sagnificant statistical association between Social score and happiness score after accounting for GDP per capita.
# OLS model statistics for GDP + Social support compaired to happiness
model = ols("Ladder_score ~ GDP_per_capita + Social_support", data=world_final).fit()
# Label slope, and intercept
slope = model.params[1]
intercept = model.params[0]
# Print Slope / Intercept
print("Slope is:", slope)
print("------------------------------------------------------------------------------")
print("intercept is:", intercept)
print("==============================================================================")
# Print the model summary
print(model.summary())
# **Analytics - GDP per Capita + Social Support Compared to Happiness**
# * We reject our null hypothesis from the .01 sagnificance level, and conclude there is a sagnificant statistical association between Social score and happiness score after accounting for GDP per capita.
# * There was an increase to the adj. r-squared value of about 5%.
# Boxplot of GDP per capita per year
plt.figure(figsize=(15, 7))
sns.boxplot(x="Year", y="GDP_per_capita", data=world_final)
# Create title for plot, and show plot
plt.title("Box Plot of GDP per Capita by Year")
plt.show()
# **Analytics - GDP per Capita Box Plot Visualization**
# * It appears the average GDP median across the years is > 9, with 2020 coming in at the highest.
# * * This might be due to the lack of information provided from that year.
# * A large majority of the interquartile range of GDP througout the years lies in range of the 8-10.
#
# Linear regression of GDP per capita vs happiness
sns.lmplot(x="GDP_per_capita", y="Ladder_score", data=world_final, ci=None)
# Create title for plot, and show plot
plt.title("Linear Regression of GDP per Capita vs Happiness")
plt.show()
# **Analytics - GDP per Capita Linear Regression Visualization**
# * The slope of the linear regression of GDP compared to happiness appears to be > 0.
# * This is a sign of a strong correlation between both variables.
# Boxplot of Social Support per year
plt.figure(figsize=(15, 7))
sns.boxplot(x="Year", y="Social_support", data=world_final)
plt.title("Box Plot of Social Support by Year")
plt.show()
# **Analytics - Social Support Box Plot Visualization**
# * It appears the average social support score median across the years is > .8, with 2006 coming in at the highest.
# * A large majority of the interquartile range of GDP througout the years lies in range of the .7-.9.
# * There appears to be a few outliers across the years, with most outliers in 2010, and 2011.
# Linear regression of Social support vs happiness
sns.lmplot(x="Social_support", y="Ladder_score", data=world_final, ci=None)
# Create title for plot, and show plot
plt.title("Linear Regression of Social Support vs Happiness")
plt.show()
# **Analytics - Social Support Linear Regression Visualization**
# * The slope of the linear regression of Social Support to happiness appears to be > 0.
# * This is a sign of a strong correlation between both variables.
# animated scatter plot to present GDP per capita in compairison to happiness rating per year
# Also plot points based on size of social score
fig = px.scatter(
world_final,
x="GDP_per_capita",
y="Ladder_score",
animation_frame="Year",
animation_group="Country_name",
template="plotly_white",
color="Region_indicator",
size="Social_support",
size_max=20,
title="GDP per Capita + Social Support per Region Compared to Happiness",
)
fig.show()
|
[{"world-happiness-report-2021/world-happiness-report.csv": {"column_names": "[\"Country name\", \"year\", \"Life Ladder\", \"Log GDP per capita\", \"Social support\", \"Healthy life expectancy at birth\", \"Freedom to make life choices\", \"Generosity\", \"Perceptions of corruption\", \"Positive affect\", \"Negative affect\"]", "column_data_types": "{\"Country name\": \"object\", \"year\": \"int64\", \"Life Ladder\": \"float64\", \"Log GDP per capita\": \"float64\", \"Social support\": \"float64\", \"Healthy life expectancy at birth\": \"float64\", \"Freedom to make life choices\": \"float64\", \"Generosity\": \"float64\", \"Perceptions of corruption\": \"float64\", \"Positive affect\": \"float64\", \"Negative affect\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1949 entries, 0 to 1948\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Country name 1949 non-null object \n 1 year 1949 non-null int64 \n 2 Life Ladder 1949 non-null float64\n 3 Log GDP per capita 1913 non-null float64\n 4 Social support 1936 non-null float64\n 5 Healthy life expectancy at birth 1894 non-null float64\n 6 Freedom to make life choices 1917 non-null float64\n 7 Generosity 1860 non-null float64\n 8 Perceptions of corruption 1839 non-null float64\n 9 Positive affect 1927 non-null float64\n 10 Negative affect 1933 non-null float64\ndtypes: float64(9), int64(1), object(1)\nmemory usage: 167.6+ KB\n", "summary": "{\"year\": {\"count\": 1949.0, \"mean\": 2013.216008209338, \"std\": 4.16682781957226, \"min\": 2005.0, \"25%\": 2010.0, \"50%\": 2013.0, \"75%\": 2017.0, \"max\": 2020.0}, \"Life Ladder\": {\"count\": 1949.0, \"mean\": 5.46670548999487, \"std\": 1.1157105016473905, \"min\": 2.375, \"25%\": 4.64, \"50%\": 5.386, \"75%\": 6.283, \"max\": 8.019}, \"Log GDP per capita\": {\"count\": 1913.0, \"mean\": 9.368452692106638, \"std\": 1.154084029731952, \"min\": 6.635, \"25%\": 8.464, \"50%\": 9.46, \"75%\": 10.353, \"max\": 11.648}, \"Social support\": {\"count\": 1936.0, \"mean\": 0.8125521694214877, \"std\": 0.11848163156602372, \"min\": 0.29, \"25%\": 0.74975, \"50%\": 0.8354999999999999, \"75%\": 0.905, \"max\": 0.987}, \"Healthy life expectancy at birth\": {\"count\": 1894.0, \"mean\": 63.35937381203802, \"std\": 7.51024461823635, \"min\": 32.3, \"25%\": 58.685, \"50%\": 65.2, \"75%\": 68.59, \"max\": 77.1}, \"Freedom to make life choices\": {\"count\": 1917.0, \"mean\": 0.7425576421491914, \"std\": 0.14209286577975108, \"min\": 0.258, \"25%\": 0.647, \"50%\": 0.763, \"75%\": 0.856, \"max\": 0.985}, \"Generosity\": {\"count\": 1860.0, \"mean\": 0.00010322580645161109, \"std\": 0.16221532880635953, \"min\": -0.335, \"25%\": -0.113, \"50%\": -0.025500000000000002, \"75%\": 0.091, \"max\": 0.698}, \"Perceptions of corruption\": {\"count\": 1839.0, \"mean\": 0.7471250679717237, \"std\": 0.18678881844350428, \"min\": 0.035, \"25%\": 0.69, \"50%\": 0.802, \"75%\": 0.872, \"max\": 0.983}, \"Positive affect\": {\"count\": 1927.0, \"mean\": 0.7100031136481577, \"std\": 0.10709993290814633, \"min\": 0.322, \"25%\": 0.6255, \"50%\": 0.722, \"75%\": 0.799, \"max\": 0.944}, \"Negative affect\": {\"count\": 1933.0, \"mean\": 0.26854423176409725, \"std\": 0.08516806994884693, \"min\": 0.083, \"25%\": 0.206, \"50%\": 0.258, \"75%\": 0.32, \"max\": 0.705}}", "examples": "{\"Country name\":{\"0\":\"Afghanistan\",\"1\":\"Afghanistan\",\"2\":\"Afghanistan\",\"3\":\"Afghanistan\"},\"year\":{\"0\":2008,\"1\":2009,\"2\":2010,\"3\":2011},\"Life 
Ladder\":{\"0\":3.724,\"1\":4.402,\"2\":4.758,\"3\":3.832},\"Log GDP per capita\":{\"0\":7.37,\"1\":7.54,\"2\":7.647,\"3\":7.62},\"Social support\":{\"0\":0.451,\"1\":0.552,\"2\":0.539,\"3\":0.521},\"Healthy life expectancy at birth\":{\"0\":50.8,\"1\":51.2,\"2\":51.6,\"3\":51.92},\"Freedom to make life choices\":{\"0\":0.718,\"1\":0.679,\"2\":0.6,\"3\":0.496},\"Generosity\":{\"0\":0.168,\"1\":0.19,\"2\":0.121,\"3\":0.162},\"Perceptions of corruption\":{\"0\":0.882,\"1\":0.85,\"2\":0.707,\"3\":0.731},\"Positive affect\":{\"0\":0.518,\"1\":0.584,\"2\":0.618,\"3\":0.611},\"Negative affect\":{\"0\":0.258,\"1\":0.237,\"2\":0.275,\"3\":0.267}}"}}, {"world-happiness-report-2021/world-happiness-report-2021.csv": {"column_names": "[\"Country name\", \"Regional indicator\", \"Ladder score\", \"Standard error of ladder score\", \"upperwhisker\", \"lowerwhisker\", \"Logged GDP per capita\", \"Social support\", \"Healthy life expectancy\", \"Freedom to make life choices\", \"Generosity\", \"Perceptions of corruption\", \"Ladder score in Dystopia\", \"Explained by: Log GDP per capita\", \"Explained by: Social support\", \"Explained by: Healthy life expectancy\", \"Explained by: Freedom to make life choices\", \"Explained by: Generosity\", \"Explained by: Perceptions of corruption\", \"Dystopia + residual\"]", "column_data_types": "{\"Country name\": \"object\", \"Regional indicator\": \"object\", \"Ladder score\": \"float64\", \"Standard error of ladder score\": \"float64\", \"upperwhisker\": \"float64\", \"lowerwhisker\": \"float64\", \"Logged GDP per capita\": \"float64\", \"Social support\": \"float64\", \"Healthy life expectancy\": \"float64\", \"Freedom to make life choices\": \"float64\", \"Generosity\": \"float64\", \"Perceptions of corruption\": \"float64\", \"Ladder score in Dystopia\": \"float64\", \"Explained by: Log GDP per capita\": \"float64\", \"Explained by: Social support\": \"float64\", \"Explained by: Healthy life expectancy\": \"float64\", \"Explained by: Freedom to make life choices\": \"float64\", \"Explained by: Generosity\": \"float64\", \"Explained by: Perceptions of corruption\": \"float64\", \"Dystopia + residual\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 149 entries, 0 to 148\nData columns (total 20 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Country name 149 non-null object \n 1 Regional indicator 149 non-null object \n 2 Ladder score 149 non-null float64\n 3 Standard error of ladder score 149 non-null float64\n 4 upperwhisker 149 non-null float64\n 5 lowerwhisker 149 non-null float64\n 6 Logged GDP per capita 149 non-null float64\n 7 Social support 149 non-null float64\n 8 Healthy life expectancy 149 non-null float64\n 9 Freedom to make life choices 149 non-null float64\n 10 Generosity 149 non-null float64\n 11 Perceptions of corruption 149 non-null float64\n 12 Ladder score in Dystopia 149 non-null float64\n 13 Explained by: Log GDP per capita 149 non-null float64\n 14 Explained by: Social support 149 non-null float64\n 15 Explained by: Healthy life expectancy 149 non-null float64\n 16 Explained by: Freedom to make life choices 149 non-null float64\n 17 Explained by: Generosity 149 non-null float64\n 18 Explained by: Perceptions of corruption 149 non-null float64\n 19 Dystopia + residual 149 non-null float64\ndtypes: float64(18), object(2)\nmemory usage: 23.4+ KB\n", "summary": "{\"Ladder score\": {\"count\": 149.0, \"mean\": 5.532838926174497, \"std\": 1.073923565823598, \"min\": 2.523, \"25%\": 
4.852, \"50%\": 5.534, \"75%\": 6.255, \"max\": 7.842}, \"Standard error of ladder score\": {\"count\": 149.0, \"mean\": 0.05875167785234898, \"std\": 0.02200119961111103, \"min\": 0.026, \"25%\": 0.043, \"50%\": 0.054, \"75%\": 0.07, \"max\": 0.173}, \"upperwhisker\": {\"count\": 149.0, \"mean\": 5.648006711409396, \"std\": 1.0543296223939433, \"min\": 2.596, \"25%\": 4.991, \"50%\": 5.625, \"75%\": 6.344, \"max\": 7.904}, \"lowerwhisker\": {\"count\": 149.0, \"mean\": 5.4176308724832225, \"std\": 1.0948790526132375, \"min\": 2.449, \"25%\": 4.706, \"50%\": 5.413, \"75%\": 6.128, \"max\": 7.78}, \"Logged GDP per capita\": {\"count\": 149.0, \"mean\": 9.432208053691276, \"std\": 1.1586014476640767, \"min\": 6.635, \"25%\": 8.541, \"50%\": 9.569, \"75%\": 10.421, \"max\": 11.647}, \"Social support\": {\"count\": 149.0, \"mean\": 0.8147449664429529, \"std\": 0.11488902720653997, \"min\": 0.463, \"25%\": 0.75, \"50%\": 0.832, \"75%\": 0.905, \"max\": 0.983}, \"Healthy life expectancy\": {\"count\": 149.0, \"mean\": 64.99279865771811, \"std\": 6.76204309040431, \"min\": 48.478, \"25%\": 59.802, \"50%\": 66.603, \"75%\": 69.6, \"max\": 76.953}, \"Freedom to make life choices\": {\"count\": 149.0, \"mean\": 0.7915973154362417, \"std\": 0.11333178506605257, \"min\": 0.382, \"25%\": 0.718, \"50%\": 0.804, \"75%\": 0.877, \"max\": 0.97}, \"Generosity\": {\"count\": 149.0, \"mean\": -0.015134228187919463, \"std\": 0.15065670021779698, \"min\": -0.288, \"25%\": -0.126, \"50%\": -0.036, \"75%\": 0.079, \"max\": 0.542}, \"Perceptions of corruption\": {\"count\": 149.0, \"mean\": 0.7274496644295303, \"std\": 0.17922631911280348, \"min\": 0.082, \"25%\": 0.667, \"50%\": 0.781, \"75%\": 0.845, \"max\": 0.939}, \"Ladder score in Dystopia\": {\"count\": 149.0, \"mean\": 2.43, \"std\": 0.0, \"min\": 2.43, \"25%\": 2.43, \"50%\": 2.43, \"75%\": 2.43, \"max\": 2.43}, \"Explained by: Log GDP per capita\": {\"count\": 149.0, \"mean\": 0.9771610738255032, \"std\": 0.4047399409816952, \"min\": 0.0, \"25%\": 0.666, \"50%\": 1.025, \"75%\": 1.323, \"max\": 1.751}, \"Explained by: Social support\": {\"count\": 149.0, \"mean\": 0.7933154362416108, \"std\": 0.2588712527557969, \"min\": 0.0, \"25%\": 0.647, \"50%\": 0.832, \"75%\": 0.996, \"max\": 1.172}, \"Explained by: Healthy life expectancy\": {\"count\": 149.0, \"mean\": 0.5201610738255034, \"std\": 0.21301909783416687, \"min\": 0.0, \"25%\": 0.357, \"50%\": 0.571, \"75%\": 0.665, \"max\": 0.897}, \"Explained by: Freedom to make life choices\": {\"count\": 149.0, \"mean\": 0.4987114093959732, \"std\": 0.13788838491066044, \"min\": 0.0, \"25%\": 0.409, \"50%\": 0.514, \"75%\": 0.603, \"max\": 0.716}, \"Explained by: Generosity\": {\"count\": 149.0, \"mean\": 0.17804697986577178, \"std\": 0.09827033422549317, \"min\": 0.0, \"25%\": 0.105, \"50%\": 0.164, \"75%\": 0.239, \"max\": 0.541}, \"Explained by: Perceptions of corruption\": {\"count\": 149.0, \"mean\": 0.13514093959731543, \"std\": 0.11436138902230591, \"min\": 0.0, \"25%\": 0.06, \"50%\": 0.101, \"75%\": 0.174, \"max\": 0.547}, \"Dystopia + residual\": {\"count\": 149.0, \"mean\": 2.430328859060403, \"std\": 0.5376452090837568, \"min\": 0.648, \"25%\": 2.138, \"50%\": 2.509, \"75%\": 2.794, \"max\": 3.482}}", "examples": "{\"Country name\":{\"0\":\"Finland\",\"1\":\"Denmark\",\"2\":\"Switzerland\",\"3\":\"Iceland\"},\"Regional indicator\":{\"0\":\"Western Europe\",\"1\":\"Western Europe\",\"2\":\"Western Europe\",\"3\":\"Western Europe\"},\"Ladder 
score\":{\"0\":7.842,\"1\":7.62,\"2\":7.571,\"3\":7.554},\"Standard error of ladder score\":{\"0\":0.032,\"1\":0.035,\"2\":0.036,\"3\":0.059},\"upperwhisker\":{\"0\":7.904,\"1\":7.687,\"2\":7.643,\"3\":7.67},\"lowerwhisker\":{\"0\":7.78,\"1\":7.552,\"2\":7.5,\"3\":7.438},\"Logged GDP per capita\":{\"0\":10.775,\"1\":10.933,\"2\":11.117,\"3\":10.878},\"Social support\":{\"0\":0.954,\"1\":0.954,\"2\":0.942,\"3\":0.983},\"Healthy life expectancy\":{\"0\":72.0,\"1\":72.7,\"2\":74.4,\"3\":73.0},\"Freedom to make life choices\":{\"0\":0.949,\"1\":0.946,\"2\":0.919,\"3\":0.955},\"Generosity\":{\"0\":-0.098,\"1\":0.03,\"2\":0.025,\"3\":0.16},\"Perceptions of corruption\":{\"0\":0.186,\"1\":0.179,\"2\":0.292,\"3\":0.673},\"Ladder score in Dystopia\":{\"0\":2.43,\"1\":2.43,\"2\":2.43,\"3\":2.43},\"Explained by: Log GDP per capita\":{\"0\":1.446,\"1\":1.502,\"2\":1.566,\"3\":1.482},\"Explained by: Social support\":{\"0\":1.106,\"1\":1.108,\"2\":1.079,\"3\":1.172},\"Explained by: Healthy life expectancy\":{\"0\":0.741,\"1\":0.763,\"2\":0.816,\"3\":0.772},\"Explained by: Freedom to make life choices\":{\"0\":0.691,\"1\":0.686,\"2\":0.653,\"3\":0.698},\"Explained by: Generosity\":{\"0\":0.124,\"1\":0.208,\"2\":0.204,\"3\":0.293},\"Explained by: Perceptions of corruption\":{\"0\":0.481,\"1\":0.485,\"2\":0.413,\"3\":0.17},\"Dystopia + residual\":{\"0\":3.253,\"1\":2.868,\"2\":2.839,\"3\":2.967}}"}}]
| true | 2 |
<start_data_description><data_path>world-happiness-report-2021/world-happiness-report.csv:
<column_names>
['Country name', 'year', 'Life Ladder', 'Log GDP per capita', 'Social support', 'Healthy life expectancy at birth', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption', 'Positive affect', 'Negative affect']
<column_types>
{'Country name': 'object', 'year': 'int64', 'Life Ladder': 'float64', 'Log GDP per capita': 'float64', 'Social support': 'float64', 'Healthy life expectancy at birth': 'float64', 'Freedom to make life choices': 'float64', 'Generosity': 'float64', 'Perceptions of corruption': 'float64', 'Positive affect': 'float64', 'Negative affect': 'float64'}
<dataframe_Summary>
{'year': {'count': 1949.0, 'mean': 2013.216008209338, 'std': 4.16682781957226, 'min': 2005.0, '25%': 2010.0, '50%': 2013.0, '75%': 2017.0, 'max': 2020.0}, 'Life Ladder': {'count': 1949.0, 'mean': 5.46670548999487, 'std': 1.1157105016473905, 'min': 2.375, '25%': 4.64, '50%': 5.386, '75%': 6.283, 'max': 8.019}, 'Log GDP per capita': {'count': 1913.0, 'mean': 9.368452692106638, 'std': 1.154084029731952, 'min': 6.635, '25%': 8.464, '50%': 9.46, '75%': 10.353, 'max': 11.648}, 'Social support': {'count': 1936.0, 'mean': 0.8125521694214877, 'std': 0.11848163156602372, 'min': 0.29, '25%': 0.74975, '50%': 0.8354999999999999, '75%': 0.905, 'max': 0.987}, 'Healthy life expectancy at birth': {'count': 1894.0, 'mean': 63.35937381203802, 'std': 7.51024461823635, 'min': 32.3, '25%': 58.685, '50%': 65.2, '75%': 68.59, 'max': 77.1}, 'Freedom to make life choices': {'count': 1917.0, 'mean': 0.7425576421491914, 'std': 0.14209286577975108, 'min': 0.258, '25%': 0.647, '50%': 0.763, '75%': 0.856, 'max': 0.985}, 'Generosity': {'count': 1860.0, 'mean': 0.00010322580645161109, 'std': 0.16221532880635953, 'min': -0.335, '25%': -0.113, '50%': -0.025500000000000002, '75%': 0.091, 'max': 0.698}, 'Perceptions of corruption': {'count': 1839.0, 'mean': 0.7471250679717237, 'std': 0.18678881844350428, 'min': 0.035, '25%': 0.69, '50%': 0.802, '75%': 0.872, 'max': 0.983}, 'Positive affect': {'count': 1927.0, 'mean': 0.7100031136481577, 'std': 0.10709993290814633, 'min': 0.322, '25%': 0.6255, '50%': 0.722, '75%': 0.799, 'max': 0.944}, 'Negative affect': {'count': 1933.0, 'mean': 0.26854423176409725, 'std': 0.08516806994884693, 'min': 0.083, '25%': 0.206, '50%': 0.258, '75%': 0.32, 'max': 0.705}}
<dataframe_info>
RangeIndex: 1949 entries, 0 to 1948
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Country name 1949 non-null object
1 year 1949 non-null int64
2 Life Ladder 1949 non-null float64
3 Log GDP per capita 1913 non-null float64
4 Social support 1936 non-null float64
5 Healthy life expectancy at birth 1894 non-null float64
6 Freedom to make life choices 1917 non-null float64
7 Generosity 1860 non-null float64
8 Perceptions of corruption 1839 non-null float64
9 Positive affect 1927 non-null float64
10 Negative affect 1933 non-null float64
dtypes: float64(9), int64(1), object(1)
memory usage: 167.6+ KB
<some_examples>
{'Country name': {'0': 'Afghanistan', '1': 'Afghanistan', '2': 'Afghanistan', '3': 'Afghanistan'}, 'year': {'0': 2008, '1': 2009, '2': 2010, '3': 2011}, 'Life Ladder': {'0': 3.724, '1': 4.402, '2': 4.758, '3': 3.832}, 'Log GDP per capita': {'0': 7.37, '1': 7.54, '2': 7.647, '3': 7.62}, 'Social support': {'0': 0.451, '1': 0.552, '2': 0.539, '3': 0.521}, 'Healthy life expectancy at birth': {'0': 50.8, '1': 51.2, '2': 51.6, '3': 51.92}, 'Freedom to make life choices': {'0': 0.718, '1': 0.679, '2': 0.6, '3': 0.496}, 'Generosity': {'0': 0.168, '1': 0.19, '2': 0.121, '3': 0.162}, 'Perceptions of corruption': {'0': 0.882, '1': 0.85, '2': 0.707, '3': 0.731}, 'Positive affect': {'0': 0.518, '1': 0.584, '2': 0.618, '3': 0.611}, 'Negative affect': {'0': 0.258, '1': 0.237, '2': 0.275, '3': 0.267}}
<end_description>
<start_data_description><data_path>world-happiness-report-2021/world-happiness-report-2021.csv:
<column_names>
['Country name', 'Regional indicator', 'Ladder score', 'Standard error of ladder score', 'upperwhisker', 'lowerwhisker', 'Logged GDP per capita', 'Social support', 'Healthy life expectancy', 'Freedom to make life choices', 'Generosity', 'Perceptions of corruption', 'Ladder score in Dystopia', 'Explained by: Log GDP per capita', 'Explained by: Social support', 'Explained by: Healthy life expectancy', 'Explained by: Freedom to make life choices', 'Explained by: Generosity', 'Explained by: Perceptions of corruption', 'Dystopia + residual']
<column_types>
{'Country name': 'object', 'Regional indicator': 'object', 'Ladder score': 'float64', 'Standard error of ladder score': 'float64', 'upperwhisker': 'float64', 'lowerwhisker': 'float64', 'Logged GDP per capita': 'float64', 'Social support': 'float64', 'Healthy life expectancy': 'float64', 'Freedom to make life choices': 'float64', 'Generosity': 'float64', 'Perceptions of corruption': 'float64', 'Ladder score in Dystopia': 'float64', 'Explained by: Log GDP per capita': 'float64', 'Explained by: Social support': 'float64', 'Explained by: Healthy life expectancy': 'float64', 'Explained by: Freedom to make life choices': 'float64', 'Explained by: Generosity': 'float64', 'Explained by: Perceptions of corruption': 'float64', 'Dystopia + residual': 'float64'}
<dataframe_Summary>
{'Ladder score': {'count': 149.0, 'mean': 5.532838926174497, 'std': 1.073923565823598, 'min': 2.523, '25%': 4.852, '50%': 5.534, '75%': 6.255, 'max': 7.842}, 'Standard error of ladder score': {'count': 149.0, 'mean': 0.05875167785234898, 'std': 0.02200119961111103, 'min': 0.026, '25%': 0.043, '50%': 0.054, '75%': 0.07, 'max': 0.173}, 'upperwhisker': {'count': 149.0, 'mean': 5.648006711409396, 'std': 1.0543296223939433, 'min': 2.596, '25%': 4.991, '50%': 5.625, '75%': 6.344, 'max': 7.904}, 'lowerwhisker': {'count': 149.0, 'mean': 5.4176308724832225, 'std': 1.0948790526132375, 'min': 2.449, '25%': 4.706, '50%': 5.413, '75%': 6.128, 'max': 7.78}, 'Logged GDP per capita': {'count': 149.0, 'mean': 9.432208053691276, 'std': 1.1586014476640767, 'min': 6.635, '25%': 8.541, '50%': 9.569, '75%': 10.421, 'max': 11.647}, 'Social support': {'count': 149.0, 'mean': 0.8147449664429529, 'std': 0.11488902720653997, 'min': 0.463, '25%': 0.75, '50%': 0.832, '75%': 0.905, 'max': 0.983}, 'Healthy life expectancy': {'count': 149.0, 'mean': 64.99279865771811, 'std': 6.76204309040431, 'min': 48.478, '25%': 59.802, '50%': 66.603, '75%': 69.6, 'max': 76.953}, 'Freedom to make life choices': {'count': 149.0, 'mean': 0.7915973154362417, 'std': 0.11333178506605257, 'min': 0.382, '25%': 0.718, '50%': 0.804, '75%': 0.877, 'max': 0.97}, 'Generosity': {'count': 149.0, 'mean': -0.015134228187919463, 'std': 0.15065670021779698, 'min': -0.288, '25%': -0.126, '50%': -0.036, '75%': 0.079, 'max': 0.542}, 'Perceptions of corruption': {'count': 149.0, 'mean': 0.7274496644295303, 'std': 0.17922631911280348, 'min': 0.082, '25%': 0.667, '50%': 0.781, '75%': 0.845, 'max': 0.939}, 'Ladder score in Dystopia': {'count': 149.0, 'mean': 2.43, 'std': 0.0, 'min': 2.43, '25%': 2.43, '50%': 2.43, '75%': 2.43, 'max': 2.43}, 'Explained by: Log GDP per capita': {'count': 149.0, 'mean': 0.9771610738255032, 'std': 0.4047399409816952, 'min': 0.0, '25%': 0.666, '50%': 1.025, '75%': 1.323, 'max': 1.751}, 'Explained by: Social support': {'count': 149.0, 'mean': 0.7933154362416108, 'std': 0.2588712527557969, 'min': 0.0, '25%': 0.647, '50%': 0.832, '75%': 0.996, 'max': 1.172}, 'Explained by: Healthy life expectancy': {'count': 149.0, 'mean': 0.5201610738255034, 'std': 0.21301909783416687, 'min': 0.0, '25%': 0.357, '50%': 0.571, '75%': 0.665, 'max': 0.897}, 'Explained by: Freedom to make life choices': {'count': 149.0, 'mean': 0.4987114093959732, 'std': 0.13788838491066044, 'min': 0.0, '25%': 0.409, '50%': 0.514, '75%': 0.603, 'max': 0.716}, 'Explained by: Generosity': {'count': 149.0, 'mean': 0.17804697986577178, 'std': 0.09827033422549317, 'min': 0.0, '25%': 0.105, '50%': 0.164, '75%': 0.239, 'max': 0.541}, 'Explained by: Perceptions of corruption': {'count': 149.0, 'mean': 0.13514093959731543, 'std': 0.11436138902230591, 'min': 0.0, '25%': 0.06, '50%': 0.101, '75%': 0.174, 'max': 0.547}, 'Dystopia + residual': {'count': 149.0, 'mean': 2.430328859060403, 'std': 0.5376452090837568, 'min': 0.648, '25%': 2.138, '50%': 2.509, '75%': 2.794, 'max': 3.482}}
<dataframe_info>
RangeIndex: 149 entries, 0 to 148
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Country name 149 non-null object
1 Regional indicator 149 non-null object
2 Ladder score 149 non-null float64
3 Standard error of ladder score 149 non-null float64
4 upperwhisker 149 non-null float64
5 lowerwhisker 149 non-null float64
6 Logged GDP per capita 149 non-null float64
7 Social support 149 non-null float64
8 Healthy life expectancy 149 non-null float64
9 Freedom to make life choices 149 non-null float64
10 Generosity 149 non-null float64
11 Perceptions of corruption 149 non-null float64
12 Ladder score in Dystopia 149 non-null float64
13 Explained by: Log GDP per capita 149 non-null float64
14 Explained by: Social support 149 non-null float64
15 Explained by: Healthy life expectancy 149 non-null float64
16 Explained by: Freedom to make life choices 149 non-null float64
17 Explained by: Generosity 149 non-null float64
18 Explained by: Perceptions of corruption 149 non-null float64
19 Dystopia + residual 149 non-null float64
dtypes: float64(18), object(2)
memory usage: 23.4+ KB
<some_examples>
{'Country name': {'0': 'Finland', '1': 'Denmark', '2': 'Switzerland', '3': 'Iceland'}, 'Regional indicator': {'0': 'Western Europe', '1': 'Western Europe', '2': 'Western Europe', '3': 'Western Europe'}, 'Ladder score': {'0': 7.842, '1': 7.62, '2': 7.571, '3': 7.554}, 'Standard error of ladder score': {'0': 0.032, '1': 0.035, '2': 0.036, '3': 0.059}, 'upperwhisker': {'0': 7.904, '1': 7.687, '2': 7.643, '3': 7.67}, 'lowerwhisker': {'0': 7.78, '1': 7.552, '2': 7.5, '3': 7.438}, 'Logged GDP per capita': {'0': 10.775, '1': 10.933, '2': 11.117, '3': 10.878}, 'Social support': {'0': 0.954, '1': 0.954, '2': 0.942, '3': 0.983}, 'Healthy life expectancy': {'0': 72.0, '1': 72.7, '2': 74.4, '3': 73.0}, 'Freedom to make life choices': {'0': 0.949, '1': 0.946, '2': 0.919, '3': 0.955}, 'Generosity': {'0': -0.098, '1': 0.03, '2': 0.025, '3': 0.16}, 'Perceptions of corruption': {'0': 0.186, '1': 0.179, '2': 0.292, '3': 0.673}, 'Ladder score in Dystopia': {'0': 2.43, '1': 2.43, '2': 2.43, '3': 2.43}, 'Explained by: Log GDP per capita': {'0': 1.446, '1': 1.502, '2': 1.566, '3': 1.482}, 'Explained by: Social support': {'0': 1.106, '1': 1.108, '2': 1.079, '3': 1.172}, 'Explained by: Healthy life expectancy': {'0': 0.741, '1': 0.763, '2': 0.816, '3': 0.772}, 'Explained by: Freedom to make life choices': {'0': 0.691, '1': 0.686, '2': 0.653, '3': 0.698}, 'Explained by: Generosity': {'0': 0.124, '1': 0.208, '2': 0.204, '3': 0.293}, 'Explained by: Perceptions of corruption': {'0': 0.481, '1': 0.485, '2': 0.413, '3': 0.17}, 'Dystopia + residual': {'0': 3.253, '1': 2.868, '2': 2.839, '3': 2.967}}
<end_description>
| 5,842 | 0 | 9,089 | 5,842 |
69003373
|
<jupyter_start><jupyter_text>English Premier League(2020-21)
### Context
This dataset is a collection of basic but crucial stats of the English Premier League 2020-21 season. The dataset has all the players that played in the EPL and their standard stats such as Goals, Assists, xG, xA, Passes Attempted, Pass Accuracy and more! Do upvote if you like it!
### Content
| Attribute | Description |
| --- | --- |
| Position | Each player has a certain position, in which he plays regularly. The position in this dataset are, FW - Forward, MF - Midfield, DF - Defensive, GK - Goalkeeper |
| Starts | The number of times the player was named in the starting 11 by the manager. |
| Mins | The number of minutes played by the player. |
| Goals | The number of Goals scored by the player. |
| Assists | The number of times the player has assisted another player in scoring a goal. |
| Passes_Attempted | The number of passes attempted by the player. |
| Perc_Passes_Completed | The percentage of attempted passes that successfully reached a teammate. |
| xG | Expected number of goals from the player in a match. |
| xA | Expected number of assists from the player in a match. |
| Yellow_Cards | The players get a yellow card from the referee for indiscipline, technical fouls, or other minor fouls. |
| Red Cards | The players get a red card for accumulating 2 yellow cards in a single game, or for a major foul. |
### Inspiration
There are several directions you can take with this dataset:
1) Find out which team has the most aggressive defenders (or players for that matter)
2) Which team had more players in the top 10 most assists chart
3) Who were the players with most attempted passes
4) Which players had the most accurate passes excluding the goal keeper and the defenders
5) Defenders with most goals!!
6) Which nation had the most aggressive players?
the possibilities are endless, create a notebook and explore them!
Kaggle dataset identifier: english-premier-league202021
<jupyter_code>import pandas as pd
df = pd.read_csv('english-premier-league202021/EPL_20_21.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 532 entries, 0 to 531
Data columns (total 18 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Name 532 non-null object
1 Club 532 non-null object
2 Nationality 532 non-null object
3 Position 532 non-null object
4 Age 532 non-null int64
5 Matches 532 non-null int64
6 Starts 532 non-null int64
7 Mins 532 non-null int64
8 Goals 532 non-null int64
9 Assists 532 non-null int64
10 Passes_Attempted 532 non-null int64
11 Perc_Passes_Completed 532 non-null float64
12 Penalty_Goals 532 non-null int64
13 Penalty_Attempted 532 non-null int64
14 xG 532 non-null float64
15 xA 532 non-null float64
16 Yellow_Cards 532 non-null int64
17 Red_Cards 532 non-null int64
dtypes: float64(3), int64(11), object(4)
memory usage: 74.9+ KB
<jupyter_text>Examples:
{
"Name": "Mason Mount",
"Club": "Chelsea",
"Nationality": "ENG",
"Position": "MF,FW",
"Age": 21,
"Matches": 36,
"Starts": 32,
"Mins": 2890,
"Goals": 6,
"Assists": 5,
"Passes_Attempted": 1881,
"Perc_Passes_Completed": 82.3,
"Penalty_Goals": 1,
"Penalty_Attempted": 1,
"xG": 0.21,
"xA": 0.24,
"Yellow_Cards": 2,
"Red_Cards": 0
}
{
"Name": "Edouard Mendy",
"Club": "Chelsea",
"Nationality": "SEN",
"Position": "GK",
"Age": 28,
"Matches": 31,
"Starts": 31,
"Mins": 2745,
"Goals": 0,
"Assists": 0,
"Passes_Attempted": 1007,
"Perc_Passes_Completed": 84.6,
"Penalty_Goals": 0,
"Penalty_Attempted": 0,
"xG": 0.0,
"xA": 0.0,
"Yellow_Cards": 2,
"Red_Cards": 0
}
{
"Name": "Timo Werner",
"Club": "Chelsea",
"Nationality": "GER",
"Position": "FW",
"Age": 24,
"Matches": 35,
"Starts": 29,
"Mins": 2602,
"Goals": 6,
"Assists": 8,
"Passes_Attempted": 826,
"Perc_Passes_Completed": 77.2,
"Penalty_Goals": 0,
"Penalty_Attempted": 0,
"xG": 0.41000000000000003,
"xA": 0.21,
"Yellow_Cards": 2,
"Red_Cards": 0
}
{
"Name": "Ben Chilwell",
"Club": "Chelsea",
"Nationality": "ENG",
"Position": "DF",
"Age": 23,
"Matches": 27,
"Starts": 27,
"Mins": 2286,
"Goals": 3,
"Assists": 5,
"Passes_Attempted": 1806,
"Perc_Passes_Completed": 78.6,
"Penalty_Goals": 0,
"Penalty_Attempted": 0,
"xG": 0.1,
"xA": 0.11,
"Yellow_Cards": 3,
"Red_Cards": 0
}
<jupyter_script>import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import squarify
epl = pd.read_csv("/kaggle/input/english-premier-league202021/EPL_20_21.csv")
epl.head()
epl.info()
# **NUMBER OF PLAYERS IN EACH TEAM'S ROSTER**
player_count = epl.groupby("Club")
player_count["Name"].count()
# **Age Stats**
plt.figure(figsize=(20, 13))
sns.boxplot(x="Age", y="Club", data=epl)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003373.ipynb
|
english-premier-league202021
|
rajatrc1705
|
[{"Id": 69003373, "ScriptId": 18830453, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1690361, "CreationDate": "07/25/2021 17:52:19", "VersionNumber": 1.0, "Title": "EPL Season 20-2021 Players Analysis", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 30.0, "LinesInsertedFromPrevious": 30.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91688774, "KernelVersionId": 69003373, "SourceDatasetVersionId": 2300115}]
|
[{"Id": 2300115, "DatasetId": 1386885, "DatasourceVersionId": 2341394, "CreatorUserId": 5493928, "LicenseName": "CC0: Public Domain", "CreationDate": "06/03/2021 15:44:41", "VersionNumber": 1.0, "Title": "English Premier League(2020-21)", "Slug": "english-premier-league202021", "Subtitle": "Statistics of EPL 2020-21 season Players", "Description": "### Context\n\nThis dataset is a collection of basic but crucial stats of the English Premier League 2020-21 season. The dataset has all the players that played in the EPL and their standard stats such as Goals, Assists, xG, xA, Passes Attempted, Pass Accuracy and more! Do upvote if you like it!\n\n### Content\n\n| Attribute | Description |\n| --- | --- |\n| Position | Each player has a certain position, in which he plays regularly. The position in this dataset are, FW - Forward, MF - Midfield, DF - Defensive, GK - Goalkeeper |\n| Starts | The number of times the player was named in the starting 11 by the manager. |\n| Mins | The number of minutes played by the player. |\n| Goals | The number of Goals scored by the player. |\n| Assists | The number of times the player has assisted other player in scoring the goal. |\n| Passes_Attempted | The number of passes attempted by the player. |\n| Perc_Passes_Completed | The number of passes that the player accurately passed to his teammate. |\n| xG | Expected number of goals from the player in a match. |\n| xA | Expected number of assists from the player in a match. |\n| Yellow_Cards | The players get a yellow card from the referee for indiscipline, technical fouls, or other minor fouls. |\n| Red Cards | The players get a red card for accumulating 2 yellow cards in a single game, or for a major foul. | \n\n### Inspiration\n\nThere are several directions you can take with this dataset:\n1) Find out which team has the most aggressive defenders (or players for that matter)\n2) Which team had more players in the top 10 most assists chart\n3) Who were the players with most attempted passes\n4) Which players had the most accurate passes excluding the goal keeper and the defenders\n5) Defenders with most goals!!\n6) Which nation had the most aggressive players?\nthe possibilities are endless, create a notebook and explore them!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1386885, "CreatorUserId": 5493928, "OwnerUserId": 5493928.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2300115.0, "CurrentDatasourceVersionId": 2341394.0, "ForumId": 1406099, "Type": 2, "CreationDate": "06/03/2021 15:44:41", "LastActivityDate": "06/03/2021", "TotalViews": 41256, "TotalDownloads": 6768, "TotalVotes": 130, "TotalKernels": 46}]
|
[{"Id": 5493928, "UserName": "rajatrc1705", "DisplayName": "Rajat Chaudhari", "RegisterDate": "07/19/2020", "PerformanceTier": 2}]
|
|
[{"english-premier-league202021/EPL_20_21.csv": {"column_names": "[\"Name\", \"Club\", \"Nationality\", \"Position\", \"Age\", \"Matches\", \"Starts\", \"Mins\", \"Goals\", \"Assists\", \"Passes_Attempted\", \"Perc_Passes_Completed\", \"Penalty_Goals\", \"Penalty_Attempted\", \"xG\", \"xA\", \"Yellow_Cards\", \"Red_Cards\"]", "column_data_types": "{\"Name\": \"object\", \"Club\": \"object\", \"Nationality\": \"object\", \"Position\": \"object\", \"Age\": \"int64\", \"Matches\": \"int64\", \"Starts\": \"int64\", \"Mins\": \"int64\", \"Goals\": \"int64\", \"Assists\": \"int64\", \"Passes_Attempted\": \"int64\", \"Perc_Passes_Completed\": \"float64\", \"Penalty_Goals\": \"int64\", \"Penalty_Attempted\": \"int64\", \"xG\": \"float64\", \"xA\": \"float64\", \"Yellow_Cards\": \"int64\", \"Red_Cards\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 532 entries, 0 to 531\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Name 532 non-null object \n 1 Club 532 non-null object \n 2 Nationality 532 non-null object \n 3 Position 532 non-null object \n 4 Age 532 non-null int64 \n 5 Matches 532 non-null int64 \n 6 Starts 532 non-null int64 \n 7 Mins 532 non-null int64 \n 8 Goals 532 non-null int64 \n 9 Assists 532 non-null int64 \n 10 Passes_Attempted 532 non-null int64 \n 11 Perc_Passes_Completed 532 non-null float64\n 12 Penalty_Goals 532 non-null int64 \n 13 Penalty_Attempted 532 non-null int64 \n 14 xG 532 non-null float64\n 15 xA 532 non-null float64\n 16 Yellow_Cards 532 non-null int64 \n 17 Red_Cards 532 non-null int64 \ndtypes: float64(3), int64(11), object(4)\nmemory usage: 74.9+ KB\n", "summary": "{\"Age\": {\"count\": 532.0, \"mean\": 25.5, \"std\": 4.319403948556999, \"min\": 16.0, \"25%\": 22.0, \"50%\": 26.0, \"75%\": 29.0, \"max\": 38.0}, \"Matches\": {\"count\": 532.0, \"mean\": 19.535714285714285, \"std\": 11.840458698914475, \"min\": 1.0, \"25%\": 9.0, \"50%\": 21.0, \"75%\": 30.0, \"max\": 38.0}, \"Starts\": {\"count\": 532.0, \"mean\": 15.714285714285714, \"std\": 11.921160618285109, \"min\": 0.0, \"25%\": 4.0, \"50%\": 15.0, \"75%\": 27.0, \"max\": 38.0}, \"Mins\": {\"count\": 532.0, \"mean\": 1411.4436090225563, \"std\": 1043.1718556185067, \"min\": 1.0, \"25%\": 426.0, \"50%\": 1345.0, \"75%\": 2303.5, \"max\": 3420.0}, \"Goals\": {\"count\": 532.0, \"mean\": 1.8533834586466165, \"std\": 3.338009124437716, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 23.0}, \"Assists\": {\"count\": 532.0, \"mean\": 1.287593984962406, \"std\": 2.0951913918664617, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 2.0, \"max\": 14.0}, \"Passes_Attempted\": {\"count\": 532.0, \"mean\": 717.75, \"std\": 631.3725218121509, \"min\": 0.0, \"25%\": 171.5, \"50%\": 573.5, \"75%\": 1129.5, \"max\": 3214.0}, \"Perc_Passes_Completed\": {\"count\": 532.0, \"mean\": 77.82387218045113, \"std\": 13.011630812100805, \"min\": -1.0, \"25%\": 73.5, \"50%\": 79.2, \"75%\": 84.625, \"max\": 100.0}, \"Penalty_Goals\": {\"count\": 532.0, \"mean\": 0.19172932330827067, \"std\": 0.8508814627760809, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 9.0}, \"Penalty_Attempted\": {\"count\": 532.0, \"mean\": 0.2349624060150376, \"std\": 0.9758184543420467, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 10.0}, \"xG\": {\"count\": 532.0, \"mean\": 0.11328947368421054, \"std\": 0.14817371225647819, \"min\": 0.0, \"25%\": 0.01, \"50%\": 0.06, \"75%\": 0.15, \"max\": 1.16}, \"xA\": {\"count\": 532.0, 
\"mean\": 0.07265037593984963, \"std\": 0.09007177478985508, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.05, \"75%\": 0.11, \"max\": 0.9}, \"Yellow_Cards\": {\"count\": 532.0, \"mean\": 2.1146616541353382, \"std\": 2.26909376498821, \"min\": 0.0, \"25%\": 0.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 12.0}, \"Red_Cards\": {\"count\": 532.0, \"mean\": 0.09022556390977443, \"std\": 0.29326775375150527, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 2.0}}", "examples": "{\"Name\":{\"0\":\"Mason Mount\",\"1\":\"Edouard Mendy\",\"2\":\"Timo Werner\",\"3\":\"Ben Chilwell\"},\"Club\":{\"0\":\"Chelsea\",\"1\":\"Chelsea\",\"2\":\"Chelsea\",\"3\":\"Chelsea\"},\"Nationality\":{\"0\":\"ENG\",\"1\":\"SEN\",\"2\":\"GER\",\"3\":\"ENG\"},\"Position\":{\"0\":\"MF,FW\",\"1\":\"GK\",\"2\":\"FW\",\"3\":\"DF\"},\"Age\":{\"0\":21,\"1\":28,\"2\":24,\"3\":23},\"Matches\":{\"0\":36,\"1\":31,\"2\":35,\"3\":27},\"Starts\":{\"0\":32,\"1\":31,\"2\":29,\"3\":27},\"Mins\":{\"0\":2890,\"1\":2745,\"2\":2602,\"3\":2286},\"Goals\":{\"0\":6,\"1\":0,\"2\":6,\"3\":3},\"Assists\":{\"0\":5,\"1\":0,\"2\":8,\"3\":5},\"Passes_Attempted\":{\"0\":1881,\"1\":1007,\"2\":826,\"3\":1806},\"Perc_Passes_Completed\":{\"0\":82.3,\"1\":84.6,\"2\":77.2,\"3\":78.6},\"Penalty_Goals\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"Penalty_Attempted\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"xG\":{\"0\":0.21,\"1\":0.0,\"2\":0.41,\"3\":0.1},\"xA\":{\"0\":0.24,\"1\":0.0,\"2\":0.21,\"3\":0.11},\"Yellow_Cards\":{\"0\":2,\"1\":2,\"2\":2,\"3\":3},\"Red_Cards\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
| true | 1 |
<start_data_description><data_path>english-premier-league202021/EPL_20_21.csv:
<column_names>
['Name', 'Club', 'Nationality', 'Position', 'Age', 'Matches', 'Starts', 'Mins', 'Goals', 'Assists', 'Passes_Attempted', 'Perc_Passes_Completed', 'Penalty_Goals', 'Penalty_Attempted', 'xG', 'xA', 'Yellow_Cards', 'Red_Cards']
<column_types>
{'Name': 'object', 'Club': 'object', 'Nationality': 'object', 'Position': 'object', 'Age': 'int64', 'Matches': 'int64', 'Starts': 'int64', 'Mins': 'int64', 'Goals': 'int64', 'Assists': 'int64', 'Passes_Attempted': 'int64', 'Perc_Passes_Completed': 'float64', 'Penalty_Goals': 'int64', 'Penalty_Attempted': 'int64', 'xG': 'float64', 'xA': 'float64', 'Yellow_Cards': 'int64', 'Red_Cards': 'int64'}
<dataframe_Summary>
{'Age': {'count': 532.0, 'mean': 25.5, 'std': 4.319403948556999, 'min': 16.0, '25%': 22.0, '50%': 26.0, '75%': 29.0, 'max': 38.0}, 'Matches': {'count': 532.0, 'mean': 19.535714285714285, 'std': 11.840458698914475, 'min': 1.0, '25%': 9.0, '50%': 21.0, '75%': 30.0, 'max': 38.0}, 'Starts': {'count': 532.0, 'mean': 15.714285714285714, 'std': 11.921160618285109, 'min': 0.0, '25%': 4.0, '50%': 15.0, '75%': 27.0, 'max': 38.0}, 'Mins': {'count': 532.0, 'mean': 1411.4436090225563, 'std': 1043.1718556185067, 'min': 1.0, '25%': 426.0, '50%': 1345.0, '75%': 2303.5, 'max': 3420.0}, 'Goals': {'count': 532.0, 'mean': 1.8533834586466165, 'std': 3.338009124437716, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 23.0}, 'Assists': {'count': 532.0, 'mean': 1.287593984962406, 'std': 2.0951913918664617, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 2.0, 'max': 14.0}, 'Passes_Attempted': {'count': 532.0, 'mean': 717.75, 'std': 631.3725218121509, 'min': 0.0, '25%': 171.5, '50%': 573.5, '75%': 1129.5, 'max': 3214.0}, 'Perc_Passes_Completed': {'count': 532.0, 'mean': 77.82387218045113, 'std': 13.011630812100805, 'min': -1.0, '25%': 73.5, '50%': 79.2, '75%': 84.625, 'max': 100.0}, 'Penalty_Goals': {'count': 532.0, 'mean': 0.19172932330827067, 'std': 0.8508814627760809, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 9.0}, 'Penalty_Attempted': {'count': 532.0, 'mean': 0.2349624060150376, 'std': 0.9758184543420467, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 10.0}, 'xG': {'count': 532.0, 'mean': 0.11328947368421054, 'std': 0.14817371225647819, 'min': 0.0, '25%': 0.01, '50%': 0.06, '75%': 0.15, 'max': 1.16}, 'xA': {'count': 532.0, 'mean': 0.07265037593984963, 'std': 0.09007177478985508, 'min': 0.0, '25%': 0.0, '50%': 0.05, '75%': 0.11, 'max': 0.9}, 'Yellow_Cards': {'count': 532.0, 'mean': 2.1146616541353382, 'std': 2.26909376498821, 'min': 0.0, '25%': 0.0, '50%': 2.0, '75%': 3.0, 'max': 12.0}, 'Red_Cards': {'count': 532.0, 'mean': 0.09022556390977443, 'std': 0.29326775375150527, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 2.0}}
<dataframe_info>
RangeIndex: 532 entries, 0 to 531
Data columns (total 18 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Name 532 non-null object
1 Club 532 non-null object
2 Nationality 532 non-null object
3 Position 532 non-null object
4 Age 532 non-null int64
5 Matches 532 non-null int64
6 Starts 532 non-null int64
7 Mins 532 non-null int64
8 Goals 532 non-null int64
9 Assists 532 non-null int64
10 Passes_Attempted 532 non-null int64
11 Perc_Passes_Completed 532 non-null float64
12 Penalty_Goals 532 non-null int64
13 Penalty_Attempted 532 non-null int64
14 xG 532 non-null float64
15 xA 532 non-null float64
16 Yellow_Cards 532 non-null int64
17 Red_Cards 532 non-null int64
dtypes: float64(3), int64(11), object(4)
memory usage: 74.9+ KB
<some_examples>
{'Name': {'0': 'Mason Mount', '1': 'Edouard Mendy', '2': 'Timo Werner', '3': 'Ben Chilwell'}, 'Club': {'0': 'Chelsea', '1': 'Chelsea', '2': 'Chelsea', '3': 'Chelsea'}, 'Nationality': {'0': 'ENG', '1': 'SEN', '2': 'GER', '3': 'ENG'}, 'Position': {'0': 'MF,FW', '1': 'GK', '2': 'FW', '3': 'DF'}, 'Age': {'0': 21, '1': 28, '2': 24, '3': 23}, 'Matches': {'0': 36, '1': 31, '2': 35, '3': 27}, 'Starts': {'0': 32, '1': 31, '2': 29, '3': 27}, 'Mins': {'0': 2890, '1': 2745, '2': 2602, '3': 2286}, 'Goals': {'0': 6, '1': 0, '2': 6, '3': 3}, 'Assists': {'0': 5, '1': 0, '2': 8, '3': 5}, 'Passes_Attempted': {'0': 1881, '1': 1007, '2': 826, '3': 1806}, 'Perc_Passes_Completed': {'0': 82.3, '1': 84.6, '2': 77.2, '3': 78.6}, 'Penalty_Goals': {'0': 1, '1': 0, '2': 0, '3': 0}, 'Penalty_Attempted': {'0': 1, '1': 0, '2': 0, '3': 0}, 'xG': {'0': 0.21, '1': 0.0, '2': 0.41, '3': 0.1}, 'xA': {'0': 0.24, '1': 0.0, '2': 0.21, '3': 0.11}, 'Yellow_Cards': {'0': 2, '1': 2, '2': 2, '3': 3}, 'Red_Cards': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 196 | 0 | 1,904 | 196 |
69003076
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.ensemble import RandomForestClassifier
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
train_data.info()
test_data.info()
train_data[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean()
train_data[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean()
train_data[["Embarked", "Survived"]].groupby(["Embarked"], as_index=False).mean()
train_data[["SibSp", "Survived"]].groupby(["SibSp"], as_index=False).mean()
train_data[["Parch", "Survived"]].groupby(["Parch"], as_index=False).mean()
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
sns.barplot(x="Pclass", y="Survived", data=train_data)
sns.barplot(x="Sex", y="Survived", data=train_data)
sns.factorplot("Sex", "Survived", hue="Pclass", size=3, aspect=1, data=train_data)
sns.factorplot(x="Pclass", y="Survived", hue="Sex", col="Embarked", data=train_data)
train_test_data = pd.concat([train_data, test_data], sort=True).reset_index(drop=True)
train_test_data["Title"] = train_test_data.Name.str.extract("([A-Za-z]+)\.")
pd.crosstab(train_test_data["Title"], train_test_data["Sex"])
train_test_data[["Title", "Survived"]].groupby(["Title"], as_index=False).mean()
train_test_data["Title"] = train_test_data["Title"].replace(
[
"Lady",
"Countess",
"Capt",
"Col",
"Don",
"Dr",
"Major",
"Rev",
"Sir",
"Jonkheer",
"Dona",
],
"Other",
)
train_test_data["Title"] = train_test_data["Title"].replace("Mlle", "Miss")
train_test_data["Title"] = train_test_data["Title"].replace("Ms", "Miss")
train_test_data["Title"] = train_test_data["Title"].replace("Mme", "Mrs")
train_test_data["Title"] = (
train_test_data["Title"]
.map({"Master": 0, "Miss": 1, "Mr": 2, "Mrs": 3, "Other": 4})
.astype(int)
)
train_test_data.head()
train_test_data["Sex"] = (
train_test_data["Sex"].map({"female": 1, "male": 0}).astype(int)
)
train_test_data.head()
train_test_data.isnull().sum()
train_test_data.Embarked.value_counts()
train_test_data["Embarked"] = train_test_data["Embarked"].fillna("S")
train_test_data["Embarked"] = train_test_data["Embarked"].map({"S": 0, "C": 1, "Q": 2})
train_test_data.head()
age = train_test_data.groupby(["Sex", "Pclass"]).median()["Age"]
age
train_test_data["Age"] = train_test_data.groupby(["Sex", "Pclass"])["Age"].apply(
lambda x: x.fillna(x.median())
)
train_test_data["Age"] = train_test_data["Age"].astype(int)
median_fare = train_test_data.groupby(["Pclass", "Parch", "SibSp"]).Fare.median()[3][0][
0
]
train_test_data["Fare"] = train_test_data["Fare"].fillna(median_fare)
train_test_data.isnull().sum()
train_test_data["AgeBand"] = pd.qcut(train_test_data["Age"], 7)
print(
train_test_data[["AgeBand", "Survived"]].groupby(["AgeBand"], as_index=False).mean()
)
train_test_data.loc[train_test_data["Age"] <= 18, "Age_Band"] = 0
train_test_data.loc[
(train_test_data["Age"] > 18) & (train_test_data["Age"] <= 22), "Age_Band"
] = 1
train_test_data.loc[
(train_test_data["Age"] > 22) & (train_test_data["Age"] <= 25), "Age_Band"
] = 2
train_test_data.loc[
(train_test_data["Age"] > 25) & (train_test_data["Age"] <= 43), "Age_Band"
] = 3
train_test_data.loc[train_test_data["Age"] > 43, "Age_Band"] = 4
train_test_data["Age_Band"] = train_test_data["Age_Band"].astype(int)
train_test_data.head()
train_test_data["FareBand"] = pd.qcut(train_test_data["Fare"], 8)
print(
train_test_data[["FareBand", "Survived"]]
.groupby(["FareBand"], as_index=False)
.mean()
)
train_test_data["Fare"].median()
train_test_data.loc[train_test_data["Fare"] <= 9.844, "Fare_Band"] = 0
train_test_data.loc[
(train_test_data["Fare"] > 9.844) & (train_test_data["Fare"] <= 69.55), "Fare_Band"
] = 1
train_test_data.loc[train_test_data["Fare"] > 69.55, "Fare_Band"] = 2
train_test_data["Fare_Band"] = train_test_data["Fare_Band"].astype(int)
train_test_data["FamilySize"] = train_test_data["SibSp"] + train_test_data["Parch"] + 1
train_test_data["FamilySize"] = train_test_data["FamilySize"].astype(int)
print(
train_test_data[["FamilySize", "Survived"]]
.groupby(["FamilySize"], as_index=False)
.mean()
)
train_test_data.loc[train_test_data["FamilySize"] == 1, "IsAlone"] = 1
train_test_data.loc[train_test_data["FamilySize"] > 1, "IsAlone"] = 0
train_test_data["IsAlone"] = train_test_data["IsAlone"].astype(int)
print(
train_test_data[["IsAlone", "Survived"]].groupby(["IsAlone"], as_index=False).mean()
)
train_test_data.head()
drop_cols = [
"Age",
"Cabin",
"Fare",
"Name",
"Parch",
"SibSp",
"Ticket",
"AgeBand",
"FareBand",
"FamilySize",
]
train_test_data.drop(columns=drop_cols, inplace=True)
train_test_data.head()
train_data = train_test_data[train_test_data["Survived"].notna()]
train_data["Survived"] = train_data["Survived"].astype(int)
train_data.head()
test_data = train_test_data.drop(train_test_data[train_test_data.Survived >= 0].index)
test_data.head()
y = train_data["Survived"]
features = ["Embarked", "Pclass", "Sex", "Title", "Age_Band", "Fare_Band", "IsAlone"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
acc_random_forest = round(model.score(X, train_data["Survived"]) * 100, 2)
print(acc_random_forest)
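# The score above is measured on the training data, so it is an optimistic estimate.
# A minimal sketch (not in the original notebook) of a less biased estimate via 5-fold cross-validation:
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(model, X, y, cv=5)
print(
    "5-fold CV accuracy: {:.2f}% (+/- {:.2f}%)".format(
        cv_scores.mean() * 100, cv_scores.std() * 100
    )
)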
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("submission.csv", index=False)
print("Your submission was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/003/69003076.ipynb
| null | null |
[{"Id": 69003076, "ScriptId": 18791116, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7889999, "CreationDate": "07/25/2021 17:47:08", "VersionNumber": 9.0, "Title": "getting_started_with_Titanic", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 189.0, "LinesInsertedFromPrevious": 105.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 84.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
| false | 0 | 2,368 | 1 | 2,368 | 2,368 |
||
69197880
|
<jupyter_start><jupyter_text>Chest X-Ray Images (Pneumonia)
### Context
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5

Figure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6
The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs.
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5
### Content
The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.
Kaggle dataset identifier: chest-xray-pneumonia
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, _ in os.walk("/kaggle/input"):
print(dirname)
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Number of images in each class
train_pneumonia_dir = (
"/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/PNEUMONIA"
)
train_normal_dir = (
"/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/train/NORMAL"
)
train_pneumonia_count = len(os.listdir(train_pneumonia_dir))
train_normal_count = len(os.listdir(train_normal_dir))
print(f"Number of pneumonia images in training set: {train_pneumonia_count}")
print(f"Number of normal images in training set: {train_normal_count}")
val_pneumonia_dir = (
"/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/val/PNEUMONIA"
)
val_normal_dir = "/kaggle/input/chest-xray-pneumonia/chest_xray/chest_xray/val/NORMAL"
val_pneumonia_count = len(os.listdir(val_pneumonia_dir))
val_normal_count = len(os.listdir(val_normal_dir))
print(f"Number of pneumonia images in validation set: {val_pneumonia_count}")
print(f"Number of normal images in validation set: {val_normal_count}")
batch_size = 32
img_height = 180
img_width = 180
train_dir = "/kaggle/input/chest-xray-pneumonia/chest_xray/train"
val_dir = "/kaggle/input/chest-xray-pneumonia/chest_xray/val"
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_dir, image_size=(img_height, img_width), batch_size=batch_size
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
val_dir, image_size=(img_height, img_width), batch_size=batch_size
)
print(f"Train class names: {train_ds.class_names}")
print(f"Validation class names: {val_ds.class_names}")
class_names = train_ds.class_names
for image, label in train_ds.take(1):
print(image.shape)
print(label.shape)
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
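# The weights below follow the common heuristic of weighting each class inversely to its
# frequency, scaled by (total / 2) so the overall loss magnitude stays roughly unchanged.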
weight_for_normal = (1 / train_normal_count) * (
(train_pneumonia_count + train_normal_count) / 2
)
weight_for_pneumonia = (1 / train_pneumonia_count) * (
(train_pneumonia_count + train_normal_count) / 2
)
class_weight = {0: weight_for_normal, 1: weight_for_pneumonia}
print("Weight for Normal class (0): {:.2f}".format(weight_for_normal))
print("Weight for Pneumonia class (0): {:.2f}".format(weight_for_pneumonia))
base_model = tf.keras.applications.EfficientNetB0(
input_shape=(img_height, img_width, 3), include_top=False, weights="imagenet"
)
base_model.summary()
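# Freeze the pretrained EfficientNetB0 backbone so that only the newly added head is trained.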
base_model.trainable = False
inputs = tf.keras.layers.InputLayer(input_shape=(img_height, img_width, 3))
# Data augmentation
data_augmentation = tf.keras.Sequential(
[
tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
],
name="data_augmentation",
)
# Global Average
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
# Dropout rate of 0.2
dropout_layer = tf.keras.layers.Dropout(0.2)
# We are dealing with a binary classification problem (NORMAL vs PNEUMONIA), so sigmoid, 1 neuron
prediction_layer = tf.keras.layers.Dense(1, activation="sigmoid")
model = tf.keras.Sequential(
[
inputs,
data_augmentation,
base_model,
global_average_layer,
dropout_layer,
prediction_layer,
]
)
model.summary()
METRICS = [
    tf.keras.metrics.TruePositives(name="tp"),
    tf.keras.metrics.FalsePositives(name="fp"),
    tf.keras.metrics.TrueNegatives(name="tn"),
    tf.keras.metrics.FalseNegatives(name="fn"),
    tf.keras.metrics.BinaryAccuracy(name="accuracy"),
    tf.keras.metrics.Precision(name="precision"),
    tf.keras.metrics.Recall(name="recall"),
    tf.keras.metrics.AUC(name="prc", curve="PR"),  # PR AUC; needed because EarlyStopping monitors "val_prc"
]
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=METRICS)
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor="val_prc", verbose=1, patience=10, mode="max", restore_best_weights=True
)
EPOCHS = 50
history = model.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
class_weight=class_weight,
callbacks=[early_stopping],
)
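# A minimal follow-up sketch (not in the original notebook): evaluate the trained model on the
# held-out test split. The exact test path is an assumption based on the dataset's folder layout.
test_dir = "/kaggle/input/chest-xray-pneumonia/chest_xray/test"
test_ds = tf.keras.preprocessing.image_dataset_from_directory(
    test_dir, image_size=(img_height, img_width), batch_size=batch_size, shuffle=False
)
results = model.evaluate(test_ds, return_dict=True)
print(results)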
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/197/69197880.ipynb
|
chest-xray-pneumonia
|
paultimothymooney
|
[{"Id": 69197880, "ScriptId": 18889681, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7978439, "CreationDate": "07/28/2021 02:08:05", "VersionNumber": 1.0, "Title": "notebook290635b9f8", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92075828, "KernelVersionId": 69197880, "SourceDatasetVersionId": 23812}]
|
[{"Id": 23812, "DatasetId": 17810, "DatasourceVersionId": 23851, "CreatorUserId": 1314380, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2018 19:41:59", "VersionNumber": 2.0, "Title": "Chest X-Ray Images (Pneumonia)", "Slug": "chest-xray-pneumonia", "Subtitle": "5,863 images, 2 categories", "Description": "### Context\n\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n\n\nFigure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6\nThe normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse \u2018\u2018interstitial\u2019\u2019 pattern in both lungs.\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n### Content\n\nThe dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal). \n\nChest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children\u2019s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients\u2019 routine clinical care. \n\nFor the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.\n\n### Acknowledgements\n\nData: https://data.mendeley.com/datasets/rscbjbr9sj/2\n\nLicense: [CC BY 4.0][1]\n\nCitation: http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n![enter image description here][2]\n\n\n### Inspiration\n\nAutomated methods to detect and classify human diseases from medical images.\n\n\n [1]: https://creativecommons.org/licenses/by/4.0/\n [2]: https://i.imgur.com/8AUJkin.png", "VersionNotes": "train/test/val", "TotalCompressedBytes": 1237249419.0, "TotalUncompressedBytes": 1237249419.0}]
|
[{"Id": 17810, "CreatorUserId": 1314380, "OwnerUserId": 1314380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23812.0, "CurrentDatasourceVersionId": 23851.0, "ForumId": 25540, "Type": 2, "CreationDate": "03/22/2018 05:42:41", "LastActivityDate": "03/22/2018", "TotalViews": 2063138, "TotalDownloads": 237932, "TotalVotes": 5834, "TotalKernels": 2058}]
|
[{"Id": 1314380, "UserName": "paultimothymooney", "DisplayName": "Paul Mooney", "RegisterDate": "10/05/2017", "PerformanceTier": 5}]
|
| false | 0 | 1,637 | 0 | 2,114 | 1,637 |
||
69197615
|
<jupyter_start><jupyter_text>ILTACON Fair AI Tutorial
Kaggle dataset identifier: iltacon-fair-ai-tutorial
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
year_dfs = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for count, filename in enumerate(sorted(filenames)):
if count < 5:
print(filename)
year_dfs.append(pd.read_csv(os.path.join(dirname, filename)))
df = pd.concat(year_dfs, ignore_index=True)
# # Install Fair AI packages
# 1. [Surgeo](https://surgeo.readthedocs.io/en/dev/) - Python package to determine race by proxy variables (i.e. given name, surname, geocode)
# 2. [AI Fairness 360](https://aif360.mybluemix.net/) - An API to measure fairness and mitigate bias in machine learning datasets and models.
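# A minimal install sketch (PyPI package names assumed; the original install cell isn't shown):
# !pip install surgeo aif360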
# # Create Labels
# Was the outcome in favor of the Plaintiff?
# For the demo, we'll mark
# * Judgment in favor of Plaintiff (or Both parties)
# * Disposition is a settlement
# * If the judgment is unknown, then default and consent judgments are considered favorable.
# > Note: We're only determining if the plaintiff receives a judgment/settlement and not if the judgment was equitable
def for_plaintiff(row):
judgment = row["JUDGMENT"]
disp = row["DISP"]
if judgment in [1, 3] or disp == 13 or (judgment != 2 and disp in [4, 5]):
return 1.0
else:
return 0.0
df["for_plaintiff"] = df.apply(for_plaintiff, axis=1)
df["for_plaintiff"].value_counts()
# # Label Privileged Classes
# Although the federal docket dataset doesn't contain explicit race or gender columns, the dataset includes some income attributes. These include:
# 1. In Forma Pauperis (unable to pay court costs)
# 2. Pro Se (not represented by a lawyer)
print("Informa Pauperis (court fees waived)")
df["ifp_val"] = df["IFP"].apply(lambda x: 1.0 if x == "FP" else 0.0)
print(df["ifp_val"].value_counts())
print()
print("Pro Se plaintiff (i.e. no lawyer)")
df["pro_se_plt"] = df["PROSE"].apply(lambda x: 1.0 if x in [1, 3] else 0.0)
print(df["pro_se_plt"].value_counts())
print()
print("Pro Se defendant (i.e. no lawyer)")
df["pro_se_def"] = df["PROSE"].apply(lambda x: 1.0 if x in [2, 3] else 0.0)
print(df["pro_se_def"].value_counts())
file_date = pd.to_datetime(df["FILEDATE"], infer_datetime_format=True)
term_date = pd.to_datetime(df["TERMDATE"], infer_datetime_format=True)
df["days_open"] = (term_date - file_date).dt.days
# # Dataset
# Using the docket's metadata, predict whether the case's outcome was in favor of the Plaintiff.
# From the Pandas dataframe:
# * Select the label column
# * List the protected attributes
# * List categorical features (i.e. the district the case was filed in)
# * List features to keep (i.e. the number of days the case was opened)
#
from aif360.datasets import StandardDataset
ds = StandardDataset(
df,
label_name="for_plaintiff",
favorable_classes=[1.0],
protected_attribute_names=["ifp_val", "pro_se_plt"],
privileged_classes=[[0.0], [0.0]],
categorical_features=[
"NOS",
"JURIS",
"CIRCUIT",
"DISTRICT",
"OFFICE",
"JURY",
"PROCPROG",
],
features_to_keep=[
"NOS",
"JURIS",
"CIRCUIT",
"DISTRICT",
"OFFICE",
"JURY",
"PROCPROG",
"DEMANDED",
"days_open",
],
)
from aif360.metrics import BinaryLabelDatasetMetric
priv_group = [{"ifp_val": 0.0}]
unpriv_group = [{"ifp_val": 1.0}]
ifp_metrics = BinaryLabelDatasetMetric(ds, unpriv_group, priv_group)
consistency = round(ifp_metrics.consistency()[0], 4)
disparate_impact = round(ifp_metrics.disparate_impact(), 4)
statistical_parity_difference = round(ifp_metrics.statistical_parity_difference(), 4)
print(f"Consistency: {consistency}")
print(f"Disparate Impact: {disparate_impact}")
print(f"Statistical Parity Difference: {statistical_parity_difference}")
from aif360.metrics import BinaryLabelDatasetMetric
priv_group = [{"pro_se_plt": 0.0}]
unpriv_group = [{"pro_se_plt": 1.0}]
pro_se_metrics = BinaryLabelDatasetMetric(ds, unpriv_group, priv_group)
consistency = round(pro_se_metrics.consistency()[0], 4)
disparate_impact = round(pro_se_metrics.disparate_impact(), 4)
statistical_parity_difference = round(pro_se_metrics.statistical_parity_difference(), 4)
print(f"Consistency: {consistency}")
print(f"Disparate Impact: {disparate_impact}")
print(f"Statistical Parity Difference: {statistical_parity_difference}")
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC as SVM
from sklearn.preprocessing import MinMaxScaler
from aif360.algorithms.preprocessing import DisparateImpactRemover
from matplotlib import pyplot as plt
protected = "ifp_val"
scaler = MinMaxScaler(copy=False)
test, train = ds.split([0.2])
train.features = scaler.fit_transform(train.features)
test.features = scaler.fit_transform(test.features)
index = train.feature_names.index(protected)
DIs = []
for level in tqdm(np.linspace(0.0, 1.0, 11)):
di = DisparateImpactRemover(repair_level=level)
train_repd = di.fit_transform(train)
test_repd = di.fit_transform(test)
X_tr = np.delete(train_repd.features, index, axis=1)
X_te = np.delete(test_repd.features, index, axis=1)
y_tr = train_repd.labels.ravel()
lmod = LogisticRegression(class_weight="balanced", solver="liblinear")
lmod.fit(X_tr, y_tr)
test_repd_pred = test_repd.copy()
test_repd_pred.labels = lmod.predict(X_te)
p = [{protected: 0.0}]
u = [{protected: 1.0}]
cm = BinaryLabelDatasetMetric(
test_repd_pred, privileged_groups=p, unprivileged_groups=u
)
DIs.append(cm.disparate_impact())
plt.plot(np.linspace(0, 1, 11), DIs, marker="o")
plt.plot([0, 1], [1, 1], "g")
plt.plot([0, 1], [0.8, 0.8], "r")
plt.ylim([0.0, 1.1])
plt.ylabel("Disparate Impact (DI)")
plt.xlabel("repair level")
plt.show()
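# Reading the plot (added note): the green line marks exact parity (DI = 1.0) and the red line the
# conventional four-fifths threshold (DI = 0.8); raising the repair level trades feature fidelity
# for a disparate impact closer to parity.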
import re
def last_name(party: str):
"""Extract lastname form party."""
tokens = re.split(r"[.,\s]+", party)
if len(tokens) == 1:
        # Assume the party is a person if it is a single word
return tokens[0]
elif len(tokens) == 3 and " ".join(tokens).endswith(" ET AL"):
# Assume first name listed is the primary person
return tokens[0]
else:
        # Assume longer names are organizations
return None
plt_df = df[df["PLT"].notnull()].copy()
plt_df["PLT_LASTNAME"] = plt_df["PLT"].apply(last_name)
person_df = (
plt_df[(plt_df["PLT_LASTNAME"].notnull()) & (plt_df["COUNTY"].notnull())]
.copy()
.reset_index()
)
person_df[["PLT", "PLT_LASTNAME", "COUNTY"]].tail(20)
import surgeo
# Instantiate the Surgeo model
surgeo_model = surgeo.SurgeoModel(geo_level="FIPSCC")
surgeo_df = surgeo_model.get_probabilities(
person_df["PLT_LASTNAME"], person_df["COUNTY"]
)
surgeo_df.head(30)
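# Background note (not from the original notebook): SurgeoModel implements Bayesian Improved
# Surname Geocoding (BISG), combining surname-based and geography-based race probabilities;
# SurnameModel below uses the surname distribution alone.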
surname_model = surgeo.SurnameModel()
surname_df = surname_model.get_probabilities(person_df["PLT_LASTNAME"])
surname_df.head(30)
proxy_df = pd.concat([person_df, surgeo_df], axis=1)
from aif360.datasets import StandardDataset
proxy_ds = StandardDataset(
proxy_df,
label_name="for_plaintiff",
favorable_classes=[1.0],
protected_attribute_names=["ifp_val", "pro_se_plt", "white"],
privileged_classes=[[0.0], [0.0], lambda x: x >= 0.90],
categorical_features=[
"NOS",
"JURIS",
"CIRCUIT",
"DISTRICT",
"OFFICE",
"JURY",
"PROCPROG",
],
features_to_keep=[
"NOS",
"JURIS",
"CIRCUIT",
"DISTRICT",
"OFFICE",
"JURY",
"PROCPROG",
"DEMANDED",
],
)
from aif360.metrics import BinaryLabelDatasetMetric
priv_group = [{"white": 1.0}]
unpriv_group = [{"white": 0.0}]
race_metrics = BinaryLabelDatasetMetric(proxy_ds, unpriv_group, priv_group)
print(race_metrics.consistency())
print(race_metrics.disparate_impact())
print(race_metrics.statistical_parity_difference())
| false | 0 | 2,633 | 0 | 2,663 | 2,633 |
||
69197776
|
import numpy as np
import pandas as pd
import tidybear as tb
from tqdm import tqdm
import nltk
from pyphen import Pyphen
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
def summarise_cv_scores(scores):
return (len(scores), np.mean(scores), np.std(scores))
# ## Set Up
# To start, I read in the training data and select the columns I care about.
# Then I assign an approximate grade by dividing the target into 10 regions (deciles, grades 3-12).
train_ = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
train_ = train_.loc[:, ["id", "excerpt", "target", "standard_error"]]
grade_lbls = [i for i in range(12, 2, -1)]
train_["grade"] = pd.qcut(train_.target, q=len(grade_lbls), labels=grade_lbls)
train_["grade"] = train_.grade.astype(int)
def get_school_level(grade):
if grade <= 5:
return "elementary"
elif grade <= 8:
return "middle"
else:
return "high"
train_["school"] = train_.grade.apply(get_school_level)
print(train_.shape)
train_.tail()
# Next, we look at an example of the easiest and hardest (by target) excerpts to read. As per the discussion and by example, higher scores are easier (lower grade level), lower scores are harder (higher grade level).
print("Max Target - Easiest to read - lowest grade level\n")
print(train_[train_.target == train_.target.max()].excerpt.values[0])
print("\n-------------------------\n")
print("Min Target - Hardest to read - highest grade level\n")
print(train_[train_.target == train_.target.min()].excerpt.values[0])
train_.target.plot.hist()
# Looking at the distribution above, the target is pretty normal. This tells me there is lots of overlap between grade-level readability... or as the discussion puts it, the categories are squishy.
# However below, using the approx grade level, the words being used on average for higher grade levels are longer.
# ## Non-Text Features
train = train_.copy()
pyphen = Pyphen(lang="en")
def syllables(word):
return len(pyphen.positions(word)) + 1
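# (added comment) pyphen.positions() returns the hyphenation points of a word, so
# hyphenation points + 1 is used above as a rough syllable count.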
def engineer_features(df):
word_tok = df.excerpt.apply(nltk.tokenize.word_tokenize)
sent_tok = df.excerpt.apply(nltk.tokenize.sent_tokenize)
syls_tok = word_tok.apply(lambda x: [syllables(w) for w in x])
total_charachters = word_tok.apply(lambda x: np.sum([len(w) for w in x]))
total_words = word_tok.apply(lambda x: len(x))
total_syllables = syls_tok.apply(lambda x: np.sum(x))
total_sentences = sent_tok.apply(lambda x: len(x))
df["unique_words"] = word_tok.apply(lambda x: np.unique(x).shape[0]) / total_words
df["words_geq_len8"] = (
word_tok.apply(lambda x: np.sum([len(w) >= 8 for w in x])) / total_words
)
df["hard_words"] = (
syls_tok.apply(lambda x: np.sum([s >= 3 for s in x])) / total_words
)
df["characters_per_word"] = total_charachters / total_words
df["syllables_per_word"] = total_syllables / total_words
df["words_per_sentence"] = total_words / total_sentences
engineer_features(train)
train.drop(columns=["standard_error", "grade"]).corr()
non_text_features = ["unique_words", "words_geq_len8", "words_per_sentence"]
with tb.GroupBy(train, "grade") as g:
g.mean(non_text_features, decimals=2)
grade_summary = g.summarise()
grade_summary = (
grade_summary.stack()
.rename("value")
.reset_index()
.rename(columns={"level_1": "feature"})
)
g = sns.FacetGrid(grade_summary, col="feature", col_wrap=3, sharey=False, height=4)
g.map_dataframe(sns.barplot, x="grade", y="value")
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import RidgeCV, LassoCV
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.compose import make_column_transformer
from sklearn.feature_selection import RFECV
gs_params = {
"cv": 10,
"scoring": "neg_root_mean_squared_error",
"n_jobs": -1,
"verbose": 2,
}
model_grid = {
"reg": [
DummyRegressor(),
LinearRegression(),
RidgeCV(),
RandomForestRegressor(max_depth=3, random_state=123),
]
}
pipe = Pipeline(
[
("impute", SimpleImputer(strategy="median")),
("scale", StandardScaler()),
("reg", RidgeCV()),
]
)
grid = GridSearchCV(pipe, model_grid, **gs_params)
X = train[non_text_features]
y = train.target
grid.fit(X, y)
pd.DataFrame(grid.cv_results_)[["param_reg", "mean_test_score", "std_test_score"]]
non_text_pipe = Pipeline(
[
("impute", SimpleImputer(strategy="median")),
("scale", StandardScaler()),
("poly", PolynomialFeatures()),
("feat", RFECV(LinearRegression(), cv=10)),
("reg", RidgeCV()),
]
)
grid = GridSearchCV(non_text_pipe, model_grid, **gs_params)
grid.fit(X, y)
pd.DataFrame(grid.cv_results_)[["param_reg", "mean_test_score", "std_test_score"]]
non_text_lm = Pipeline(
[
("impute", SimpleImputer(strategy="median")),
("scale", StandardScaler()),
("reg", LinearRegression()),
]
)
scores = cross_val_score(non_text_lm, X, y, **gs_params)
print("RMSE (cv={}): {:.3f} ({:.3f})".format(*summarise_cv_scores(-scores)))
non_text_lm.fit(X, y)
coefs = pd.DataFrame(
{"coef": non_text_lm.named_steps["reg"].coef_}, index=non_text_features
)
coefs.sort_values("coef").plot.barh()
# With no NLP, just summary stats about the vocab of the text, we get an average RMSE across 10 folds of .84 for the Ridge regression. We'll need to halve that to win...
# ## Simple NLP
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
tm_grid = {"reg": [LinearRegression(), Ridge(), Lasso()]}
text_pipe = Pipeline(
[("count", CountVectorizer()), ("scale", TfidfTransformer()), ("reg", Ridge())]
)
grid = GridSearchCV(text_pipe, tm_grid, **gs_params)
grid.fit(train.excerpt, train.target)
pd.DataFrame(grid.cv_results_)[["param_reg", "mean_test_score", "std_test_score"]]
# ## Combine Non-Text and Simple NLP
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
non_text_trans = make_pipeline(
SimpleImputer(strategy="median"),
StandardScaler(),
PolynomialFeatures(),
RFECV(LinearRegression(), cv=10),
)
text_trans = make_pipeline(
CountVectorizer(),
TfidfTransformer(),
)
combined_pipe = Pipeline(
[
(
"transform",
make_column_transformer(
(non_text_trans, non_text_features),
(text_trans, "excerpt"),
remainder="drop",
),
),
("predict", Ridge()),
]
)
scores = cross_val_score(combined_pipe, train, train.target, **gs_params)
print("RMSE (cv={}): {:.3f} ({:.3f})".format(*summarise_cv_scores(-scores)))
test = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv")
engineer_features(test)
train_cols = ["excerpt"] + non_text_features
combined_pipe.fit(train[train_cols], train.target.values)
test_pred = combined_pipe.predict(test[train_cols])
submission = pd.DataFrame({"id": test.id, "target": test_pred})
submission
submission.to_csv("submission.csv", index=False)
# **Leaf Classification**
# Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import warnings
warnings.filterwarnings("ignore")
train_df = pd.read_csv("/kaggle/input/leaf-classification/train.csv.zip")
test_df = pd.read_csv("/kaggle/input/leaf-classification/test.csv.zip")
train_df.head()
test_ids = test_df.id
test_data = test_df.drop(["id"], axis=1)
test_data.head()
train_df.isnull().sum()
train_df.shape
test_data.shape
train_df.describe().T
train_df["species"].nunique()
plt.figure(figsize=(15, 8))
sns.heatmap(data=train_df.corr())
X = train_df.drop(["id", "species"], axis=1)
Y = train_df["species"]
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
y_fit = encoder.fit(train_df["species"])
y_label = y_fit.transform(train_df["species"])
classes = list(y_fit.classes_)
classes
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X, y_label, test_size=0.2, random_state=1
)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=40)
classifier.fit(x_train, y_train)
from sklearn.metrics import classification_report
predictions = classifier.predict(x_test)
print(classification_report(y_test, predictions))
final_predictions = classifier.predict_proba(test_data)
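# Note (added): predict_proba returns one column per encoded class (0..n-1), which lines up with
# list(y_fit.classes_) as long as every species appears in x_train, so the column names match.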
submission = pd.DataFrame(final_predictions, columns=classes)
submission.insert(0, "id", test_ids)
submission.reset_index()
submission.to_csv("result.csv", index=False)
# # Ghouls, Goblins, and Ghosts... Boo! (kernel: boo_vote_Isorution)
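# Approach (summary added for readability): the script first picks, by validation accuracy, the best
# of several regressors to separate Goblin vs not-Goblin (def_gob), then the best model to separate
# Ghost vs Ghoul on the remaining rows (def_two), optionally including the one-hot color features.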
# Library imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
import warnings
import seaborn as sns  # visualization
warnings.filterwarnings("ignore")
import zipfile  # the sample files are zipped, so extract them
zipfile.ZipFile(
"/kaggle/input/ghouls-goblins-and-ghosts-boo/train.csv.zip"
).extractall()
zipfile.ZipFile("/kaggle/input/ghouls-goblins-and-ghosts-boo/test.csv.zip").extractall()
# zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/sample_submission.csv.zip').extractall()
from sklearn import datasets
from sklearn.model_selection import train_test_split  # for cross-validation (splitting train and test)
from sklearn.model_selection import cross_val_score
from sklearn import metrics  # for accuracy checks
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
# Neural networks
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn import metrics
import joblib
from sklearn import svm
from sklearn.linear_model import LogisticRegression
# LightGBM#import lightgbm as lgb
import optuna
import optuna.integration.lightgbm as lgb
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
# Many sklearn models
from sklearn.linear_model import (
LinearRegression,
Ridge,
Lasso,
ElasticNet,
SGDRegressor,
)
from sklearn.linear_model import PassiveAggressiveRegressor, ARDRegression, RidgeCV
from sklearn.linear_model import TheilSenRegressor, RANSACRegressor, HuberRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import (
RandomForestRegressor,
AdaBoostRegressor,
ExtraTreesRegressor,
HistGradientBoostingRegressor,
)
from sklearn.ensemble import (
BaggingRegressor,
GradientBoostingRegressor,
VotingRegressor,
StackingRegressor,
)
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.cross_decomposition import PLSRegression
def N_netlog(train, test, target, tartest):
    # parameters
hidden_layer_sizes = (100,)
activation = "relu"
solver = "adam"
batch_size = "auto"
alpha = 0.0001
random_state = 0
max_iter = 10000
early_stopping = True
    # Train the model
clf = MLPRegressor(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
batch_size=batch_size,
alpha=alpha,
random_state=random_state,
max_iter=max_iter,
# early_stopping = early_stopping
)
clf.fit(train, target)
SAVE_TRAINED_DATA_PATH = "train1.learn"
    # Save the trained model
    joblib.dump(clf, SAVE_TRAINED_DATA_PATH)
    # Load the trained model file
    clf1 = joblib.load(SAVE_TRAINED_DATA_PATH)
    # Check the training result
    # predict_y1 = clf1.predict_proba(test)  # output the probability of each class?
    # predict = clf1.predict(test)
    # accs=accuracy_score(train, target)
    # return predict,accs
    # For scoring
    if len(tartest) > 1:
        print(tartest)
        pred = clf1.predict(test)  # model inference
        pred_r = np.round(np.round(pred, decimals=1))  # round to the most likely class value
        predict = accuracy_score(tartest, pred_r)  # score against the true labels
    # When not scoring
    if len(tartest) == 1:
        print(test)
        predict_no = clf1.predict(test)  # model inference
        predict = np.round(np.round(predict_no, decimals=1))  # round to the most likely class value
return predict
def lightgbm(train, test, target, tartest):  # prepare the data
    X_train, X_test, Y_train, Y_test = train_test_split(
        train, target, random_state=0
    )  # random_state is the seed value.
    # LightGBM parameter settings
    params = {
        "boosting_type": "gbdt",
        "objective": "regression",
        "metric": "rmse",  # classes are given as 0,1,2,... (the numbers themselves don't matter)  # evaluation metric: accuracy
        #'num_iterations': 1000,  # train for 1000 iterations
        "verbose": -1,  # hide training logs
    }
    #'metric': 'multi_logress'  # changed
    # Convert to the format LightGBM requires
lgb_train = lgb.Dataset(X_train, Y_train)
lgb_eval = lgb.Dataset(X_test, Y_test, reference=lgb_train)
best_params, history = {}, []
    # Train LightGBM
gbm = lgb.train(
params,
lgb_train,
valid_sets=[lgb_train, lgb_eval],
verbose_eval=100,
early_stopping_rounds=100,
)
best_params = gbm.params
print("Best params:", best_params)
params = best_params
    # Inference starts here
    # For scoring
    if len(tartest) > 1:
        print(tartest)
        pred = gbm.predict(test, num_iteration=gbm.best_iteration)  # LightGBM inference
        pred_r = np.round(np.round(pred, decimals=1))  # round to the most likely class value
        predict = accuracy_score(tartest, pred_r)  # score against the true labels
    # When not scoring
    if len(tartest) == 1:
        print(test)
        predict_no = gbm.predict(test, num_iteration=gbm.best_iteration)  # LightGBM inference
        predict = np.round(np.round(predict_no, decimals=1))  # round to the most likely class value
return predict
def def_two(
    ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
):  # something is off here; this isn't working well yet
    # "HowDo" selects which analysis method to use
    acc_score_gob = np.zeros((40), dtype="float64")  # store the scores
    vsnp = np.empty((529), dtype="float64")
    vsnpp = np.empty((529), dtype="float64")
    submission = np.empty((529), dtype="int")
    vote = np.zeros((529, 2), dtype="int")  # vote tallies
    ones = np.ones((529), dtype="int")  # used for the vote tallies
    ghost0 = np.zeros(len(ghost))
    ghost1 = np.ones(len(ghost))
    ghoul0 = np.zeros(len(ghoul))
    ghoul1 = np.ones(len(ghoul))
    goblin0 = np.zeros(len(goblin))
    goblin1 = np.ones(len(goblin))  # pre-step for building the target
    vs = ghost.append(ghoul, ignore_index=True)
    vst = np.append(ghost1, ghoul0)  # build the target
    # here Ghost is 1
    # Is this the real run?
    if accuracy == True:  # compute a validation score
        train_r, test_r, target_r, tartest_r = train_test_split(
            vs, vst, random_state=0
        )  # random_state is the seed value.
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
acc_score_gob[0] = accuracy_score(tartest_r, vsnp) # LogReg
submission = np.round(np.round(vsnp, decimals=1))
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
# acc_score_gob[1]=lightgbm(train_r, test_r, target_r, tartest_r)
# acc_score_gob[2]=N_netlog(train_r, test_r, target_r, tartest_r)
        # many sklearn models
n = 0
for reg_name, reg in reg_dict.items():
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
submission = np.round(np.round(vsnp, decimals=1))
acc_score_gob[n + 3] = accuracy_score(tartest_r, submission)
n += 1
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
for n in range(len(test_r)):
submission[n] = 0 if vote[n, 1] > vote[n, 0] else 1
# acc_score_gob[39]=accuracy_score(tartest_r, submission)
print(acc_score_gob)
return acc_score_gob
    if accuracy == False:  # the real run
train_r, test_r, target_r, tartest_r = vs, test, vst, [0]
if best_cell_gob == 0: # LogReg
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
vsnpp = vsnp
if best_cell_gob == 1: # LogReg
vsnp = lightgbm(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
if best_cell_gob == 2: # LogReg
vsnp = N_netlog(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
if best_cell_gob > 2: # many_sk
            n = 0  # initialize n
for reg_name, reg in reg_dict.items():
# if n == best_cell_gob-3:
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
if n == best_cell_gob - 3:
vsnpp = vsnp
                n += 1  # keep only the prediction from the selected model index
vote[: len(test_r), 0] = (
vote[: len(test_r), 0] + submission[: len(test_r)]
)
vote[: len(test_r), 1] = (
vote[: len(test_r), 1]
+ ones[: len(test_r)]
- submission[: len(test_r)]
)
if best_cell_gob == 39:
for n in range(len(test_r)):
vsnp[n] = 0 if vote[n, 1] > vote[n, 0] else 1
vsnpp = vsnp
        submission = np.round(np.round(vsnpp, decimals=1))  # round to the most likely class value
return submission
def def_gob(
    ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
):  # to decide "goblin or not" first, pick the classifier that separates goblins best
    acc_score_gob = np.zeros((40), dtype="float64")  # store the scores
    vsnp = np.zeros((529), dtype="float64")
    vsnpp = np.zeros((529), dtype="float64")
    submission = np.empty((529), dtype="bool")
    vote = np.zeros((529, 2), dtype="int")  # vote tallies
    ones = np.ones((529), dtype="bool")  # used for the vote tallies
    ghost0 = np.zeros(len(ghost))
    ghost1 = np.ones(len(ghost))
    ghoul0 = np.zeros(len(ghoul))
    ghoul1 = np.ones(len(ghoul))
    goblin0 = np.zeros(len(goblin))
    goblin1 = np.ones(len(goblin))  # pre-step for building the target
    vs = goblin.append(ghost, ignore_index=True)  # build train
    vs = vs.append(ghoul, ignore_index=True)  # build train
    vst = np.append(goblin1, ghost0)  # build the target
    vst = np.append(vst, ghoul0)
    # Is this the real run?
    if accuracy == True:  # compute a validation score
        train_r, test_r, target_r, tartest_r = train_test_split(
            vs, vst, random_state=0
        )  # random_state is the seed value.
# vote[:len(test_r),0]=ones[:len(test_r)]*5
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
acc_score_gob[0] = accuracy_score(tartest_r, vsnp) # LogReg
submission = np.round(np.round(vsnp, decimals=1))
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
# acc_score_gob[1]=lightgbm(train_r, test_r, target_r, tartest_r)
acc_score_gob[2] = N_netlog(train_r, test_r, target_r, tartest_r)
        # many sklearn models
n = 0
for reg_name, reg in reg_dict.items():
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
submission = np.round(np.round(vsnp, decimals=1))
acc_score_gob[n + 3] = accuracy_score(tartest_r, submission)
n += 1
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
for n in range(len(test_r)):
submission[n] = 0 if vote[n, 1] > vote[n, 0] else 1
# acc_score_gob[39]=accuracy_score(tartest_r, submission)
print(acc_score_gob)
return acc_score_gob
    if accuracy == False:  # the real run
train_r, test_r, target_r, tartest_r = vs, test, vst, [0]
vsnpp_int = np.zeros((529), dtype="int")
if best_cell_gob == 0: # LogReg
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
vsnpp = vsnp
if best_cell_gob == 1: # LogReg
vsnp = lightgbm(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
if best_cell_gob == 2: # LogReg
vsnp = N_netlog(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
if best_cell_gob > 2: # many_sk
            n = 0  # initialize n
for reg_name, reg in reg_dict.items():
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
vsnpp_int = vsnpp_int + (np.round(np.round(vsnp, decimals=1)) == 1)
if n == best_cell_gob - 3:
vsnpp = vsnp
                n += 1  # keep only the prediction from the selected model index
vote[: len(test_r), 0] = (
vote[: len(test_r), 0] + submission[: len(test_r)]
)
vote[: len(test_r), 1] = (
vote[: len(test_r), 1]
+ ones[: len(test_r)]
- submission[: len(test_r)]
)
if best_cell_gob == 39:
for n in range(len(test_r)):
vsnp[n] = 0 if vote[n, 1] > vote[n, 0] else 1
vsnpp = vsnp
        submission = np.round(np.round(vsnpp, decimals=1))  # round to the most likely class value
submission = (submission == 1) | (vsnpp_int > 3)
return submission
def main_n():
    # a dictionary of many sklearn regressors
reg_dict = { # "LinearRegression": LinearRegression(),
# "Ridge": Ridge(),
# "Lasso": Lasso(),
# "ElasticNet": ElasticNet(),
# "KNeighborsRegressor": KNeighborsRegressor(n_neighbors=3),
# "DecisionTreeRegressor": DecisionTreeRegressor(),
"RandomForestRegressor": RandomForestRegressor(),
# "SVR": SVR(kernel='rbf', C=1e3, gamma=0.1, epsilon=0.1),
# "SGDRegressor": SGDRegressor(),
# "MLPRegressor": MLPRegressor(hidden_layer_sizes=(10,10), max_iter=100, early_stopping=True, n_iter_no_change=5),
"ExtraTreesRegressor": ExtraTreesRegressor(n_estimators=100),
# "PassiveAggressiveRegressor": PassiveAggressiveRegressor(max_iter=100, tol=1e-3),
# "TheilSenRegressor": TheilSenRegressor(random_state=0),
"HistGradientBoostingRegressor": HistGradientBoostingRegressor(),
"AdaBoostRegressor": AdaBoostRegressor(random_state=0, n_estimators=100),
"BaggingRegressor": BaggingRegressor(base_estimator=SVR(), n_estimators=2),
"GradientBoostingRegressor": GradientBoostingRegressor(random_state=0),
"VotingRegressor": VotingRegressor(
[("lr", LinearRegression()), ("rf", RandomForestRegressor(n_estimators=2))]
),
# "StackingRegressor": StackingRegressor(estimators=[('lr', RidgeCV()), ('svr', LinearSVR())], final_estimator=RandomForestRegressor(n_estimators=10)),
# "ARDRegression": ARDRegression(),
# "HuberRegressor": HuberRegressor(),
}
    # Read the CSVs
train = pd.read_csv("./train.csv")
test = pd.read_csv("./test.csv")
submission_no = np.empty((529, 3), dtype="int")
submission = [""] * 529
# type_pd = train["type"]
type_array = pd.get_dummies(train["type"])
del train["type"] # typeをトレインから分離
COLOR = pd.get_dummies(train["color"])
del train["color"]
del train["id"] # colorをトレインから分離;idをトレインから分離
COLOR2 = pd.get_dummies(test["color"])
del test["color"]
ID = test["id"]
del test["id"] # testも同じようにする
vote = np.zeros((3, 529), dtype="int")
target = pd.DataFrame(
type_array["Ghost"] * 0 + type_array["Ghoul"] * 2 + type_array["Goblin"] * 1
) # targetを作成する
# 怪物のデータが別々にいるプログラム用
ghost = train[type_array["Ghost"] == 1]
ghoul = train[type_array["Ghoul"] == 1]
goblin = train[type_array["Goblin"] == 1]
    # From here, repeat the same thing with the color columns included
    train_c = train.join(COLOR)
    test_c = test.join(COLOR2)
    # for the code paths that keep each monster's data separately
    ghost_c = train_c[type_array["Ghost"] == 1]
    ghoul_c = train_c[type_array["Ghoul"] == 1]
    goblin_c = train_c[type_array["Goblin"] == 1]
    # Analyse with validation scores for DAOA
    # Automatically decide whether a sample is a goblin
    best_cell_gob = 0
    accuracy = True
    gob_c_or_no = True  # True if the color features help, False otherwise
acc_score_gob = def_gob(
ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
)
    best_cell_gob = np.argmax(acc_score_gob)  # best score without color
    print("select", best_cell_gob)
    best_cell_gob_c = 0  # with color
    acc_score_gob_c = def_gob(
        ghost_c, ghoul_c, goblin_c, test_c, target, accuracy, best_cell_gob_c, reg_dict
    )
    best_cell_gob_c = np.argmax(acc_score_gob_c)  # added afterwards
    print("select", best_cell_gob)
    gob_c_or_no = True if best_cell_gob_c > best_cell_gob else False  # which is better, with or without color?
    # The real goblin-or-not classification run
    accuracy = False
if gob_c_or_no:
submission_no[:, 0] = def_gob(
ghost_c,
ghoul_c,
goblin_c,
test_c,
target,
accuracy,
best_cell_gob_c,
reg_dict,
)
if gob_c_or_no == False:
submission_no[:, 0] = def_gob(
ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
)
    # Goblin classification done
    # ID_goblin=np.array(ID[submission_no==1])  # extract IDs classified as goblin
    # ID_nogob=np.array(ID[submission_no==0])  # extract IDs not classified as goblin
    test_nogob = np.array(test[submission_no[:, 0] == 0])  # test rows not classified as goblin
    test_nogob_c = np.array(test_c[submission_no[:, 0] == 0])  # same, with the color features
    # submission[submission_no==1]="Goblin"  # pre-fill goblins  # not needed
    # magicno=nonono(ID_nogob)
    # submission_no_gob=np.zeros((magicno),dtype="int")
    # From here, distinguish Ghoul vs Ghost
    best_cell_two = 0
    accuracy = True
    c_or_no = True  # True if the color features help, False otherwise
acc_score_two = def_two(
ghost, ghoul, goblin, test_nogob, target, accuracy, best_cell_two, reg_dict
)
    best_cell_two = np.argmax(acc_score_two)  # best score without color
    best_cell_two_c = 0  # with color
acc_score_two_c = def_two(
ghost_c,
ghoul_c,
goblin_c,
test_nogob_c,
target,
accuracy,
best_cell_two_c,
reg_dict,
)
    best_cell_two_c = np.argmax(acc_score_two_c)  # added afterwards
    c_or_no = True if best_cell_two_c > best_cell_two else False  # which is better, with or without color?
    # The real two-class (Ghost vs Ghoul) run
    accuracy = False
if gob_c_or_no:
submission_no[:, 1] = def_two(
ghost_c,
ghoul_c,
goblin_c,
test_c,
target,
accuracy,
best_cell_two_c,
reg_dict,
)
if gob_c_or_no == False:
submission_no[:, 1] = def_two(
ghost, ghoul, goblin, test, target, accuracy, best_cell_two, reg_dict
)
    # ID_ghost=np.array(ID_nogob[submission_no_gob[:len(ID_nogob)]==1])  # extract IDs classified as ghost
    # ID_ghoul=np.array(ID_nogob[submission_no_gob[:len(ID_nogob)]==0])  # extract IDs classified as ghoul
# print(ID_ghost)
# nghost, nghoul, ngoblin = 0,0,0
for n in range(len(ID)):
if submission_no[n, 0] == 1:
submission[n] = "Goblin"
if submission_no[n, 0] == 0:
submission[n] = "Ghost" if submission_no[n, 1] == 1 else "Ghoul"
# if ID[n]==ID_ghost[nghost]:
# submission[n]="Ghost";nghost =(nghost+1 if len(ID_ghost)>nghost+1 else 0 )
# if ID[n]==ID_ghoul[nghoul]:
# submission[n]="Ghoul";nghoul = (nghoul+1 if len(ID_ghoul)>nghoul+1 else 0 )
# if ID[n]==ID_goblin[ngoblin]:
# submission[n]="Ghoblin";ngoblin = (ngoblin+1 if len(ID_goblin)>ngoblin+1 else 0 )
s_c = pd.DataFrame({"id": ID, "type": submission})
return s_c
# Run the main routine here
submission = main_n()
# # Inference from here
# Create the CSV file for the Kaggle submission
submission.to_csv("submission6.csv", index=False)
# ライブラリのインポート欄
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn import model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
import warnings
import seaborn as sns # 可視化
warnings.filterwarnings("ignore")
import zipfile # サンプルがzipなので展開する
zipfile.ZipFile(
"/kaggle/input/ghouls-goblins-and-ghosts-boo/train.csv.zip"
).extractall()
zipfile.ZipFile("/kaggle/input/ghouls-goblins-and-ghosts-boo/test.csv.zip").extractall()
# zipfile.ZipFile('/kaggle/input/ghouls-goblins-and-ghosts-boo/sample_submission.csv.zip').extractall()
from sklearn import datasets
from sklearn.model_selection import train_test_split # クロスバリデーション用(テストとトレ分ける)
from sklearn.model_selection import cross_val_score
from sklearn import metrics # 精度検証用
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
# ニューラルネットワーク
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn import metrics
import joblib
from sklearn import svm
from sklearn.linear_model import LogisticRegression
# LightGBM#import lightgbm as lgb
import optuna
import optuna.integration.lightgbm as lgb
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
# sklearn モデル 沢山
from sklearn.linear_model import (
LinearRegression,
Ridge,
Lasso,
ElasticNet,
SGDRegressor,
)
from sklearn.linear_model import PassiveAggressiveRegressor, ARDRegression, RidgeCV
from sklearn.linear_model import TheilSenRegressor, RANSACRegressor, HuberRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import (
RandomForestRegressor,
AdaBoostRegressor,
ExtraTreesRegressor,
HistGradientBoostingRegressor,
)
from sklearn.ensemble import (
BaggingRegressor,
GradientBoostingRegressor,
VotingRegressor,
StackingRegressor,
)
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.cross_decomposition import PLSRegression
def N_netlog(train, test, target, tartest):
# paramate
hidden_layer_sizes = (100,)
activation = "relu"
solver = "adam"
batch_size = "auto"
alpha = 0.0001
random_state = 0
max_iter = 10000
early_stopping = True
# 学習
clf = MLPRegressor(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
batch_size=batch_size,
alpha=alpha,
random_state=random_state,
max_iter=max_iter,
# early_stopping = early_stopping
)
clf.fit(train, target)
SAVE_TRAINED_DATA_PATH = "train1.learn"
# 学習結果を出力
joblib.dump(clf, SAVE_TRAINED_DATA_PATH)
# 学習済ファイルのロード
clf1 = joblib.load(SAVE_TRAINED_DATA_PATH)
# 学習結果の検証
# predict_y1 = clf1.predict_proba(test)それぞれの回答確率を出す?
# predict = clf1.predict(test)
# accs=accuracy_score(train, target)
# return predict,accs
# スコア用
if len(tartest) > 1:
print(tartest)
pred = clf1.predict(test) # LightGBM推論]
pred_r = np.round(np.round(pred, decimals=1)) # 最尤と判断したクラスの値にする
predict = accuracy_score(tartest, pred_r) # 最尤と判断したクラスの値にする
# スコアじゃないとき
if len(tartest) == 1:
print(test)
predict_no = clf1.predict(test) # LightGBM推論
predict = np.round(np.round(predict_no, decimals=1)) # 最尤と判断したクラスの値にする
return predict
def lightgbm(train, test, target, tartest): # データ用意
X_train, X_test, Y_train, Y_test = train_test_split(
train, target, random_state=0
) # random_stateはseed値。
# LightGBMのパラメータ設定
params = {
"boosting_type": "gbdt",
"objective": "regression",
"metric": "rmse", # クラスは0,1,2,...と与えられる(数字は関係ない)#評価指標:正答率
#'num_iterations': 1000,#1000回学習
"verbose": -1, # 学習情報を非表示
}
#'metric': 'multi_logress'かえた
# LightGBMを利用するのに必要なフォーマットに変換
lgb_train = lgb.Dataset(X_train, Y_train)
lgb_eval = lgb.Dataset(X_test, Y_test, reference=lgb_train)
best_params, history = {}, []
# LightGBM学習
gbm = lgb.train(
params,
lgb_train,
valid_sets=[lgb_train, lgb_eval],
verbose_eval=100,
early_stopping_rounds=100,
)
best_params = gbm.params
print("Best params:", best_params)
params = best_params
    # inference starts here
    # scoring mode: tartest holds the true labels
    if len(tartest) > 1:
        print(tartest)
        pred = gbm.predict(test, num_iteration=gbm.best_iteration)  # LightGBM inference
        pred_r = np.round(np.round(pred, decimals=1))  # snap to the nearest class value
        predict = accuracy_score(tartest, pred_r)  # accuracy against the held-out labels
    # prediction mode: no labels available
    if len(tartest) == 1:
        print(test)
        predict_no = gbm.predict(test, num_iteration=gbm.best_iteration)  # LightGBM inference
        predict = np.round(np.round(predict_no, decimals=1))  # snap to the nearest class value
    return predict
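# A minimal, self-contained usage sketch (synthetic data, not the competition data) of the
# calling convention shared by N_netlog() and lightgbm(): passing held-out labels as the last
# argument returns an accuracy score, while passing the dummy list [0] returns rounded
# predictions. The _demo_* names exist only for this illustration.
_demo_X = np.random.RandomState(0).rand(40, 4)
_demo_y = np.random.RandomState(1).randint(0, 2, 40)
_demo_acc = N_netlog(_demo_X[:30], _demo_X[30:], _demo_y[:30], _demo_y[30:])  # scoring mode
_demo_pred = N_netlog(_demo_X[:30], _demo_X[30:], _demo_y[:30], [0])  # prediction mode
print("demo accuracy:", _demo_acc)
print("demo predictions:", _demo_pred)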
def def_two(
    ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
):  # classify Ghost vs. Ghoul (author's note: this part is still not working well)
    # best_cell_gob selects which analysis method to use
    acc_score_gob = np.zeros((40), dtype="float64")  # store the scores
    vsnp = np.empty((529), dtype="float64")
    vsnpp = np.empty((529), dtype="float64")
    submission = np.empty((529), dtype="int")
    vote = np.zeros((529, 2), dtype="int")  # vote tallies
    ones = np.ones((529), dtype="int")  # used for the voting
    ghost0 = np.zeros(len(ghost))
    ghost1 = np.ones(len(ghost))
    ghoul0 = np.zeros(len(ghoul))
    ghoul1 = np.ones(len(ghoul))
    goblin0 = np.zeros(len(goblin))
    goblin1 = np.ones(len(goblin))  # building blocks for the target
    vs = ghost.append(ghoul, ignore_index=True)
    vst = np.append(ghost1, ghoul0)  # build the target
    # here Ghost is encoded as 1
    # scoring run or the real run?
    if accuracy == True:  # produce a score
        train_r, test_r, target_r, tartest_r = train_test_split(
            vs, vst, random_state=0
        )  # random_state is the seed
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
acc_score_gob[0] = accuracy_score(tartest_r, vsnp) # LogReg
submission = np.round(np.round(vsnp, decimals=1))
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
# acc_score_gob[1]=lightgbm(train_r, test_r, target_r, tartest_r)
# acc_score_gob[2]=N_netlog(train_r, test_r, target_r, tartest_r)
        # the batch of sklearn regressors
n = 0
for reg_name, reg in reg_dict.items():
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
submission = np.round(np.round(vsnp, decimals=1))
acc_score_gob[n + 3] = accuracy_score(tartest_r, submission)
n += 1
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
for n in range(len(test_r)):
submission[n] = 0 if vote[n, 1] > vote[n, 0] else 1
# acc_score_gob[39]=accuracy_score(tartest_r, submission)
print(acc_score_gob)
return acc_score_gob
    if accuracy == False:  # the real run
train_r, test_r, target_r, tartest_r = vs, test, vst, [0]
if best_cell_gob == 0: # LogReg
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
vsnpp = vsnp
        if best_cell_gob == 1:  # LightGBM
vsnp = lightgbm(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
        if best_cell_gob == 2:  # neural network (N_netlog)
vsnp = N_netlog(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
        if best_cell_gob > 2:  # one of the many sklearn regressors
            n = 0  # reset the counter
for reg_name, reg in reg_dict.items():
# if n == best_cell_gob-3:
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
if n == best_cell_gob - 3:
vsnpp = vsnp
                n += 1  # only the selected regressor's prediction is kept
vote[: len(test_r), 0] = (
vote[: len(test_r), 0] + submission[: len(test_r)]
)
vote[: len(test_r), 1] = (
vote[: len(test_r), 1]
+ ones[: len(test_r)]
- submission[: len(test_r)]
)
if best_cell_gob == 39:
for n in range(len(test_r)):
vsnp[n] = 0 if vote[n, 1] > vote[n, 0] else 1
vsnpp = vsnp
        submission = np.round(np.round(vsnpp, decimals=1))  # snap to the nearest class value
return submission
def def_gob(
    ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
):  # first decide "Goblin or not": pick the classifier that separates Goblins best
    acc_score_gob = np.zeros((40), dtype="float64")  # store the scores
    vsnp = np.zeros((529), dtype="float64")
    vsnpp = np.zeros((529), dtype="float64")
    submission = np.empty((529), dtype="bool")
    vote = np.zeros((529, 2), dtype="int")  # vote tallies
    ones = np.ones((529), dtype="bool")  # used for the voting
    ghost0 = np.zeros(len(ghost))
    ghost1 = np.ones(len(ghost))
    ghoul0 = np.zeros(len(ghoul))
    ghoul1 = np.ones(len(ghoul))
    goblin0 = np.zeros(len(goblin))
    goblin1 = np.ones(len(goblin))  # building blocks for the target
    vs = goblin.append(ghost, ignore_index=True)  # build the training frame
    vs = vs.append(ghoul, ignore_index=True)  # build the training frame
    vst = np.append(goblin1, ghost0)  # build the target (Goblin = 1)
    vst = np.append(vst, ghoul0)
    # scoring run or the real run?
    if accuracy == True:  # produce a score
        train_r, test_r, target_r, tartest_r = train_test_split(
            vs, vst, random_state=0
        )  # random_state is the seed
# vote[:len(test_r),0]=ones[:len(test_r)]*5
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
acc_score_gob[0] = accuracy_score(tartest_r, vsnp) # LogReg
submission = np.round(np.round(vsnp, decimals=1))
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
# acc_score_gob[1]=lightgbm(train_r, test_r, target_r, tartest_r)
acc_score_gob[2] = N_netlog(train_r, test_r, target_r, tartest_r)
        # the batch of sklearn regressors
n = 0
for reg_name, reg in reg_dict.items():
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
submission = np.round(np.round(vsnp, decimals=1))
acc_score_gob[n + 3] = accuracy_score(tartest_r, submission)
n += 1
vote[: len(test_r), 0] = vote[: len(test_r), 0] + submission[: len(test_r)]
vote[: len(test_r), 1] = (
vote[: len(test_r), 1] + ones[: len(test_r)] - submission[: len(test_r)]
)
for n in range(len(test_r)):
submission[n] = 0 if vote[n, 1] > vote[n, 0] else 1
# acc_score_gob[39]=accuracy_score(tartest_r, submission)
print(acc_score_gob)
return acc_score_gob
    if accuracy == False:  # the real run
train_r, test_r, target_r, tartest_r = vs, test, vst, [0]
vsnpp_int = np.zeros((529), dtype="int")
if best_cell_gob == 0: # LogReg
model = LogisticRegression()
model.fit(train_r, target_r)
vsnp = model.predict(test_r)
vsnpp = vsnp
        if best_cell_gob == 1:  # LightGBM
vsnp = lightgbm(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
        if best_cell_gob == 2:  # neural network (N_netlog)
vsnp = N_netlog(train_r, test_r, target_r, tartest_r)
vsnpp = vsnp
        if best_cell_gob > 2:  # one of the many sklearn regressors
            n = 0  # reset the counter
for reg_name, reg in reg_dict.items():
reg.fit(train_r, target_r)
vsnp = reg.predict(test_r)
vsnpp_int = vsnpp_int + (np.round(np.round(vsnp, decimals=1)) == 1)
if n == best_cell_gob - 3:
vsnpp = vsnp
                n += 1  # only the selected regressor's prediction is kept
vote[: len(test_r), 0] = (
vote[: len(test_r), 0] + submission[: len(test_r)]
)
vote[: len(test_r), 1] = (
vote[: len(test_r), 1]
+ ones[: len(test_r)]
- submission[: len(test_r)]
)
if best_cell_gob == 39:
for n in range(len(test_r)):
vsnp[n] = 0 if vote[n, 1] > vote[n, 0] else 1
vsnpp = vsnp
        submission = np.round(np.round(vsnpp, decimals=1))  # snap to the nearest class value
submission = (submission == 1) | (vsnpp_int > 3)
return submission
def main_n():
    # for the batch of sklearn regressors
reg_dict = { # "LinearRegression": LinearRegression(),
# "Ridge": Ridge(),
# "Lasso": Lasso(),
# "ElasticNet": ElasticNet(),
# "KNeighborsRegressor": KNeighborsRegressor(n_neighbors=3),
# "DecisionTreeRegressor": DecisionTreeRegressor(),
"RandomForestRegressor": RandomForestRegressor(),
# "SVR": SVR(kernel='rbf', C=1e3, gamma=0.1, epsilon=0.1),
# "SGDRegressor": SGDRegressor(),
# "MLPRegressor": MLPRegressor(hidden_layer_sizes=(10,10), max_iter=100, early_stopping=True, n_iter_no_change=5),
"ExtraTreesRegressor": ExtraTreesRegressor(n_estimators=100),
# "PassiveAggressiveRegressor": PassiveAggressiveRegressor(max_iter=100, tol=1e-3),
# "TheilSenRegressor": TheilSenRegressor(random_state=0),
"HistGradientBoostingRegressor": HistGradientBoostingRegressor(),
"AdaBoostRegressor": AdaBoostRegressor(random_state=0, n_estimators=100),
"BaggingRegressor": BaggingRegressor(base_estimator=SVR(), n_estimators=2),
"GradientBoostingRegressor": GradientBoostingRegressor(random_state=0),
"VotingRegressor": VotingRegressor(
[("lr", LinearRegression()), ("rf", RandomForestRegressor(n_estimators=2))]
),
# "StackingRegressor": StackingRegressor(estimators=[('lr', RidgeCV()), ('svr', LinearSVR())], final_estimator=RandomForestRegressor(n_estimators=10)),
# "ARDRegression": ARDRegression(),
# "HuberRegressor": HuberRegressor(),
}
    # read the CSV files
train = pd.read_csv("./train.csv")
test = pd.read_csv("./test.csv")
submission_no = np.empty((529, 3), dtype="int")
submission = [""] * 529
    # type_pd = train["type"]
    type_array = pd.get_dummies(train["type"])
    del train["type"]  # split the type column off the train frame
    COLOR = pd.get_dummies(train["color"])
    del train["color"]
    del train["id"]  # split color and id off the train frame
    COLOR2 = pd.get_dummies(test["color"])
    del test["color"]
    ID = test["id"]
    del test["id"]  # do the same for the test frame
    vote = np.zeros((3, 529), dtype="int")
    target = pd.DataFrame(
        type_array["Ghost"] * 0 + type_array["Ghoul"] * 2 + type_array["Goblin"] * 1
    )  # build the target
    # frames with each monster's rows kept separately
ghost = train[type_array["Ghost"] == 1]
ghoul = train[type_array["Ghoul"] == 1]
goblin = train[type_array["Goblin"] == 1]
    # from here, the same again but with the color dummies included
    train_c = train.join(COLOR)
    test_c = test.join(COLOR2)
    # frames with each monster's rows kept separately (with color)
ghost_c = train_c[type_array["Ghost"] == 1]
ghoul_c = train_c[type_array["Ghoul"] == 1]
goblin_c = train_c[type_array["Goblin"] == 1]
    # score-based analysis (DAOA) to pick the classifiers
    # automatically decide whether each sample is a Goblin
    best_cell_gob = 0
    accuracy = True
    gob_c_or_no = True  # True if the color features help, False otherwise
acc_score_gob = def_gob(
ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
)
    best_cell_gob = np.argmax(acc_score_gob)  # best score without the color features
    print("selected", best_cell_gob)
    best_cell_gob_c = 0  # now with the color features
    acc_score_gob_c = def_gob(
        ghost_c, ghoul_c, goblin_c, test_c, target, accuracy, best_cell_gob_c, reg_dict
    )
    best_cell_gob_c = np.argmax(acc_score_gob_c)  # best score with the color features
    print("selected", best_cell_gob_c)  # fixed: this previously printed best_cell_gob again
    gob_c_or_no = acc_score_gob_c.max() > acc_score_gob.max()  # compare the best scores (was comparing the argmax indices)
    # the real "Goblin or not" run
accuracy = False
if gob_c_or_no:
submission_no[:, 0] = def_gob(
ghost_c,
ghoul_c,
goblin_c,
test_c,
target,
accuracy,
best_cell_gob_c,
reg_dict,
)
if gob_c_or_no == False:
submission_no[:, 0] = def_gob(
ghost, ghoul, goblin, test, target, accuracy, best_cell_gob, reg_dict
)
    # Goblin classification done
    # ID_goblin=np.array(ID[submission_no==1])  # would extract the IDs classified as Goblin
    # ID_nogob=np.array(ID[submission_no==0])  # would extract the IDs not classified as Goblin
    test_nogob = np.array(test[submission_no[:, 0] == 0])  # test rows not classified as Goblin
    test_nogob_c = np.array(test_c[submission_no[:, 0] == 0])  # same, with color features
    # submission[submission_no==1]="Goblin"  # would pre-fill the Goblins (not needed)
    # magicno=nonono(ID_nogob)
    # submission_no_gob=np.zeros((magicno),dtype="int")
    # from here, classify Ghoul vs. Ghost
    best_cell_two = 0
    accuracy = True
    c_or_no = True  # True if the color features help, False otherwise
acc_score_two = def_two(
ghost, ghoul, goblin, test_nogob, target, accuracy, best_cell_two, reg_dict
)
    best_cell_two = np.argmax(acc_score_two)  # best score without the color features
    best_cell_two_c = 0  # now with the color features
acc_score_two_c = def_two(
ghost_c,
ghoul_c,
goblin_c,
test_nogob_c,
target,
accuracy,
best_cell_two_c,
reg_dict,
)
    best_cell_two_c = np.argmax(acc_score_two_c)  # best score with the color features
    c_or_no = acc_score_two_c.max() > acc_score_two.max()  # compare the best scores (was comparing the argmax indices)
    # the real Ghost-vs-Ghoul run
    accuracy = False
    if c_or_no:  # fixed: the original tested gob_c_or_no here, but c_or_no is the relevant flag
submission_no[:, 1] = def_two(
ghost_c,
ghoul_c,
goblin_c,
test_c,
target,
accuracy,
best_cell_two_c,
reg_dict,
)
    if c_or_no == False:  # fixed: the original tested gob_c_or_no here as well
submission_no[:, 1] = def_two(
ghost, ghoul, goblin, test, target, accuracy, best_cell_two, reg_dict
)
    # ID_ghost=np.array(ID_nogob[submission_no_gob[:len(ID_nogob)]==1])  # would extract the IDs classified as Ghost
    # ID_ghoul=np.array(ID_nogob[submission_no_gob[:len(ID_nogob)]==0])  # would extract the IDs classified as Ghoul
    # print(ID_ghost)
    # nghost, nghoul, ngoblin = 0,0,0
for n in range(len(ID)):
if submission_no[n, 0] == 1:
submission[n] = "Goblin"
if submission_no[n, 0] == 0:
submission[n] = "Ghost" if submission_no[n, 1] == 1 else "Ghoul"
# if ID[n]==ID_ghost[nghost]:
# submission[n]="Ghost";nghost =(nghost+1 if len(ID_ghost)>nghost+1 else 0 )
# if ID[n]==ID_ghoul[nghoul]:
# submission[n]="Ghoul";nghoul = (nghoul+1 if len(ID_ghoul)>nghoul+1 else 0 )
# if ID[n]==ID_goblin[ngoblin]:
# submission[n]="Ghoblin";ngoblin = (ngoblin+1 if len(ID_goblin)>ngoblin+1 else 0 )
s_c = pd.DataFrame({"id": ID, "type": submission})
return s_c
# run the whole pipeline
submission = main_n()
# inference happens inside main_n(); what remains is writing the result out
# build the CSV file for the Kaggle submission
submission.to_csv("submission6.csv", index=False)
# Make a copy of this notebook and submit the link in Google Classroom. To submit the link to a Kaggle, click Share > Private > Switch it to Public > copy the link and submit in Classroom under the assignment.
# # 1. Printing Text
# Please print out your name!
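# One possible solution sketch (the name below is only a placeholder):
print("My name is Ada Lovelace")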
# # 2. Variables/Math Operators: Counting Cookies!
# ## Tasks/Objectives
# 1. Create a variable called cookies with value 30. (You have 30 cookies in a jar).
# 2. Create a variable called jars with value 5. (You have 5 jars)
# 3. Print the following message: "I have 30 cookies!". Make sure to use the variable, cookies, to put the 30, instead of just typing 30.
# 4. Use math to calculate the total number of cookies, if you have 5 jars with 30 cookies each! Multiply using variables.
#
# Remember to use variables throughout the entire task.
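# One possible solution sketch for the tasks above, using the values from the prompt:
cookies = 30  # 1. cookies in a jar
jars = 5  # 2. number of jars
print("I have " + str(cookies) + " cookies!")  # 3. message built from the variable
total_cookies = cookies * jars  # 4. total cookies across all jars
print(total_cookies)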
# # 3. String Manipulation
# Each DNA strand has 8 characters comprised of A, G, T, C.
# 1. Given the variables, try to make a DNA strand.
# 2. Replace A in the DNA strand with T
# 3. Make the DNA strand lowercase
# 4. Print out the length of the DNA strand to verify that you have all 8 characters in the format of "The length of the DNA strand is: (length of the DNA strand)"
#
# given variables
rando_1 = "AAA"
rando_2 = "GGG"
a = "A"
g = "g"
t = "t"
c = "c"
# your code here
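# One possible solution sketch using the given variables (any 8-character strand works):
dna = rando_1 + rando_2 + t + c  # 1. "AAAGGGtc" -> 8 characters
dna = dna.replace("A", "T")  # 2. replace A with T
dna = dna.lower()  # 3. make it lowercase
print("The length of the DNA strand is: " + str(len(dna)))  # 4. verify the length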
# # 4. Iteration/Lists
# ## Exercise 1: The Counter
# Print all numbers from 0-10 using a for loop
# ## Exercise 2
# Use the 2 given lists to
# 1. Combine the lists
# 2. Print the list altogether
# 3. Print every individual element of the combined list
# 4. Sort the list by alphabetical order
# 5. Print the length of the combined list
# 6. Print the first element in the combined list
# ## Exercise 3: Odometer
# When someone is driving on the highway, there is no real speed limit. However, when someone is driving on a residential road, the speed limit is 25. To help our police officers reinforce that, we will be writing a simple program. When ```road_state``` is 0, that means the driver is on the freeway and there is no speed limit. However, while they are on ```road_state = 1```, they must obey the 25 mph speed limit. Please remind them by recognizing when they are on the residential roads and need to be going 25 mph by printing a statement that tells them to do so.
# Exercise 1: The Counter
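# One possible solution sketch: range(11) covers the numbers 0 through 10.
for number in range(11):
    print(number)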
# Exercise 2
# given lists
cookies = ["Chocolate Chip", "Raisin", "White Chocolate Chip", "Sugar"]
ice_cream = ["Mint Chocolate Chip", "Cookie Dough", "Chocolate Chip"]
# your code here
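# One possible solution sketch for the list tasks above:
treats = cookies + ice_cream  # 1. combine the lists
print(treats)  # 2. print the list altogether
for treat in treats:  # 3. print every individual element
    print(treat)
treats.sort()  # 4. sort alphabetically
print(len(treats))  # 5. length of the combined list
print(treats[0])  # 6. first element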
# Exercise 3: Odometer
# given variables
road_state = 0
# your code here [below]
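# One possible solution sketch: only remind the driver while road_state == 1 (residential road).
if road_state == 1:
    print("You are on a residential road: please keep to the 25 mph speed limit.")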
Hepatitis C Prediction Dataset
### Context
The data set contains laboratory values of blood donors and Hepatitis C patients and demographic values like age. The data was obtained from UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/HCV+data
### Content
All attributes except Category and Sex are numerical.
Attributes 1 to 4 refer to the data of the patient:
1) X (Patient ID/No.)
2) Category (diagnosis) (values: '0=Blood Donor', '0s=suspect Blood Donor', '1=Hepatitis', '2=Fibrosis', '3=Cirrhosis')
3) Age (in years)
4) Sex (f,m)
Attributes 5 to 14 refer to laboratory data:
5) ALB
6) ALP
7) ALT
8) AST
9) BIL
10) CHE
11) CHOL
12) CREA
13) GGT
14) PROT
The target attribute for classification is Category (2): blood donors vs. Hepatitis C patients (including its progress: 'just' Hepatitis C, Fibrosis, Cirrhosis).
Kaggle dataset identifier: hepatitis-c-dataset
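As a minimal illustration (not part of the original notebook), the donor-vs-patient target described above can be derived as follows; the `is_patient` column name exists only for this sketch:

import pandas as pd

hcv = pd.read_csv("hepatitis-c-dataset/HepatitisCdata.csv")
# 1 = Hepatitis C patient (any stage), 0 = (suspect) blood donor
hcv["is_patient"] = hcv["Category"].isin(["1=Hepatitis", "2=Fibrosis", "3=Cirrhosis"]).astype(int)
print(hcv["is_patient"].value_counts())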
import pandas as pd
df = pd.read_csv('hepatitis-c-dataset/HepatitisCdata.csv')
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 615 entries, 0 to 614
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 615 non-null int64
1 Category 615 non-null object
2 Age 615 non-null int64
3 Sex 615 non-null object
4 ALB 614 non-null float64
5 ALP 597 non-null float64
6 ALT 614 non-null float64
7 AST 615 non-null float64
8 BIL 615 non-null float64
9 CHE 615 non-null float64
10 CHOL 605 non-null float64
11 CREA 615 non-null float64
12 GGT 615 non-null float64
13 PROT 614 non-null float64
dtypes: float64(10), int64(2), object(2)
memory usage: 67.4+ KB
Examples:
{
"Unnamed: 0": 1,
"Category": "0=Blood Donor",
"Age": 32,
"Sex": "m",
"ALB": 38.5,
"ALP": 52.5,
"ALT": 7.7,
"AST": 22.1,
"BIL": 7.5,
"CHE": 6.93,
"CHOL": 3.23,
"CREA": 106,
"GGT": 12.1,
"PROT": 69.0
}
{
"Unnamed: 0": 2,
"Category": "0=Blood Donor",
"Age": 32,
"Sex": "m",
"ALB": 38.5,
"ALP": 70.3,
"ALT": 18.0,
"AST": 24.7,
"BIL": 3.9,
"CHE": 11.17,
"CHOL": 4.8,
"CREA": 74,
"GGT": 15.6,
"PROT": 76.5
}
{
"Unnamed: 0": 3,
"Category": "0=Blood Donor",
"Age": 32,
"Sex": "m",
"ALB": 46.9,
"ALP": 74.7,
"ALT": 36.2,
"AST": 52.6,
"BIL": 6.1,
"CHE": 8.84,
"CHOL": 5.2,
"CREA": 86,
"GGT": 33.2,
"PROT": 79.3
}
{
"Unnamed: 0": 4,
"Category": "0=Blood Donor",
"Age": 32,
"Sex": "m",
"ALB": 43.2,
"ALP": 52.0,
"ALT": 30.6,
"AST": 22.6,
"BIL": 18.9,
"CHE": 7.33,
"CHOL": 4.74,
"CREA": 80,
"GGT": 33.8,
"PROT": 75.7
}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
# # EDA & Preprocessing
data = pd.read_csv("../input/hepatitis-c-dataset/HepatitisCdata.csv")
data.head()
data.tail()
data.describe()
data.info()
data.isnull().sum()
data = data.drop("Unnamed: 0", axis=1)
# use .loc with a boolean mask directly to avoid chained-assignment warnings
data.loc[
    data["Category"].isin(["1=Hepatitis", "2=Fibrosis", "3=Cirrhosis"]), "Category"
] = 1
data.loc[
    data["Category"].isin(["0=Blood Donor", "0s=suspect Blood Donor"]), "Category"
] = 0
data.loc[data["Sex"] == "m", "Sex"] = 1
data.loc[data["Sex"] == "f", "Sex"] = 0
data.head()
# impute missing lab values with the column medians (numeric columns only)
data.fillna(data.median(numeric_only=True), inplace=True)
data.isnull().sum()
fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(18, 18))
sns.histplot(data=data, x="ALB", kde=True, ax=axes[0][0])
sns.histplot(data=data, x="ALP", kde=True, ax=axes[0][1])
sns.histplot(data=data, x="ALT", kde=True, ax=axes[1][0])
sns.histplot(data=data, x="AST", kde=True, ax=axes[1][1])
sns.histplot(data=data, x="BIL", kde=True, ax=axes[2][0])
sns.histplot(data=data, x="CHE", kde=True, ax=axes[2][1])
sns.histplot(data=data, x="CHOL", kde=True, ax=axes[3][0])
sns.histplot(data=data, x="CREA", kde=True, ax=axes[3][1])
sns.histplot(data=data, x="GGT", kde=True, ax=axes[4][0])
sns.histplot(data=data, x="PROT", kde=True, ax=axes[4][1])
labels = data["Category"].value_counts(sort=True).index
sizes = data["Category"].value_counts(sort=True)
colors = ["Red", "Blue"]
plt.figure(figsize=(7, 7))
plt.pie(
sizes,
labels=labels,
colors=colors,
autopct="%1.1f%%",
startangle=90,
)
plt.title("Category pie")
plt.show()
data.corr()
sns.pairplot(data, diag_kind="kde", hue="Category")
data = pd.get_dummies(data, columns=["Sex"], drop_first=True)
data.head()
plt.figure(figsize=(16, 8))
sns.heatmap(data.corr(), annot=True)
robust_sc = preprocessing.RobustScaler()
standard_sc = preprocessing.StandardScaler()
minmax_sc = preprocessing.MinMaxScaler()
X = data.drop(["Category"], axis=1)
y = data["Category"]
# quick comparison of scalers using an (in-sample) decision tree fit
for x in [robust_sc, standard_sc, minmax_sc]:
    resultado = []
    scaler = x.fit(X)
    X_new = x.transform(X)
    tree = DecisionTreeClassifier(max_depth=25, random_state=42)
    tree.fit(X_new, y)
    y_pred = tree.predict(X_new)
    f1sc = f1_score(y, y_pred, average="weighted")
    rauc = (y, y_pred)
    resultado.append(f1sc)
    print("Scaler used --->", x)
    print("f1 for this scaling strategy:", f1sc)
    print("----------------------------------------")
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42, stratify=y
)
over = SMOTE()
overs = RandomOverSampler()
under = RandomUnderSampler()
steps = [("o", over), ("os", overs), ("u", under)]
pipeline = Pipeline(steps=steps)
X_train, y_train = pipeline.fit_resample(X_train, y_train)
X_train = standard_sc.fit_transform(X_train)
X_test = standard_sc.transform(X_test)
def confusion(y_test, y_test_pred, X):
names = ["Non Hepatitis", "Hepatitis"]
cm = confusion_matrix(y_test, y_test_pred)
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(cm, annot=True, linewidth=0.5, linecolor="r", fmt=".0f", ax=ax)
plt.title(X, size=25)
plt.xlabel("y_pred")
plt.ylabel("y_true")
ax.set_xticklabels(names)
ax.set_yticklabels(names)
plt.show()
return
# # Machine Learning
RF = RandomForestClassifier(random_state=42)
RF.fit(X_train, y_train)
pred = RF.predict(X_test)
score = RF.score(X_test, y_test)
score
confusion(y_test, pred, "RF")
feat_importances = pd.Series(
RF.feature_importances_, index=data.drop("Category", axis=1).columns
)
feat_importances.nlargest(5).plot(kind="barh")
# ## XGB
gbm = XGBClassifier(verbosity=1)
params_xgb = {
    "n_estimators": [500, 1000, 1500],
    "learning_rate": [0.1, 0.3, 0.6],
    "gpu_id": [0],
    "predictor": ["gpu_predictor"],
    "tree_method": ["gpu_hist"],
    "updater": ["grow_gpu_hist"],  # the duplicate "updater" entry was removed
    "sampling_method": ["gradient_based"],
}
model_xgb = GridSearchCV(gbm, param_grid=params_xgb, cv=5, n_jobs=-1)
model_xgb.fit(X_train, y_train)
print("Best params: " + str(model_xgb.best_params_))
print("Best Score: " + str(model_xgb.best_score_) + "\n")
scores = pd.DataFrame(model_xgb.cv_results_)
scores.sort_values(by="rank_test_score")
y_train_pred_xgb = model_xgb.predict(X_train)
y_test_pred_xgb = model_xgb.predict(X_test)
print(classification_report(y_test, y_test_pred_xgb))
confusion(y_test, y_test_pred_xgb, "XGB")
# ## MLP
clf = MLPClassifier(random_state=42)
params_MLP = {
"hidden_layer_sizes": [64, 128, 256],
"activation": ["identity", "logistic", "tanh", "relu"],
"solver": ["lbfgs", "sgd", "adam"],
"learning_rate": ["constant", "invscaling", "adaptive"],
"max_iter": [100, 200],
"warm_start": [True],
}
model_MLP = GridSearchCV(clf, param_grid=params_MLP, cv=3, n_jobs=-1)
model_MLP.fit(X_train, y_train)
print("Best params: " + str(model_MLP.best_params_))
print("Best Score: " + str(model_MLP.best_score_) + "\n")
scores = pd.DataFrame(model_MLP.cv_results_)
scores.sort_values(by="rank_test_score")
y_train_pred_MLP = model_MLP.predict(X_train)
y_test_pred_MLP = model_MLP.predict(X_test)
print(classification_report(y_test, y_test_pred_MLP))
confusion(y_test, y_test_pred_MLP, "MLP")
# ## Random Forest
clf = RandomForestClassifier(random_state=42)
params_RF = {
"max_depth": [250, 500, 1000],
"criterion": ["gini", "entropy"],
"min_samples_split": [2, 4, 6],
"min_samples_leaf": [1, 2, 3],
"max_features": ["auto", "sqrt", "log2"],
"warm_start": [True],
"class_weight": ["balanced", "balanced_subsample"],
}
model_RF = GridSearchCV(clf, param_grid=params_RF, cv=3, n_jobs=-1)
model_RF.fit(X_train, y_train)
print("Best params: " + str(model_RF.best_params_))
print("Best Score: " + str(model_RF.best_score_) + "\n")
scores = pd.DataFrame(model_RF.cv_results_)
scores.sort_values(by="rank_test_score")
y_train_pred_RF = model_RF.predict(X_train)
y_test_pred_RF = model_RF.predict(X_test)
print(classification_report(y_test, y_test_pred_RF))
confusion(y_test, y_test_pred_RF, "RF")
FIFA World Cup
### Context
The FIFA World Cup is a global football competition contested by the various football-playing nations of the world. It is contested every four years and is the most prestigious and important trophy in the sport of football.
### Content
The World Cups dataset shows all information about all the World Cups in history, while the World Cup Matches dataset shows all the results from the matches contested as part of the cups.
Kaggle dataset identifier: fifa-world-cup
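As a minimal sketch (not part of the original notebook), the two tables can be joined on Year, for example to attach each tournament's winner to its matches; the file paths match the snippets below:

import pandas as pd

cups = pd.read_csv("fifa-world-cup/WorldCups.csv")
matches = pd.read_csv("fifa-world-cup/WorldCupMatches.csv").dropna(subset=["Year"])
matches["Year"] = matches["Year"].astype(int)  # Year is read as float because of the empty rows
matches_with_winner = matches.merge(cups[["Year", "Winner"]], on="Year", how="left")
print(matches_with_winner[["Year", "Home Team Name", "Away Team Name", "Winner"]].head())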
import pandas as pd
df = pd.read_csv('fifa-world-cup/WorldCupMatches.csv')
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4572 entries, 0 to 4571
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Year 852 non-null float64
1 Datetime 852 non-null object
2 Stage 852 non-null object
3 Stadium 852 non-null object
4 City 852 non-null object
5 Home Team Name 852 non-null object
6 Home Team Goals 852 non-null float64
7 Away Team Goals 852 non-null float64
8 Away Team Name 852 non-null object
9 Win conditions 852 non-null object
10 Attendance 850 non-null float64
11 Half-time Home Goals 852 non-null float64
12 Half-time Away Goals 852 non-null float64
13 Referee 852 non-null object
14 Assistant 1 852 non-null object
15 Assistant 2 852 non-null object
16 RoundID 852 non-null float64
17 MatchID 852 non-null float64
18 Home Team Initials 852 non-null object
19 Away Team Initials 852 non-null object
dtypes: float64(8), object(12)
memory usage: 714.5+ KB
Examples:
{
"Year": 1930,
"Datetime": "1930-07-13 15:00:00",
"Stage": "Group 1",
"Stadium": "Pocitos",
"City": "Montevideo ",
"Home Team Name": "France",
"Home Team Goals": 4,
"Away Team Goals": 1,
"Away Team Name": "Mexico",
"Win conditions": " ",
"Attendance": 4444,
"Half-time Home Goals": 3,
"Half-time Away Goals": 0,
"Referee": "LOMBARDI Domingo (URU)",
"Assistant 1": "CRISTOPHE Henry (BEL)",
"Assistant 2": "REGO Gilberto (BRA)",
"RoundID": 201,
"MatchID": 1096,
"Home Team Initials": "FRA",
"Away Team Initials": "MEX"
}
{
"Year": 1930,
"Datetime": "1930-07-13 15:00:00",
"Stage": "Group 4",
"Stadium": "Parque Central",
"City": "Montevideo ",
"Home Team Name": "USA",
"Home Team Goals": 3,
"Away Team Goals": 0,
"Away Team Name": "Belgium",
"Win conditions": " ",
"Attendance": 18346,
"Half-time Home Goals": 2,
"Half-time Away Goals": 0,
"Referee": "MACIAS Jose (ARG)",
"Assistant 1": "MATEUCCI Francisco (URU)",
"Assistant 2": "WARNKEN Alberto (CHI)",
"RoundID": 201,
"MatchID": 1090,
"Home Team Initials": "USA",
"Away Team Initials": "BEL"
}
{
"Year": 1930,
"Datetime": "1930-07-14 12:45:00",
"Stage": "Group 2",
"Stadium": "Parque Central",
"City": "Montevideo ",
"Home Team Name": "Yugoslavia",
"Home Team Goals": 2,
"Away Team Goals": 1,
"Away Team Name": "Brazil",
"Win conditions": " ",
"Attendance": 24059,
"Half-time Home Goals": 2,
"Half-time Away Goals": 0,
"Referee": "TEJADA Anibal (URU)",
"Assistant 1": "VALLARINO Ricardo (URU)",
"Assistant 2": "BALWAY Thomas (FRA)",
"RoundID": 201,
"MatchID": 1093,
"Home Team Initials": "YUG",
"Away Team Initials": "BRA"
}
{
"Year": 1930,
"Datetime": "1930-07-14 14:50:00",
"Stage": "Group 3",
"Stadium": "Pocitos",
"City": "Montevideo ",
"Home Team Name": "Romania",
"Home Team Goals": 3,
"Away Team Goals": 1,
"Away Team Name": "Peru",
"Win conditions": " ",
"Attendance": 2549,
"Half-time Home Goals": 1,
"Half-time Away Goals": 0,
"Referee": "WARNKEN Alberto (CHI)",
"Assistant 1": "LANGENUS Jean (BEL)",
"Assistant 2": "MATEUCCI Francisco (URU)",
"RoundID": 201,
"MatchID": 1098,
"Home Team Initials": "ROU",
"Away Team Initials": "PER"
}
import pandas as pd
df = pd.read_csv('fifa-world-cup/WorldCupPlayers.csv')
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 37784 entries, 0 to 37783
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 RoundID 37784 non-null int64
1 MatchID 37784 non-null int64
2 Team Initials 37784 non-null object
3 Coach Name 37784 non-null object
4 Line-up 37784 non-null object
5 Shirt Number 37784 non-null int64
6 Player Name 37784 non-null object
7 Position 4143 non-null object
8 Event 9069 non-null object
dtypes: int64(3), object(6)
memory usage: 2.6+ MB
Examples:
{
"RoundID": 201,
"MatchID": 1096,
"Team Initials": "FRA",
"Coach Name": "CAUDRON Raoul (FRA)",
"Line-up": "S",
"Shirt Number": 0,
"Player Name": "Alex THEPOT",
"Position": "GK",
"Event": null
}
{
"RoundID": 201,
"MatchID": 1096,
"Team Initials": "MEX",
"Coach Name": "LUQUE Juan (MEX)",
"Line-up": "S",
"Shirt Number": 0,
"Player Name": "Oscar BONFIGLIO",
"Position": "GK",
"Event": null
}
{
"RoundID": 201,
"MatchID": 1096,
"Team Initials": "FRA",
"Coach Name": "CAUDRON Raoul (FRA)",
"Line-up": "S",
"Shirt Number": 0,
"Player Name": "Marcel LANGILLER",
"Position": null,
"Event": "G40'"
}
{
"RoundID": 201,
"MatchID": 1096,
"Team Initials": "MEX",
"Coach Name": "LUQUE Juan (MEX)",
"Line-up": "S",
"Shirt Number": 0,
"Player Name": "Juan CARRENO",
"Position": null,
"Event": "G70'"
}
import pandas as pd
df = pd.read_csv('fifa-world-cup/WorldCups.csv')
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 20 entries, 0 to 19
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Year 20 non-null int64
1 Country 20 non-null object
2 Winner 20 non-null object
3 Runners-Up 20 non-null object
4 Third 20 non-null object
5 Fourth 20 non-null object
6 GoalsScored 20 non-null int64
7 QualifiedTeams 20 non-null int64
8 MatchesPlayed 20 non-null int64
9 Attendance 20 non-null object
dtypes: int64(4), object(6)
memory usage: 1.7+ KB
Examples:
{
"Year": 1930,
"Country": "Uruguay",
"Winner": "Uruguay",
"Runners-Up": "Argentina",
"Third": "USA",
"Fourth": "Yugoslavia",
"GoalsScored": 70,
"QualifiedTeams": 13,
"MatchesPlayed": 18,
"Attendance": "590.549"
}
{
"Year": 1934,
"Country": "Italy",
"Winner": "Italy",
"Runners-Up": "Czechoslovakia",
"Third": "Germany",
"Fourth": "Austria",
"GoalsScored": 70,
"QualifiedTeams": 16,
"MatchesPlayed": 17,
"Attendance": "363.000"
}
{
"Year": 1938,
"Country": "France",
"Winner": "Italy",
"Runners-Up": "Hungary",
"Third": "Brazil",
"Fourth": "Sweden",
"GoalsScored": 84,
"QualifiedTeams": 15,
"MatchesPlayed": 18,
"Attendance": "375.700"
}
{
"Year": 1950,
"Country": "Brazil",
"Winner": "Uruguay",
"Runners-Up": "Brazil",
"Third": "Sweden",
"Fourth": "Spain",
"GoalsScored": 88,
"QualifiedTeams": 13,
"MatchesPlayed": 22,
"Attendance": "1.045.246"
}
import numpy as np  # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import plotly as py
import cufflinks as cf
import plotly.graph_objs as go
from plotly.offline import iplot
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
players = pd.read_csv("/kaggle/input/fifa-world-cup/WorldCupPlayers.csv")
matches = pd.read_csv("/kaggle/input/fifa-world-cup/WorldCupMatches.csv")
world_cup = pd.read_csv("/kaggle/input/fifa-world-cup/WorldCups.csv")
# Exploratory data analysis
# - Descriptive statistics
# - Applied research
# - Quali-quantitative approach
# # First look at the data
players.head()
matches.head()
world_cup.tail()
# # Fixing some names
matches.dropna(subset=["Year"], inplace=True)  # drop null rows
names = matches[matches["Home Team Name"].str.contains('rn">')][
    "Home Team Name"
].value_counts()  # some names are corrupted and need to be fixed
wrong = list(names.index)  # collect the corrupted names
correct = [name.split(">")[1] for name in wrong]  # strip the stray markup
old_name = [
    "Germany FR",
    "Maracan� - Est�dio Jornalista M�rio Filho",
    "Estadio do Maracana",
]  # names that also need to be changed (kept verbatim so they match the raw data)
new_name = ["Germany", "Maracan Stadium", "Maracan Stadium"]  # replacements
wrong = wrong + old_name
correct = correct + new_name
wrong, correct
for index, wr in enumerate(wrong):
world_cup = world_cup.replace(wrong[index], correct[index])
for index, wr in enumerate(wrong):
matches = matches.replace(wrong[index], correct[index])
for index, wr in enumerate(wrong):
players = players.replace(wrong[index], correct[index])
names = matches[matches["Home Team Name"].str.contains('rn">')][
"Home Team Name"
].value_counts()
names
# # Do teams playing at home perform better?
import plotly.express as px
import pandas as pd
import numpy as np
casa = pd.pivot_table(matches, values="Home Team Goals", index="Year")  # home-team goals per year
fora = pd.pivot_table(matches, values="Away Team Goals", index="Year")  # away-team goals per year
fig = px.line(
    casa,
    x=casa.index,
    y="Home Team Goals",
    title="Average goals: home team (blue) vs. away team (red)",
    labels={"Home Team Goals": "Number of goals"},
)
fig.add_scatter(
    x=fora.index, y=fora["Away Team Goals"], mode="lines", name="Away team"
)
fig.show()
def get_labels(matches):
    if matches["Home Team Goals"] > matches["Away Team Goals"]:
        return "Home win"
    if matches["Home Team Goals"] < matches["Away Team Goals"]:
        return "Away win"
    return "Draw"


matches["Result"] = matches.apply(lambda x: get_labels(x), axis=1)
mt = matches["Result"].value_counts()
plt.figure(figsize=(6, 6))
mt.plot.pie(autopct="%1.0f%%", colors=sns.color_palette("winter_r"), shadow=True)
c = plt.Circle((0, 0), 0.4, color="white")
plt.title("Match results: home team vs. away team")
plt.show()
# # Which countries win the most?
winner = world_cup["Winner"].value_counts()
runnerup = world_cup["Runners-Up"].value_counts()
third = world_cup["Third"].value_counts()
teams = pd.concat([winner, runnerup, third], axis=1)
teams.fillna(0, inplace=True)
teams = teams.astype(int)
teams
py.offline.init_notebook_mode(connected=True)
cf.go_offline()
teams.iplot(
    kind="bar", xTitle="Teams", yTitle="Count", title="World Cup winners"
)
# # Which countries score the most goals?
casa = matches[["Home Team Name", "Home Team Goals"]].dropna()
fora = matches[["Away Team Name", "Away Team Goals"]].dropna()
casa.columns = ["Countries", "Goals"]
fora.columns = casa.columns
gols = casa.append(fora, ignore_index=True)
gols = gols.groupby("Countries").sum()
gols = gols.sort_values(by="Goals", ascending=False)
gols[:20].iplot(
    kind="bar", xTitle="Countries", yTitle="Goals", title="The 20 highest-scoring countries"
)
# ### Measures of position (descriptive statistics)
# describe the tournaments hosted by each country (the original dropped the filter,
# so all three outputs were identical; here the filtered frame is actually used)
medida_de_posicao = world_cup[world_cup["Country"] == "Germany"]
medidas_de_posicao = medida_de_posicao.drop(["Year"], axis=1)
round(medidas_de_posicao.describe(), 2)
medida_de_posicao = world_cup[world_cup["Country"] == "Brazil"]
medidas_de_posicao = medida_de_posicao.drop(["Year"], axis=1)
round(medidas_de_posicao.describe(), 2)
medida_de_posicao = world_cup[world_cup["Country"] == "Argentina"]
medidas_de_posicao = medida_de_posicao.drop(["Year"], axis=1)
round(medidas_de_posicao.describe(), 2)
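# An alternative sketch (not in the original notebook): the same per-host-country summaries
# in a single groupby pass, using only columns already present in world_cup.
host_stats = (
    world_cup[world_cup["Country"].isin(["Germany", "Brazil", "Argentina"])]
    .drop(["Year"], axis=1)
    .groupby("Country")
    .describe()
)
round(host_stats, 2)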
# # Map of total wins
# Total wins per country
df = world_cup[["Year", "Winner"]].dropna()
print("Total wins:")
df["Winner"].value_counts()
df["Vitorias Totais"] = 0
df.loc[7, "Winner"] = "UK"  # fix a name (the 1966 winner) so the map can place it
# add the cumulative number of wins per year
for x in df["Winner"].unique():
temp = 0
for i in df.index:
if x == df["Winner"][i]:
df["Vitorias Totais"][i] = temp + 1
temp = df["Vitorias Totais"][i]
df
fig = px.choropleth(
df,
locations="Winner",
locationmode="country names",
color="Vitorias Totais",
animation_frame="Year",
hover_name="Winner",
hover_data=["Vitorias Totais"],
range_color=(0, 5),
projection="natural earth",
    title="Total wins",
)
fig.update_layout(autosize=False, width=1800, height=700)
fig.show()
# # **House Price Prediction using Advanced Regression:**
# * **EDA**
# * **Pre-Processing**
# * **Linear Regression**
#
# loading necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# loading train and test from house-prices dataset
train_set = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/train.csv"
)
test_set = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
test_label = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
# adding the test label column
test_set["SalePrice"] = test_label["SalePrice"]
# combining the train and test set for cleaning
df = pd.concat([train_set, test_set])
# # **EDA**
plt.scatter(train_set.GrLivArea, train_set.SalePrice)
plt.xlabel("GrLivArea")
plt.ylabel("SalePrice")
# # Pre-Processing
# 1. Handling missing values
# 2. High/Low correlation data
# 3. Categorical data
# 4. Numerical columns to categorical
# 5. Dealing with outliers
# 6. Creating dummy variables
# (one-hot encoding: see the sketch right after this list)
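# Step 6 (dummy variables) is only listed above; later in this notebook the categorical
# columns are simply dropped before modelling. As an illustrative sketch of what that step
# could look like (not used by the rest of this pipeline), one-hot encoding the combined
# frame would be:
df_dummies = pd.get_dummies(df, drop_first=True)
df_dummies.shape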
# # 1. Handling **missing** values
# * Dropping columns with more than 70% null values, plus Id (this might not be the best choice for every problem or dataset).
# * Handling null values in the rest of the features
# >
# First, find the features with the most missing values.
#
def missing_percent(df):
    nan_percent = 100 * (df.isnull().sum() / len(df))
    nan_percent = nan_percent[nan_percent > 0].sort_values(ascending=False).round(1)
    DataFrame = pd.DataFrame(nan_percent)
    # Rename the column (the table is already sorted by missing percentage, descending)
    mis_percent_table = DataFrame.rename(columns={0: "% of Missing Values"})
    mis_percent = mis_percent_table
    return mis_percent
miss = missing_percent(df)
miss
# drop features that have more than 70% missing values
# credit: https://www.kaggle.com/rushikeshdarge/handle-missing-values-only-notebook-you-need
threshold = 70
drop_cols = miss[miss["% of Missing Values"] > threshold].index.tolist()
drop_cols
df = df.drop(columns=drop_cols)
df.shape
# Removing the Id that has no value for our prediction
df = df.drop("Id", axis=1)
nan_percent = 100 * (df.isnull().sum() / len(df))
nan_percent = nan_percent[nan_percent > 0].sort_values()
# Every feature with missing data must be checked!
# We choose a threshold of 1%: the plot below zooms in on features where less than 1% of the values are missing
plt.figure(figsize=(12, 6))
sns.barplot(x=nan_percent.index, y=nan_percent)
plt.xticks(rotation=90)
# Set 1% threshold:
plt.ylim(0, 1)
# **FireplaceQu: Fireplace quality**
# * According to the data documentation, an NA value here means the house has no fireplace, so we fill the column with 'None'.
df["FireplaceQu"] = df["FireplaceQu"].fillna("None")
# Filling null values with the most frequent value or a generic 'Other'-type category
df["KitchenQual"] = df["KitchenQual"].fillna("TA")
df["SaleType"] = df["SaleType"].fillna("Oth")
df["Utilities"] = df["Utilities"].fillna("Other")
df["Functional"] = df["Functional"].fillna("Typ")
df["Exterior2nd"] = df["Exterior2nd"].fillna("Other")
df["Exterior1st"] = df["Exterior1st"].fillna("Other")
# **Garage & Basement**
# * Looking at the plot, we see that most features with missing values belong to the same categories.
# After checking the data documentation,
# the missing values in the basement features occur because those rows have no basement.
# Decision: fill based on the column type (numerical basement columns with 0, descriptive string columns with 'None'):
# Numerical Columns fill with 0:
bsmt_num_cols = [
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
]
df[bsmt_num_cols] = df[bsmt_num_cols].fillna(0)
# String Columns fill with None:
bsmt_str_cols = ["BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"]
df[bsmt_str_cols] = df[bsmt_str_cols].fillna("None")
# **Mas Vnr Features:**
# * Based on the dataset documentation, missing values for 'Mas Vnr Type' and 'Mas Vnr Area' mean the house doesn't have any masonry veneer, so we fill the missing values as below:
df["MasVnrType"] = df["MasVnrType"].fillna("None")
df["MasVnrArea"] = df["MasVnrArea"].fillna(0)
# **Garage Columns:**
# * Based on the dataset documentation, NaN in Garage Columns seems to indicate no garage.
# * Decision: Fill with 'None' or 0
df[["GarageType", "GarageYrBlt", "GarageFinish", "GarageQual", "GarageCond"]]
# Filling the missing Value:
Gar_str_cols = ["GarageType", "GarageFinish", "GarageQual", "GarageCond"]
df[Gar_str_cols] = df[Gar_str_cols].fillna("None")
df["GarageYrBlt"] = df["GarageYrBlt"].fillna(0)
# Impute missing data based on other columns:
df.groupby("Neighborhood")["LotFrontage"]
df.groupby("Neighborhood")["LotFrontage"].mean()
# Filling null values mean value
df.groupby("Neighborhood")["LotFrontage"].transform(lambda val: val.fillna(val.mean()))
df["LotFrontage"] = df.groupby("Neighborhood")["LotFrontage"].transform(
lambda val: val.fillna(val.mean())
)
df["LotFrontage"] = df["LotFrontage"].fillna(0)
df[df["Electrical"].isnull()]
df[df["GarageArea"].isnull()]
# **Filling missing values with most freq**
# * Functional
# * Exterior1st
# * Exterior2nd
# * KitchenQual
# * SaleType
# * Utilities
# * Functional
# * MSZoning
df[df["MSZoning"].isnull()]
# Filling null values most freq value
d = (
df["MSZoning"]
.value_counts()[
df["MSZoning"].value_counts() == df["MSZoning"].value_counts().max()
]
.index
)
d
df["MSZoning"] = df["MSZoning"].fillna("RL")
# Filling any remaining LotFrontage nulls with the neighborhood maximum
df["LotFrontage"] = df.groupby("Neighborhood")["LotFrontage"].transform(
    lambda val: val.fillna(val.max())
)
df = df.dropna(axis=0, subset=["Electrical", "GarageArea"])
# Filling null values most freq value
df["Functional"].value_counts()[
df["Functional"].value_counts() == df["Functional"].value_counts().max()
].index
nan_percent = 100 * (df.isnull().sum() / len(df))
nan_percent = nan_percent[nan_percent > 0].sort_values()
# plot the feature with missing indicating the percent of missing data
plt.figure(figsize=(12, 6))
sns.barplot(x=nan_percent.index, y=nan_percent)
plt.xticks(rotation=90)
df[df["MSZoning"].isnull()]
nan_percent["SalePrice"]
df.isnull().sum()
# # Handling Outliers
corr = train_set.corr()
top_corr_features = corr.index[abs(corr["SalePrice"]) > 0.5].sort_values(
ascending=True
)
top_corr_features
sns.scatterplot(data=df, x="OverallQual", y="SalePrice")
plt.axhline(y=200000, color="r")
df[(df["OverallQual"] > 8) & (df["SalePrice"] < 200000)][["SalePrice", "OverallQual"]]
sns.scatterplot(x="GrLivArea", y="SalePrice", data=df)
plt.axhline(y=200000, color="r")
plt.axvline(x=4000, color="r")
df[(df["GrLivArea"] > 4000) & (df["SalePrice"] < 400000)][["SalePrice", "GrLivArea"]]
# Remove the outliers:
index_drop = df[(df["GrLivArea"] > 4000) & (df["SalePrice"] < 400000)].index
df = df.drop(index_drop, axis=0)
sns.scatterplot(x="GrLivArea", y="SalePrice", data=df)
plt.axhline(y=200000, color="r")
plt.axvline(x=4000, color="r")
sns.scatterplot(x="OverallQual", y="SalePrice", data=df)
plt.axhline(y=200000, color="r")
sns.scatterplot(data=df, x="GrLivArea", y="SalePrice")
plt.axhline(y=200000, color="r")
sns.scatterplot(data=df, x="GarageCars", y="SalePrice")
plt.axhline(y=200000, color="r")
sns.scatterplot(data=df, x="TotRmsAbvGrd", y="SalePrice")
plt.axhline(y=200000, color="r")
# # Dropping some features that have high/low correlation
sns.scatterplot(data=df, x="YearBuilt", y="SalePrice")
plt.axhline(y=200000, color="r")
# get correlations of each feature in the dataset
# Plotting a heat map to visualise the correlation data better.
# Drawn only for features having a high correlation
# (>0.5) with the target variable
corr = train_set.corr()
top_corr_features = corr.index[abs(corr["SalePrice"]) > 0.5]
plt.figure(figsize=(10, 10))
# plot heat map
g = sns.heatmap(train_set[top_corr_features].corr(), annot=True, cmap="YlGnBu")
categorical_features = [
    feature for feature in train_set.columns if train_set[feature].dtype == "O"
]
categorical_features
# train_set.drop(Id)
# Continuous features: first derive the numeric, year-related and discrete feature
# lists that the selection below depends on
train_set_numeric = [f for f in train_set.columns if train_set[f].dtype != "O"]
year_feature = [f for f in train_set_numeric if "Yr" in f or "Year" in f]
discrete_feature = [
    f
    for f in train_set_numeric
    if train_set[f].nunique() < 25 and f not in year_feature + ["Id"]
]
continuous_feature = [
    feature
    for feature in train_set_numeric
    if feature not in discrete_feature + year_feature + ["Id"]
]
print("Continuous feature Count: {}".format(len(continuous_feature)))
# log-transform the skewed continuous features and inspect them with boxplots
for feature in continuous_feature:
data = train_set.copy()
if 0 in data[feature].unique():
pass
else:
data[feature] = np.log(data[feature])
data.boxplot(column=feature)
plt.ylabel(feature)
plt.title(feature)
plt.show()
train_set.groupby("OverallQual")["SalePrice"].median().plot()
plt.xlabel("Year Sold")
plt.ylabel("Median House Price")
plt.title("House Price vs YearSold")
top_corr_features
# # **Linear Regression**
categorical_features = [
feature for feature in train_set.columns if train_set[feature].dtype == "O"
]
categorical_features
train_set.drop(categorical_features, axis=1, inplace=True)
# train_set.drop(Id)
train_set
# Separate features and target from train_df
X = train_set.drop(["Id", "SalePrice"], axis=1)
y = train_set["SalePrice"]
X = X.apply(pd.to_numeric, errors="coerce")
y = y.apply(pd.to_numeric, errors="coerce")
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# Split the Dataset to Train & Test
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=100)
# fillna returns a new object, so assign the results back; otherwise the NaNs would remain
X_test = X_test.fillna(X_test.mean())
X_train = X_train.fillna(X_train.mean())
y_train = y_train.fillna(y_train.mean())
model = LinearRegression()
model = model.fit(X_train, y_train)
y_pred = model.predict(X_test)
from sklearn import metrics
MAE = metrics.mean_absolute_error(y_test, y_pred)
MSE = metrics.mean_squared_error(y_test, y_pred)
RMSE = np.sqrt(MSE)
pd.DataFrame([MAE, MSE, RMSE], index=["MAE", "MSE", "RMSE"], columns=["Metrics"])
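# As an extra sanity check (a sketch beyond the original pipeline), we can cross-validate
# the same linear model. X still contains NaNs at this point, so we impute with column
# means first; the 5-fold RMSE values give a rougher but more robust error estimate.
from sklearn.model_selection import cross_val_score

X_filled = X.fillna(X.mean())
cv_rmse = np.sqrt(
    -cross_val_score(model, X_filled, y, cv=5, scoring="neg_mean_squared_error")
)
pd.DataFrame(cv_rmse, columns=["Fold RMSE"])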
# Welcome to the Guided Notes for Robotics for All Week 6: Class 1! We are going to be going over some pandas functions to make it easier for you to use some of the things you learned in our presentation.
# One important function to know how to use is the .mean() function in Pandas. As the name suggests, this function allows you to calculate the mean.
# Given a set of rows and columns let's take a look at how the mean function in pandas would look down below:
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
df.mean()
# As seen above, the mean is calculated for every column, which is done by averaging all the row values for that specific column.
# Example: Mean of Column A
# Column A:
# Row 1: 6
# Row 2: 9
# Row 3: 12
# Row 4: 7
# 6 + 9 + 12 + 7 = 34
# 34 / 4 = 8.50
#
# What if we wanted to collect the mean of all the columns given above?
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
column_means = df.mean()
total_mean = column_means.mean()
print(column_means)
print("Final Mean:", total_mean)
# See what we did above? We took the mean of the columns' means by assigning a variable to the column means and then averaging them.
# There is a specific parameter, common to a lot of the other functions you are going to learn in this class, called the axis parameter. As seen above, the default is always axis = 0. With axis = 0, the mean is calculated for every column by averaging all of the row values.
# What if we wanted to take the mean of an individual row and average all the column values?
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
df.mean(axis=1)
# When we changed the axis parameter to equal 1, we get the average for every row using the column values.
# As we have learned before, you can clean data and remove values which you do not want to have in your dataset. What if we had data that has not been cleaned?
import pandas as pd
df = pd.DataFrame(
{
"A": [9, 4, 5, None, 6],
"B": [5, 6, 13, 3, None],
"C": [20, 16, 11, 3, 7],
"D": [15, 5, None, 2, 6],
}
)
# finding the mean of every row while skipping the null values
print(df)
df.mean(axis=1, skipna=True)
# As shown in the example above, we use a second parameter, skipna = True. This indicates that we want to skip the Null values when calculating the mean.
# Notice that we set skipna = True, but what if we wanted to take the Null values into account? We would simply change skipna = False, as sketched below. This will be important while using other functions later on.
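# For example (a quick sketch reusing the same data as above), keeping the Null values in
# the calculation makes any row that contains a Null produce NaN as its mean:
import pandas as pd

df = pd.DataFrame(
    {
        "A": [9, 4, 5, None, 6],
        "B": [5, 6, 13, 3, None],
        "C": [20, 16, 11, 3, 7],
        "D": [15, 5, None, 2, 6],
    }
)
df.mean(axis=1, skipna=False)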
# Now that we have introduced the .mean() function and you are comfortable with the basic parameters let's look at the .median() function.
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
# default axis is 0
df.median()
# Let's break down column A's median.
# Rearrange the numbers in column A from least to greatest:
# 6, 7, 9, 12.
# The median would be between 7 and 9 which in this case is 8.
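# A quick check of that calculation (a sketch): with an even number of values, the median
# is the average of the two middle values after sorting.
sorted_a = sorted([6, 9, 12, 7])
(sorted_a[1] + sorted_a[2]) / 2  # (7 + 9) / 2 = 8.0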
import pandas as pd
df = pd.DataFrame(
{
"A": [9, 4, 5, None, 6],
"B": [5, 6, 13, 3, None],
"C": [20, 16, 11, 3, 7],
"D": [15, 5, None, 2, 6],
}
)
# finding the median of every row while skipping the null values
print(df)
df.median(axis=1, skipna=True)
# Welcome to the Guided Notes for Robotics for All Week 6: Class 1! We are going to be going over some pandas functions to make it easier for you to use some of the things you learned in our presentation.
# One function that is important to know and how to use is the .mean() function in Pandas. Given in the name, this function allows you to calculate the mean.
# Given a set of rows and columns let's take a look at how the mean function in pandas would look down below:
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
df.mean()
# As seen above, the mean is calculated for every column, which is done by averaging all the row values for that specific column.
# Example: Mean of Column A
# Column A:
# Row 1: 6
# Row 2: 9
# Row 3: 12
# Row 4: 7
# 6 + 9 + 12 + 7 = 34
# 34 / 4 = 8.50
#
# What if we wanted to collect the mean of all the columns given above?
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
column_means = df.mean()
total_mean = column_means.mean()
print(column_means)
print("Final Mean:", total_mean)
# See what we did above? We took the mean of the column's means by assigning a variable to the given column means.
# There is a specific parameter that is common for a lot of other function you are going to learn in this class called the axis parameter. As seen above, the default is always axis = 0. Axis = 0 is where the mean is calculate for every column averaging all of the row values.
# What if we wanted to take the mean of an individual row and average all the column values?
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
df.mean(axis=1)
# When we changed the axis parameter to equal 1, we get the average for every row using the column values.
# As we have learned before, you can clean data and removed values which you do not want to have in your dataset. What if we were to have data that is not cleaned?
import pandas as pd
df = pd.DataFrame(
{
"A": [9, 4, 5, None, 6],
"B": [5, 6, 13, 3, None],
"C": [20, 16, 11, 3, 7],
"D": [15, 5, None, 2, 6],
}
)
# finding the mean of every row while skipping the null values
print(df)
df.mean(axis=1, skipna=True)
# As shown in the example above, we use a second parameter, skipna = True. This indicates that we want to skip the Null values when calculating the mean.
# Notice that we set skipna = True, but what if we wanted to take the Null values into account? We would simply change it to skipna = False. This will be important when using other functions later on.
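# Here is a quick sketch of what that looks like, reusing the same small example frame as above; with skipna = False, any row that contains a Null value gets NaN as its mean instead of a number.
import pandas as pd
df = pd.DataFrame(
    {
        "A": [9, 4, 5, None, 6],
        "B": [5, 6, 13, 3, None],
        "C": [20, 16, 11, 3, 7],
        "D": [15, 5, None, 2, 6],
    }
)
# rows that contain a Null value now come back as NaN instead of being averaged
df.mean(axis=1, skipna=False)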
# Now that we have introduced the .mean() function and you are comfortable with the basic parameters let's look at the .median() function.
import pandas as pd
df = pd.DataFrame(
{"A": [6, 9, 12, 7], "B": [3, 4, 11, 1], "C": [11, 6, 15, 15], "D": [15, 5, 7, 2]}
)
print("Example Data Below:")
print(df)
# default axis is 0
df.median()
# Let's break down column A's median.
# Rearrange the numbers in column A from least to greatest:
# 6, 7, 9, 12.
# The median would be between 7 and 9 which in this case is 8.
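# As a quick check of that breakdown, we can take the median of just those four values:
import pandas as pd
pd.Series([6, 9, 12, 7]).median()  # returns 8.0, the midpoint of 7 and 9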
import pandas as pd
df = pd.DataFrame(
{
"A": [9, 4, 5, None, 6],
"B": [5, 6, 13, 3, None],
"C": [20, 16, 11, 3, 7],
"D": [15, 5, None, 2, 6],
}
)
# finding the median of every row while skipping the null values
print(df)
df.median(axis=1, skipna=True)
# # Introduction
# In this notebook we explore credit data from a financial institution's database. Our response variable is called **default**, and it tells us whether a client is current on their payments (`default = 0`) or delinquent (`default = 1`).
# Our goal is to understand why a client stops honouring their debts, based on explanatory variables such as *salary*, *education level* and *financial activity*.
# # Importing the libraries
# We will use a few external libraries:
# * **Pandas** to read and manipulate the data,
# * **Seaborn** and **Matplotlib** to build and display the plots.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# # Loading the data
# We start by loading the data from our CSV file, telling pandas which values are null *(marked as 'na')*.
dataset_path = "../input/exemplocredito/credito.csv"
df = pd.read_csv(dataset_path, na_values="na")
df.head(n=5)  # Show the first 5 rows as an example.
# # Exploring the data
# Let's run a few analyses to understand how the data behaves.
total_dados, _ = df.shape  # Number of rows in the dataset
total_adimplentes, _ = df[df["default"] == 0].shape  # Total of non-defaulting clients
total_inadimplentes, _ = df[df["default"] == 1].shape  # Total of defaulting clients
print("Total de clientes: ", total_dados)
print("Total de adimplentes: ", total_adimplentes)
print("Total de inadimplentes: ", total_inadimplentes)
prop_clientes_adimplentes = round(
    100 * total_adimplentes / total_dados, 2
)  # Proportion of non-defaulting clients
prop_clientes_inadimplentes = round(
    100 * total_inadimplentes / total_dados, 2
)  # Proportion of defaulting clients
print(f"proporção de clientes adimplentes: {prop_clientes_adimplentes}%")
print(f"proporção de clientes inadimplentes: {prop_clientes_inadimplentes}%")
# This gives us an initial picture: the database has 10127 clients, of which 8500 are in good standing and 1627 are delinquent.
# We can also see that 83.93% of the clients are non-defaulting against 16.07% defaulting.
# # Cleaning and transforming the data
# Now we will clean the data to guarantee its integrity, and then drop the rows that have missing values.
# To do that, we first need to understand the nature of our data by checking which columns are **categorical** and which are **numerical**.
df.select_dtypes("object").describe().transpose() # Retorna os atributos categóricos
df.select_dtypes("number").describe().transpose() # Retorna os atributos numéricos
# *Transaction value* and *credit limit* should be numerical attributes, but they are being recognised as categorical ones.
# This happens because the values are written in a format different from the international standard that Python recognises.
# Let's convert these columns to the *float* type.
# ## Transforming the data
df[
    ["limite_credito", "valor_transacoes_12m"]
].dtypes  # Confirms that the limite_credito and valor_transacoes_12m columns are of type object
df["valor_transacoes_12m"] = df["valor_transacoes_12m"].apply(
lambda x: float(x.replace(".", "").replace(",", "."))
)
df["limite_credito"] = df["limite_credito"].apply(
lambda x: float(x.replace(".", "").replace(",", "."))
)
# We apply a lambda function to reformat the values of both columns
df[
    ["limite_credito", "valor_transacoes_12m"]
].dtypes  # The data is now recognised as float64
# ## Dropping rows
# Now we should drop the rows with missing data (represented by na); since we will later need several explanatory variables, missing data would bring inconsistency into our analysis.
df.dropna(inplace=True)
# Let's check how our database looks after the cleaning.
total_dados, _ = df.shape  # Number of rows in the dataset
total_adimplentes, _ = df[df["default"] == 0].shape  # Total of non-defaulting clients
total_inadimplentes, _ = df[df["default"] == 1].shape  # Total of defaulting clients
print("Total de clientes: ", total_dados)
print("Total de adimplentes: ", total_adimplentes)
print("Total de inadimplentes: ", total_inadimplentes)
print(
f"A proporcão adimplentes ativos é de {round(100 * total_adimplentes / total_dados, 2)}%"
)
print(
f"A proporcão clientes inadimplentes é de {round(100 * total_inadimplentes / total_dados, 2)}%"
)
# In this case the proportion of non-defaulting and defaulting clients barely changed, which shows that the cleaning will not disturb our later analysis.
# # Visualising the data
# With the data ready, we will relate the explanatory variables to the response variable to understand which factors lead a client to default. To do that, we compare the full client base with the non-defaulting and defaulting subsets.
# Our attributes of interest are the numerical ones, so let's look at some of their relationships with our response variable **default**.
df.drop(["id", "default"], axis=1).select_dtypes("number").head(
    n=5
)  # Attributes of interest
sns.set_style("whitegrid")  # Plot style
# * Number of transactions in the last 12 months
coluna = "qtd_transacoes_12m"
titulos = [
"Qtd. de Transações no Último Ano",
"Qtd. de Transações no Último Ano de Adimplentes",
"Qtd. de Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df[df["default"] == 0], df[df["default"] == 1]]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# Looking at the number of transactions for defaulting clients, we can see that their transaction frequency is much lower than for non-defaulting clients, but it peaks between 30 and 70 transactions per year; we can use this as a warning sign and watch clients showing that behaviour.
# So we can conclude that clients whose activity is heavily concentrated between 30 and 70 transactions per year tend to become delinquent.
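# A quick numeric check of that reading (a small sketch, assuming df is the cleaned frame used above): comparing the distribution of yearly transaction counts for each group side by side.
df.groupby("default")["qtd_transacoes_12m"].describe()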
# * Value of transactions in the last 12 months
coluna = "valor_transacoes_12m"
titulos = [
"Valor das Transações no Último Ano",
"Valor das Transações no Último Ano de Adimplentes",
"Valor das Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df[df["default"] == 0], df[df["default"] == 1]]:  # full base, non-defaulting and defaulting subsets
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# There are some very interesting insights here! The bulk of our client base concentrates between 1000 and 6000 in transaction value over the last 12 months, with small peaks near 7500 and another around 15000; non-defaulting clients follow the same behaviour, but the defaulting clients have their peak very close to 2500.
# So we can conclude that defaulting clients show a pattern where their total transaction value tends to sit close to 2500.
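# The same kind of check for the transaction values (again just a sketch on the frame above); the median of the defaulting group should sit near the ~2500 peak seen in the histogram.
df.groupby("default")["valor_transacoes_12m"].median()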
# * Value of Transactions in the Last 12 Months vs. Number of Transactions in the Last 12 Months
f = sns.relplot(
x="valor_transacoes_12m", y="qtd_transacoes_12m", data=df, hue="default"
)
_ = f.set(
title="Relação entre Valor e Quantidade de Transações no Último Ano",
xlabel="Valor das Transações no Último Ano",
ylabel="Quantidade das Transações no Último Ano",
)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from keras import models
from keras import layers
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras import regularizers
# # Data Loading
train = pd.read_csv("../input/digit-recognizer/train.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
train.head()
# # Data Preprocessing
train_labels = train["label"]
train1 = train.drop("label", axis=1)
train1 = np.asarray(train1).astype("float32") / 255
test = np.asarray(test).astype("float32") / 255
train1 = train1.reshape(train1.shape[0], 28, 28, 1)
test = test.reshape(test.shape[0], 28, 28, 1)
train_labels = to_categorical(train_labels)
img = train1[0] * 255
img_reshape = img.reshape(28, 28)
plt.imshow(img_reshape)
plt.show()
print(train1.shape)
print(train_labels.shape)
X_train, X_val, y_train, y_val = train_test_split(train1, train_labels, test_size=0.2)
# # CNN Model
model = models.Sequential()
model.add(
layers.Conv2D(
32,
(3, 3),
activation="relu",
input_shape=(28, 28, 1),
padding="same",
kernel_regularizer=regularizers.l2(0.001),
)
)
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))
model.add(
layers.Conv2D(
32,
(3, 3),
activation="relu",
padding="same",
kernel_regularizer=regularizers.l2(0.001),
)
)
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))
model.add(
layers.Conv2D(
32,
(3, 3),
activation="relu",
padding="same",
kernel_regularizer=regularizers.l2(0.001),
)
)
model.add(layers.Flatten())
model.add(
layers.Dense(32, activation="relu", kernel_regularizer=regularizers.l2(0.001))
)
model.add(layers.Dense(10, activation="softmax"))
from keras import optimizers
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["acc"])
model.fit(X_train, y_train, batch_size=128, epochs=20, validation_data=(X_val, y_val))
# # Model Evaluation
loss = pd.DataFrame(model.history.history)
loss[["acc", "val_acc"]].plot()
loss[["loss", "val_loss"]].plot()
model.fit(train1, train_labels, batch_size=128, epochs=20)
# # Prediction
predictions = model.predict(test, batch_size=32)
image_id = range(1, predictions.shape[0] + 1)
pred = [np.argmax(i) for i in predictions]
submission = pd.DataFrame({"ImageId": image_id, "Label": pred})
submission.to_csv("digit_recognizer_submission.csv", index=False)  # include the .csv extension so the output file is recognised
submission.head()
ss = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
ss
# Make a copy of this notebook and submit the link in Google Classroom. To submit the link to a Kaggle, click Share > Private > Switch it to Public > copy the link and submit in Classroom under the assignment.
# # 1. Printing Text
# Please print out your name!
# # 2. Variables/Math Operators
# 1. Create a variable that stores the total amount of cookies and assign it to the value ```30```
# 2. Create a variable that stores the cookies per jar and assign it to the value ```5```
# ## Tasks/Objectives
# 1. Find the number of jars and print it out like below
# * The total number of jars is: (number of jars)
# 2. Find how many cookies there are in 5 jars
# Remember to use variables throughout the entire task.
# # 3. String Manipulation
# Each DNA strand has 8 characters comprised of A, G, T, C.
# 1. Given the variables, try to make a DNA strand.
# 2. Replace A in the DNA strand with T
# 3. Make the DNA strand lowercase
# 4. Print out the length of the DNA strand to verify that you have all 8 characters, in the format "The length of the DNA strand is: (length of the DNA strand)"
#
# given variables
rando_1 = "AAA"
rando_2 = "GGG"
a = "A"
g = "g"
t = "t"
c = "c"
# your code here
# # 4. Iteration/Lists
# ## Exercise 1: The Counter
# Print all numbers from 0-10 using a for loop
# ## Exercise 2
# Use the 2 given lists to
# 1. Combine the lists
# 2. Print the list altogether
# 3. Print every individual element of the combined list
# 4. Sort the list by alphabetical order
# 5. Print the length of the combined list
# 6. Print the first element in the combined list
# ## Exercise 3: Odometer
# When someone is driving on the highway, there is no real speed limit. However, when someone is driving on a residential road, the speed limit is 25. To help our police officers enforce that, we will be writing a simple program. When ```road_state``` is 0, the driver is on the freeway and there is no speed limit. However, when ```road_state = 1```, they must obey the 25 mph speed limit. Remind them by detecting when they are on a residential road and printing a statement telling them to slow down to 25 mph.
# Exercise 1: The Counter
# Exercise 2
# given lists
cookies = ["Chocolate Chip", "Raisin", "White Chocolate Chip", "Sugar"]
ice_cream = ["Mint Chocolate Chip", "Cookie Dough", "Chocolate Chip"]
# your code here
# Exercise 3: Odometer
# given variables
road_state = 0
# your code here [below]
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
import matplotlib.pyplot as plt
plt.rcParams["figure.dpi"] = 100
import xgboost as xgb
# Training will be done by RandomForest Algorithm
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import (
    StandardScaler,
)  # StandardScaler standardises features to zero mean and unit variance (MinMaxScaler above is what maps values into the 0-1 range)
from sklearn.model_selection import train_test_split
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.metrics import mean_squared_log_error
train = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/train.csv")
test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv")
display(train)
display(test)
sns.pairplot(data=train)
# Based on the Pairplot above, following are my 1st cut inference
# 1) Targets are distributed along the entire range of humidity & Temperature. They can help to cluster the targets
# 2) Sensor 1, 2, 4 and 5 seem to have linear relation to the targets.
# 3) Sensor 3 has a non linear relationships with all three targets.
# Strategy:
# >> Run mutual information on each of the targets separately, and find the most influential feature.
# >> If the feature has non-linear relationships, then run principal component analysis to get a better fit.
# >> Model the data after that and produce the equation.
# >> Test it on the other file, and find out how well the model works.
# Start with the mutual information for target_carbon_monoxide
# Remove the dates from the set, since their influence is not high
train = train.drop("date_time", axis=1)
# popping out the targets from the train dataset
y_CO = train.pop("target_carbon_monoxide")
y_BE = train.pop("target_benzene")
y_NO = train.pop("target_nitrogen_oxides")
# Checking dataset before Scaling the training dataset.This will be the input to the model
display(train)
# Scaling using MinMax Scaler
scaler = MinMaxScaler(feature_range=(0, 1))
train = scaler.fit_transform(train)
train[0]
# Training data after scaling is an numpy array datatype
print(train)
print(type(train))
# With the raw data for the model building ready, it has to be split into 3 groups.
# 1) Training Dataset
# 2) Cross Validation Dataset
# 3) Testing Dataset
# Why split this way?
# In this competition the goal is to reach the lowest RMSE score on the test data that is shared with all competitors. What is not shared are the true sensor targets for that test data, on which the score will be calculated.
# You assume the role of the competition host and hold back some of the data and results from the models that you are training. This way, you can have a "Mock Exam" with the models before submitting to the real competition.
# Use the same random_state in every call so the separate target splits line up row-for-row with the feature splits
X_train, X_cv, y_train_co, y_cv_co = train_test_split(
    train, y_CO, test_size=0.2, random_state=42
)
X_train, X_test, y_train_co, y_test_co = train_test_split(
    X_train, y_train_co, test_size=0.2, random_state=42
)
# Working only on the remaining NO and benzene targets
y_train_no, y_cv_no = train_test_split(y_NO, test_size=0.2, random_state=42)
y_train_no, y_test_no = train_test_split(y_train_no, test_size=0.2, random_state=42)
y_train_be, y_cv_be = train_test_split(y_BE, test_size=0.2, random_state=42)
y_train_be, y_test_be = train_test_split(y_train_be, test_size=0.2, random_state=42)
# Target carbon Monoxide columns has been also split same as training data.
# Ensure the lengths of all the datasets are same
print(X_train.shape)
print(X_cv.shape)
print(X_test.shape)
print(y_train_co.shape)
print(y_cv_co.shape)
print(y_test_co.shape)
# Running the MI scores for the target carbon monoxide
# Sensor 3 is not linear as per the pattern above. The components of this sensor can render a different result, so PCA is worth trying after the linear regression.
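# A minimal sketch of that mutual-information step (assuming sklearn's mutual_info_regression is the intended tool; X_train and y_train_co come from the split above):
from sklearn.feature_selection import mutual_info_regression
mi_scores = mutual_info_regression(X_train, y_train_co, random_state=0)
print("MI scores per feature (CO target):", np.round(mi_scores, 3))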
# Fitting the data that matters
CO_r = LinearRegression(normalize=True)
CO_r.fit(X_train, y_train_co)
y_pred_co = CO_r.predict(X_cv)
# I am getting negative values when predicting only the CO values. So making them 0
y_pred_co[y_pred_co < 0] = 0
print("RMSE with simple Linear Regression", mean_squared_log_error(y_cv_co, y_pred_co))
BE_r = LinearRegression(normalize=True)
BE_r.fit(X_train, y_train_be)
y_pred_be = BE_r.predict(X_cv)
print("RMSE with simple Linear Regression", mean_squared_log_error(y_cv_be, y_pred_be))
NO_r = LinearRegression(normalize=True)
NO_r.fit(X_train, y_train_no)
y_pred_no = NO_r.predict(X_cv)
print("RMSE with simple Linear Regression", mean_squared_log_error(y_cv_no, y_pred_no))
d = {"CO": list(y_cv_co), "BE": list(y_cv_be), "NO": list(y_cv_no)}
y_cv = pd.DataFrame(data=d)
y_cv.head()
p = {"CO": list(rand_yco), "BE": list(rand_ybe), "NO": list(rand_yno)}
y_rand = pd.DataFrame(data=p)
y_rand.head()
y_cv.shape
# #Using RandomForest Regressor
# Carbon Monoxide
CO = RandomForestRegressor(n_estimators=50, random_state=0, criterion="mse")
CO.fit(X_train, y_train_co)
rand_yco = CO.predict(X_cv)
print("mean SQ error CO:", mean_squared_log_error(rand_yco, y_cv_co))
# Benzene
BE = RandomForestRegressor(n_estimators=50, random_state=0, criterion="mse")
BE.fit(X_train, y_train_be)
rand_ybe = BE.predict(X_cv)
print("mean SQ error BE:", mean_squared_log_error(rand_ybe, y_cv_be))
# Nitrous Oxide
NO = RandomForestRegressor(n_estimators=50, random_state=0, criterion="mse")
NO.fit(X_train, y_train_no)
rand_yno = NO.predict(X_cv)
print("mean SQ error NO:", mean_squared_log_error(rand_yno, y_cv_no))
# After the PCA there is not much improvement in the scores, so going ahead with existing data
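# The PCA run referred to above is not shown in this notebook, so here is a minimal sketch of what such a check could look like (an illustration, not the original code):
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95)  # keep enough components to explain ~95% of the variance
X_train_pca = pca.fit_transform(X_train)
X_cv_pca = pca.transform(X_cv)
print("components kept:", pca.n_components_)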
# Carbon Monoxide
s_CO = SVR()
s_CO.fit(X_train, y_train_co)
s_yco = s_CO.predict(X_cv)
print("mean SQ error CO:", mean_squared_log_error(s_yco, y_cv_co))
# Benzene
s_BE = SVR()
s_BE.fit(X_train, y_train_be)
s_ybe = s_BE.predict(X_cv)
print("mean SQ error BE:", mean_squared_log_error(s_ybe, y_cv_be))
# Nitrous Oxide
s_NO = SVR()
s_NO.fit(X_train, y_train_no)
s_yno = s_NO.predict(X_cv)
print("mean SQ error NO:", mean_squared_log_error(s_yno, y_cv_no))
# ## Based on the SVR, Random Forest Regressor and Linear Regressor, the RMSE for SVR has come out lowest
# Predicting Test Carbon Monoxide with SVR
test_yco = s_CO.predict(X_test)
print("mean SQ error CO:", mean_squared_log_error(test_yco, y_test_co))
# Predicting Test Benzene with SVR
test_ybe = s_BE.predict(X_test)
print("mean SQ error BE:", mean_squared_log_error(test_ybe, y_test_be))
# Predicting Test Nitrous Oxide with SVR
test_yno = s_NO.predict(X_test)
print("mean SQ error NO:", mean_squared_log_error(test_yno, y_test_no))
# The mean squared log error on the held-out test data confirms that the SVR model yields the lowest error
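# One caveat: mean_squared_log_error returns the *mean squared* log error, while the leaderboard-style RMSLE is its square root. A quick conversion, assuming the test-split predictions above:
print("RMSLE CO:", np.sqrt(mean_squared_log_error(y_test_co, test_yco)))
print("RMSLE BE:", np.sqrt(mean_squared_log_error(y_test_be, test_ybe)))
print("RMSLE NO:", np.sqrt(mean_squared_log_error(y_test_no, test_yno)))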
test.head()
# Scaling the Actual test dataframe after dropping the date column
test = test.drop("date_time", axis=1)
# Scale the competition test features with the same MinMaxScaler that was fitted on the training data
test = scaler.transform(test)
# Predicting Carbon Monoxide with SVR and competition test data
test_co = s_CO.predict(test)
# Predicting Benzene with SVR and competition test data
test_be = s_BE.predict(test)
# Predicting Nitrous Oxide with SVR and competition test data
test_no = s_NO.predict(test)
# Reinitialising the competition test dataframe again
test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv")
test[
    "target_carbon_monoxide"
] = test_co  # Adding the predicted CO target to the test dataframe
test["target_benzene"] = test_be  # Adding the predicted benzene target to the test dataframe
test[
    "target_nitrogen_oxides"
] = test_no  # Adding the predicted NOx target to the test dataframe
submission = test[
["date_time", "target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"]
]
submission.to_csv("my_submission.csv", index=False)  # drop the index so the file matches the sample submission format
# **In this notebook, I try to analyse the house price dataset and predict house prices with regression methods.**
# **I start by introducing the dataset, then I do the data analysis and house price prediction step by step.**
# # **Import the dataset:**
# **This dataset has 1460 rows and 81 columns. SalePrice is the target variable that I am trying to predict. First I import all the necessary libraries and then read the dataset:**
import numpy as np # Import all necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
with open(
"../input/house-prices-advanced-regression-techniques/data_description.txt"
) as f:
print(f.read())
train.head()
# Dropping the Id column because it doesn't carry useful information.
train = train.drop("Id", axis=1)
train.info()
# # **Exploratory Data Analysis**
# **In this step I want to plot most of features and reach some useful analytical results. Drawing charts and examining the data before applying a model is a very good practice because we may detect some possible outliers or decide to do normalization.**
# ### **Target Value**
fig = plt.figure(figsize=(8, 4), dpi=100) # Draw histogram for target variable
sns.distplot(
train["SalePrice"], hist_kws=dict(edgecolor="w", linewidth=1), bins=25, color="r"
)
plt.title("Sales data distribution")
# **As we can see, most of the prices fall between 100000 and 300000. The SalePrice distribution is noticeably right-skewed, so I apply a log transform to bring it closer to normal.**
train["saleprice"] = np.log1p(train["SalePrice"]) # Use log function in numpy
fig = plt.figure(figsize=(8, 4), dpi=100)
sns.distplot(
train["saleprice"], hist_kws=dict(edgecolor="w", linewidth=1), bins=25, color="r"
)
plt.title("Sales data distribution")
# ### **Checking Missing Data**
# Let's check if the data set has any missing values.
100 * (train.isnull().sum() / len(train))
def missing_values_percent(train): # we can use this function in all dataframes.
nan_percent = 100 * (train.isnull().sum() / len(train))
nan_percent = nan_percent[nan_percent > 0].sort_values()
return nan_percent
nan_percent = missing_values_percent(train)
nan_percent
# Drawing barplot for these missing values.
plt.figure(figsize=(12, 6))
sns.barplot(x=nan_percent.index, y=nan_percent)
plt.xticks(rotation=45)
# Zoom in on the columns whose missing-value percentages are between 0 and 5 percent.
plt.figure(figsize=(12, 6))
sns.barplot(x=nan_percent.index, y=nan_percent)
plt.xticks(rotation=45)
plt.ylim(0, 5)
# ## **In this step we want to make a decision about our missing data**
train[train["Electrical"].isnull()]
# We can delete this single row
train = train.drop(labels=1379, axis=0)
nan_percent = missing_values_percent(
    train
)  # Electrical has now been dropped from the missing-data list
plt.figure(figsize=(12, 6))
sns.barplot(x=nan_percent.index, y=nan_percent)
plt.xticks(rotation=45)
plt.ylim(0, 5)
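# For the columns that are still missing, one possible next step (just a sketch of an option, not a final decision) is to treat NaN in the basement/garage/fireplace quality columns as "no such feature", since the data description defines NA that way, and to fill the numeric LotFrontage with its median.
cols_none = [
    "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2",
    "GarageType", "GarageFinish", "GarageQual", "GarageCond", "FireplaceQu",
]
train[cols_none] = train[cols_none].fillna("None")
train["LotFrontage"] = train["LotFrontage"].fillna(train["LotFrontage"].median())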
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from pandas.api.types import CategoricalDtype
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.model_selection import RandomizedSearchCV
train_data = pd.read_csv("../input/titanic/train.csv")
test_data = pd.read_csv("../input/titanic/test.csv")
train_data.head()
train_data.info()
features_nom = ["Sex", "Ticket", "Cabin", "Embarked"]
p_class = [1, 2, 3]
ordered_levels = {"Pclass": p_class}
# Add a None level for missing values
ordered_levels = {key: ["None"] + value for key, value in ordered_levels.items()}
def encode(df):
# Nominal categories
for name in features_nom:
df[name] = df[name].astype("category")
# Add a None category for missing values
if "None" not in df[name].cat.categories:
df[name].cat.add_categories("None", inplace=True)
# Ordinal categories
for name, levels in ordered_levels.items():
df[name] = df[name].astype(CategoricalDtype(levels, ordered=True))
return df
train_data = encode(train_data)
test_data = encode(test_data)
train_data.info()
test_data.info()
train_data.Survived.value_counts()
train_data.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1, inplace=True)
test_data.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1, inplace=True)
train_data.head()
test_data.head()
train_data.isnull().sum()
train_data["Age"].fillna((train_data["Age"].mean()), inplace=True)
train_data["Embarked"].fillna((train_data["Embarked"].mode()[0]), inplace=True)
train_data.isnull().sum()
test_data.isnull().sum()
test_data["Age"].fillna((test_data["Age"].mean()), inplace=True)
test_data["Fare"].fillna((test_data["Fare"].mean()), inplace=True)
test_data.isnull().sum()
train_data.describe()
# sns.pairplot(train_data)
# plt.show()
sns.heatmap(train_data.corr(), vmin=-1, vmax=1, annot=True)
plt.show()
le = LabelEncoder()
test_data[["Pclass", "Sex", "Embarked"]] = test_data[
["Pclass", "Sex", "Embarked"]
].apply(lambda col: le.fit_transform(col))
train_data[["Pclass", "Sex", "Embarked"]] = train_data[
["Pclass", "Sex", "Embarked"]
].apply(lambda col: le.fit_transform(col))
train_data.head()
test_data.head()
# log-transform the highly skewed Fare feature; np.log with `where` but no `out`
# leaves zero-fare entries undefined, so supply a zero-filled output array
fare = train_data["Fare"].to_numpy(dtype=float)
train_data["Fare"] = np.log(fare, out=np.zeros_like(fare), where=fare > 0)
fare = test_data["Fare"].to_numpy(dtype=float)
test_data["Fare"] = np.log(fare, out=np.zeros_like(fare), where=fare > 0)
train_data.head()
train_data[train_data.Fare == 0]
train_data.Fare.sort_values()
# sns.pairplot(train_data)
# plt.show()
stand = StandardScaler()
# fit the scaler on the training features only and reuse it for the test set,
# instead of refitting on test data (which would leak test statistics)
sdata = stand.fit_transform(train_data.iloc[:, 1:])
stdata = stand.transform(test_data)
sdata = pd.DataFrame(sdata, columns=train_data.columns[1:])
sdata
stdata = pd.DataFrame(stdata, columns=test_data.columns)
stdata
pc = PCA(n_components=7)
x = sdata
y = train_data.iloc[:, 0]
x
pcs = pc.fit_transform(x)
# project the test data with the PCA fitted on the training set,
# so train and test share the same principal components
pcst = pc.transform(stdata)
pc.explained_variance_
sns.lineplot(x=list(range(1, 1 + len(pc.explained_variance_))), y=pc.explained_variance_)
pc_names = ["PC" + str(i) for i in range(1, 8)]
x_pcs = pd.DataFrame(pcs, columns=pc_names)
x_pcst = pd.DataFrame(pcst, columns=pc_names)
x_pcs
x_pcst
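# An optional sketch (assumes `pc` is still the fitted PCA object from above): the
# cumulative explained-variance ratio shows how much of the variance the three
# components kept below actually capture.
cumulative = np.cumsum(pc.explained_variance_ratio_)
print({f"first {i + 1} PCs": round(r, 3) for i, r in enumerate(cumulative)})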
x = x.join(x_pcs.iloc[:, :3])
xt = stdata.join(x_pcst.iloc[:, :3])
xt
y.value_counts()
x
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
x_train
x_test
y_train
model = RandomForestClassifier(n_estimators=10, max_depth=5, random_state=1)
model.fit(x_train, y_train)
predictions = model.predict(x_test)
predictions
confusion_matrix(y_test, predictions)
sns.heatmap(confusion_matrix(y_test, predictions), annot=True, fmt="g")
plt.xlabel("Predicted")
plt.ylabel("Actual")
print(classification_report(y_test, predictions))
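# A small optional sketch (not part of the original flow): inspect which of the
# scaled/PCA-augmented features the fitted random forest relies on most.
importances = pd.Series(model.feature_importances_, index=x.columns).sort_values(ascending=False)
print(importances.head(10))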
test_data_new = pd.read_csv("../input/titanic/test.csv")
xt
test_data
predictions_test = model.predict(xt)
output = pd.DataFrame(
{"PassengerId": test_data_new.PassengerId, "Survived": predictions_test}
)
output.to_csv("./sub8.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/074/69074108.ipynb
| null | null |
[{"Id": 69074108, "ScriptId": 18810040, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890508, "CreationDate": "07/26/2021 14:32:50", "VersionNumber": 7.0, "Title": "titan5", "EvaluationDate": "07/26/2021", "IsChange": false, "TotalLines": 242.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 242.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,576 | 0 | 1,576 | 1,576 |
||
69074529
|
<jupyter_start><jupyter_text>smallsemi
# Source
[GAP System](https://www.gap-system.org/Packages/smallsemi.html)
# Authors
[Andreas Distler](mailto:[email protected]), [James Mitchell](http://tinyurl.com/jdmitchell)
# Description
The Smallsemi package is a data library of semigroups of small size. It provides all semigroups with at most 8 elements as well as various information about these objects.
The databases of the semigroups can be retrieved from a `data` folder.
# License
GPL-3.0-or-later
Kaggle dataset identifier: smallsemi
<jupyter_script>"""
Copyright 2019-2021 Boris Shminke
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from neural_semigroups import Magma
from neural_semigroups.utils import hide_cells, partial_table_to_cube
import torch
from typing import Tuple
# examples for other cardinalities are located in the respective directories
cardinality = 5
def transform(
cayley_table: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
this is a data augmentation function similar to those used for
pictures, but instead of rotations and translations we use other
transformations
:param cayley_table: a tuple of a single tensor --- a Cayley table of
some magma
:returns: a tuple of probabilistic representations of a partial Cayley
table and the corresponding full one
"""
# the ``smallsemi`` package stores tables only up to isomorphism _or_
# anti-isomorphism, which is why we apply isomorphisms or
# anti-isomorphisms with equal probabilities
if torch.randn((1,)).cpu().item() > 0.5:
full_table = Magma(cayley_table[0]).random_isomorphism()
else:
full_table = Magma(cayley_table[0]).random_isomorphism().T
# we want to reconstruct an :math:`n\times n` table given :math:`n` cells
partial_cube = partial_table_to_cube(
hide_cells(full_table, cardinality * cardinality - cardinality)
)
return partial_cube, partial_table_to_cube(full_table)
from neural_semigroups.smallsemi_dataset import Smallsemi
# when not running on Kaggle, one can set the ``download`` parameter to ``True``
# to download the original ``smallsemi`` databases
data = Smallsemi(
root="/kaggle/input/smallsemi", cardinality=cardinality, transform=transform
)
from torch.utils.data.dataset import random_split
from torch.utils.data import DataLoader
data_size = len(data)
print(data_size)
# depending on the size of the available data we leave either 1024 equivalence
# classes or one third of the data (whichever is smaller) for each of the
# validation and test sets
test_size = min(len(data) // 3, 1024)
data_loaders = tuple(
DataLoader(data_split, batch_size=32)
for data_split in random_split(
data, [data_size - 2 * test_size, test_size, test_size]
)
)
# Since we try to reconstruct an associative Cayley table from only a handful of cells, it is hardly probable that we end up with the original table. Most of the time the network returns other solutions to the same task (since the solution is not unique). That's why we use a special loss function described [here](https://neural-semigroups.readthedocs.io/en/latest/package-documentation.html#associator-loss).
from neural_semigroups.associator_loss import AssociatorLoss
from torch import Tensor
def loss(prediction: Tensor, target: Tensor) -> Tensor:
return AssociatorLoss()(prediction)
from neural_semigroups import MagmaDAE
# here we use a typical hourglass-shaped autoencoder
dae = MagmaDAE(
cardinality=cardinality,
hidden_dims=[cardinality**3, cardinality**2, cardinality],
)
# Since we want to upload training logs to tensorboard.dev, we first remove all the previous experiments from the same folder
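# A minimal sketch of that clean-up step (the log directory name ``runs`` is an
# assumption for illustration, not something fixed by this notebook):
import shutil

shutil.rmtree("runs", ignore_errors=True)  # silently skip if the folder does not exist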
from neural_semigroups.training_helpers import learning_pipeline
from ignite.metrics.loss import Loss
from neural_semigroups.training_helpers import associative_ratio, guessed_ratio
params = {"learning_rate": 0.001, "epochs": 1000}
metrics = {
"loss": Loss(loss),
"associative_ratio": Loss(associative_ratio),
"guessed_ratio": Loss(guessed_ratio),
}
learning_pipeline(params, dae, loss, metrics, data_loaders)
torch.onnx.export(dae, next(iter(data_loaders[0]))[0], f"dae-{cardinality}.onnx")
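# Optional sanity check of the exported file (a sketch; it assumes the ``onnxruntime``
# package is installed, which is not used anywhere above):
import onnxruntime as ort

session = ort.InferenceSession(f"dae-{cardinality}.onnx")
print([(i.name, i.shape) for i in session.get_inputs()])
print([(o.name, o.shape) for o in session.get_outputs()])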
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/074/69074529.ipynb
|
smallsemi
|
inpefess
|
[{"Id": 69074529, "ScriptId": 14302251, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 237956, "CreationDate": "07/26/2021 14:38:02", "VersionNumber": 9.0, "Title": "Neural Semigroups DAE (dim 5)", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 118.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 117.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91830595, "KernelVersionId": 69074529, "SourceDatasetVersionId": 1710577}]
|
[{"Id": 1710577, "DatasetId": 1014290, "DatasourceVersionId": 1747380, "CreatorUserId": 237956, "LicenseName": "Other (specified in description)", "CreationDate": "12/03/2020 23:11:19", "VersionNumber": 1.0, "Title": "smallsemi", "Slug": "smallsemi", "Subtitle": "https://www.gap-system.org/Packages/smallsemi.html", "Description": "# Source\n\n[GAP System](https://www.gap-system.org/Packages/smallsemi.html)\n\n# Authors\n\n[Andreas Distler](mailto:[email protected]), [James Mitchell](http://tinyurl.com/jdmitchell)\n\n# Description\n\nThe Smallsemi package is a data library of semigroups of small size. It provides all semigroups with at most 8 elements as well as various information about these objects.\n\nThe databases of the semigroups can be retrieved from a `data` folder.\n\n\n# License\n\nGPL-3.0-or-later", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1014290, "CreatorUserId": 237956, "OwnerUserId": 237956.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1710577.0, "CurrentDatasourceVersionId": 1747380.0, "ForumId": 1031067, "Type": 2, "CreationDate": "12/03/2020 23:11:19", "LastActivityDate": "12/03/2020", "TotalViews": 1076, "TotalDownloads": 4, "TotalVotes": 1, "TotalKernels": 3}]
|
[{"Id": 237956, "UserName": "inpefess", "DisplayName": "Boris Shminke", "RegisterDate": "09/19/2014", "PerformanceTier": 1}]
|
"""
Copyright 2019-2021 Boris Shminke
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from neural_semigroups import Magma
from neural_semigroups.utils import hide_cells, partial_table_to_cube
import torch
from typing import Tuple
# examples for other cardinalities are located in recspective directories
cardinality = 5
def transform(
cayley_table: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
this is a data augmentation function similar to those which are used for
pictures, but insted of rotations and translations we use other
transformations
:param cayley_table: a tuple from a single tensor --- a Cayley table of
some magma
:returns: a tuple of a probabilistic representations of a partial Cayley
table and the respecting full one
"""
# in the ``smallsemi`` package they store only table up to isomorphism _or_
# anti-isomorphism, that's why here we apply isomorphisms or
# anti-isomorphisms with equal probabilies
if torch.randn((1,)).cpu().item() > 0.5:
full_table = Magma(cayley_table[0]).random_isomorphism()
else:
full_table = Magma(cayley_table[0]).random_isomorphism().T
# we want to reconstruct an math:``n\times n`` table given math:``n`` cells
partial_cube = partial_table_to_cube(
hide_cells(full_table, cardinality * cardinality - cardinality)
)
return partial_cube, partial_table_to_cube(full_table)
from neural_semigroups.smallsemi_dataset import Smallsemi
# when running not on Kaggle, one can set ``download`` parameter to ``True`` to
# download original ``smallsemi`` databases
data = Smallsemi(
root="/kaggle/input/smallsemi", cardinality=cardinality, transform=transform
)
from torch.utils.data.dataset import random_split
from torch.utils.data import DataLoader
data_size = len(data)
print(data_size)
# depending on the size of available data we leave either 1024 equivalence
# classes or one third of them (whichever is smaller) for each training and
# validation sets
test_size = min(len(data) // 3, 1024)
data_loaders = tuple(
DataLoader(data_split, batch_size=32)
for data_split in random_split(
data, [data_size - 2 * test_size, test_size, test_size]
)
)
# Since we try to reconstruct an associative Cayley table from only a handfull of cells, it's hardly probable that we end up with an original table. Most of the time the network returns other solutions for the same task (since the solution is none unique). That's why we used a special loss function described [here](https://neural-semigroups.readthedocs.io/en/latest/package-documentation.html#associator-loss).
from neural_semigroups.associator_loss import AssociatorLoss
from torch import Tensor
def loss(prediction: Tensor, target: Tensor) -> Tensor:
return AssociatorLoss()(prediction)
from neural_semigroups import MagmaDAE
# here we use a typical hourglass shape of an autoencoder
dae = MagmaDAE(
cardinality=cardinality,
hidden_dims=[cardinality**3, cardinality**2, cardinality],
)
# Since we want to upload training logs to tensorboard.dev, we first remove all the previous experiments from the same folder
from neural_semigroups.training_helpers import learning_pipeline
from ignite.metrics.loss import Loss
from neural_semigroups.training_helpers import associative_ratio, guessed_ratio
params = {"learning_rate": 0.001, "epochs": 1000}
metrics = {
"loss": Loss(loss),
"associative_ratio": Loss(associative_ratio),
"guessed_ratio": Loss(guessed_ratio),
}
learning_pipeline(params, dae, loss, metrics, data_loaders)
torch.onnx.export(dae, next(iter(data_loaders[0]))[0], f"dae-{cardinality}.onnx")
| false | 0 | 1,137 | 0 | 1,304 | 1,137 |
||
69074322
|
<jupyter_start><jupyter_text>Avocado Prices
### Context
It is a well known fact that Millenials LOVE Avocado Toast. It's also a well known fact that all Millenials live in their parents basements.
Clearly, they aren't buying home because they are buying too much Avocado Toast!
But maybe there's hope... if a Millenial could find a city with cheap avocados, they could live out the Millenial American Dream.
### Content
This data was downloaded from the Hass Avocado Board website in May of 2018 & compiled into a single CSV. Here's how the [Hass Avocado Board describes the data on their website][1]:
> The table below represents weekly 2018 retail scan data for National retail volume (units) and price. Retail scan data comes directly from retailers’ cash registers based on actual retail sales of Hass avocados. Starting in 2013, the table below reflects an expanded, multi-outlet retail data set. Multi-outlet reporting includes an aggregation of the following channels: grocery, mass, club, drug, dollar and military. The Average Price (of avocados) in the table reflects a per unit (per avocado) cost, even when multiple units (avocados) are sold in bags. The Product Lookup codes (PLU’s) in the table are only for Hass avocados. Other varieties of avocados (e.g. greenskins) are not included in this table.
Some relevant columns in the dataset:
- `Date` - The date of the observation
- `AveragePrice` - the average price of a single avocado
- `type` - conventional or organic
- `year` - the year
- `Region` - the city or region of the observation
- `Total Volume` - Total number of avocados sold
- `4046` - Total number of avocados with PLU 4046 sold
- `4225` - Total number of avocados with PLU 4225 sold
- `4770` - Total number of avocados with PLU 4770 sold
Kaggle dataset identifier: avocado-prices
<jupyter_code>import pandas as pd
df = pd.read_csv('avocado-prices/avocado.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 18249 entries, 0 to 18248
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 18249 non-null int64
1 Date 18249 non-null object
2 AveragePrice 18249 non-null float64
3 Total Volume 18249 non-null float64
4 4046 18249 non-null float64
5 4225 18249 non-null float64
6 4770 18249 non-null float64
7 Total Bags 18249 non-null float64
8 Small Bags 18249 non-null float64
9 Large Bags 18249 non-null float64
10 XLarge Bags 18249 non-null float64
11 type 18249 non-null object
12 year 18249 non-null int64
13 region 18249 non-null object
dtypes: float64(9), int64(2), object(3)
memory usage: 1.9+ MB
<jupyter_text>Examples:
{
"Unnamed: 0": 0,
"Date": "2015-12-27 00:00:00",
"AveragePrice": 1.33,
"Total Volume": 64236.62,
"4046": 1036.74,
"4225": 54454.85,
"4770": 48.16,
"Total Bags": 8696.87,
"Small Bags": 8603.62,
"Large Bags": 93.25,
"XLarge Bags": 0,
"type": "conventional",
"year": 2015,
"region": "Albany"
}
{
"Unnamed: 0": 1,
"Date": "2015-12-20 00:00:00",
"AveragePrice": 1.35,
"Total Volume": 54876.98,
"4046": 674.28,
"4225": 44638.81,
"4770": 58.33,
"Total Bags": 9505.56,
"Small Bags": 9408.07,
"Large Bags": 97.49,
"XLarge Bags": 0,
"type": "conventional",
"year": 2015,
"region": "Albany"
}
{
"Unnamed: 0": 2,
"Date": "2015-12-13 00:00:00",
"AveragePrice": 0.93,
"Total Volume": 118220.22,
"4046": 794.7,
"4225": 109149.67,
"4770": 130.5,
"Total Bags": 8145.35,
"Small Bags": 8042.21,
"Large Bags": 103.14,
"XLarge Bags": 0,
"type": "conventional",
"year": 2015,
"region": "Albany"
}
{
"Unnamed: 0": 3,
"Date": "2015-12-06 00:00:00",
"AveragePrice": 1.08,
"Total Volume": 78992.15,
"4046": 1132.0,
"4225": 71976.41,
"4770": 72.58,
"Total Bags": 5811.16,
"Small Bags": 5677.4,
"Large Bags": 133.76,
"XLarge Bags": 0,
"type": "conventional",
"year": 2015,
"region": "Albany"
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
Avacado = pd.read_csv("../input/avocado-prices/avocado.csv", index_col=0)
Avacado.head()
Avacado.describe()
Avacado.drop(["Date", "region"], axis=1)  # preview without these columns; the frame itself is unchanged (no assignment/inplace)
le = LabelEncoder()
Avacado["type"] = le.fit_transform(Avacado["type"])
Avacado.head(5)
target = Avacado[["AveragePrice"]]
features = Avacado.drop(["AveragePrice", "Date", "region"], axis=1)
features.head()
# #splitting the data
X_train, X_test, y_train, y_test = train_test_split(
features, target, test_size=0.25, random_state=10
)
my_lr_model = LinearRegression()
my_lr_model.fit(X_train, y_train)
my_lr_model.coef_
my_lr_model.intercept_
# Model Validation
from sklearn.metrics import mean_squared_error
# Prediction on test feature set
y_predictions = pd.DataFrame(my_lr_model.predict(X_test))
y_predictions.head()
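# The "Model Validation" step above stops at raw predictions; a small sketch of the
# metrics one would typically report here (RMSE and R^2 are illustrative choices,
# not the author's):
from sklearn.metrics import r2_score

rmse = mean_squared_error(y_test, y_predictions) ** 0.5
print("RMSE:", rmse, "R^2:", r2_score(y_test, y_predictions))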
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/074/69074322.ipynb
|
avocado-prices
|
neuromusic
|
[{"Id": 69074322, "ScriptId": 18846467, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7166621, "CreationDate": "07/26/2021 14:35:25", "VersionNumber": 3.0, "Title": "Avacado", "EvaluationDate": NaN, "IsChange": false, "TotalLines": 63.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 63.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91830253, "KernelVersionId": 69074322, "SourceDatasetVersionId": 38613}]
|
[{"Id": 38613, "DatasetId": 30292, "DatasourceVersionId": 40416, "CreatorUserId": 34547, "LicenseName": "Database: Open Database, Contents: \u00a9 Original Authors", "CreationDate": "06/06/2018 05:28:35", "VersionNumber": 1.0, "Title": "Avocado Prices", "Slug": "avocado-prices", "Subtitle": "Historical data on avocado prices and sales volume in multiple US markets", "Description": "### Context\n\nIt is a well known fact that Millenials LOVE Avocado Toast. It's also a well known fact that all Millenials live in their parents basements.\n\nClearly, they aren't buying home because they are buying too much Avocado Toast!\n\nBut maybe there's hope... if a Millenial could find a city with cheap avocados, they could live out the Millenial American Dream.\n\n### Content\n\nThis data was downloaded from the Hass Avocado Board website in May of 2018 & compiled into a single CSV. Here's how the [Hass Avocado Board describes the data on their website][1]:\n\n> The table below represents weekly 2018 retail scan data for National retail volume (units) and price. Retail scan data comes directly from retailers\u2019 cash registers based on actual retail sales of Hass avocados. Starting in 2013, the table below reflects an expanded, multi-outlet retail data set. Multi-outlet reporting includes an aggregation of the following channels: grocery, mass, club, drug, dollar and military. The Average Price (of avocados) in the table reflects a per unit (per avocado) cost, even when multiple units (avocados) are sold in bags. The Product Lookup codes (PLU\u2019s) in the table are only for Hass avocados. Other varieties of avocados (e.g. greenskins) are not included in this table.\n\nSome relevant columns in the dataset:\n\n- `Date` - The date of the observation\n- `AveragePrice` - the average price of a single avocado\n- `type` - conventional or organic\n- `year` - the year\n- `Region` - the city or region of the observation\n- `Total Volume` - Total number of avocados sold\n- `4046` - Total number of avocados with PLU 4046 sold\n- `4225` - Total number of avocados with PLU 4225 sold\n- `4770` - Total number of avocados with PLU 4770 sold\n\n### Acknowledgements\n\nMany thanks to the Hass Avocado Board for sharing this data!!\n\nhttp://www.hassavocadoboard.com/retail/volume-and-price-data\n\n### Inspiration\n\nIn which cities can millenials have their avocado toast AND buy a home?\n\nWas the Avocadopocalypse of 2017 real?\n\n\n [1]: http://www.hassavocadoboard.com/retail/volume-and-price-data", "VersionNotes": "Initial release", "TotalCompressedBytes": 1989197.0, "TotalUncompressedBytes": 643741.0}]
|
[{"Id": 30292, "CreatorUserId": 34547, "OwnerUserId": 34547.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 38613.0, "CurrentDatasourceVersionId": 40416.0, "ForumId": 38581, "Type": 2, "CreationDate": "06/06/2018 05:28:35", "LastActivityDate": "06/06/2018", "TotalViews": 1258337, "TotalDownloads": 248594, "TotalVotes": 3480, "TotalKernels": 423}]
|
[{"Id": 34547, "UserName": "neuromusic", "DisplayName": "Justin Kiggins", "RegisterDate": "03/07/2012", "PerformanceTier": 0}]
|
|
[{"avocado-prices/avocado.csv": {"column_names": "[\"Unnamed: 0\", \"Date\", \"AveragePrice\", \"Total Volume\", \"4046\", \"4225\", \"4770\", \"Total Bags\", \"Small Bags\", \"Large Bags\", \"XLarge Bags\", \"type\", \"year\", \"region\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"Date\": \"object\", \"AveragePrice\": \"float64\", \"Total Volume\": \"float64\", \"4046\": \"float64\", \"4225\": \"float64\", \"4770\": \"float64\", \"Total Bags\": \"float64\", \"Small Bags\": \"float64\", \"Large Bags\": \"float64\", \"XLarge Bags\": \"float64\", \"type\": \"object\", \"year\": \"int64\", \"region\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 18249 entries, 0 to 18248\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 18249 non-null int64 \n 1 Date 18249 non-null object \n 2 AveragePrice 18249 non-null float64\n 3 Total Volume 18249 non-null float64\n 4 4046 18249 non-null float64\n 5 4225 18249 non-null float64\n 6 4770 18249 non-null float64\n 7 Total Bags 18249 non-null float64\n 8 Small Bags 18249 non-null float64\n 9 Large Bags 18249 non-null float64\n 10 XLarge Bags 18249 non-null float64\n 11 type 18249 non-null object \n 12 year 18249 non-null int64 \n 13 region 18249 non-null object \ndtypes: float64(9), int64(2), object(3)\nmemory usage: 1.9+ MB\n", "summary": "{\"Unnamed: 0\": {\"count\": 18249.0, \"mean\": 24.232231903117977, \"std\": 15.481044753757136, \"min\": 0.0, \"25%\": 10.0, \"50%\": 24.0, \"75%\": 38.0, \"max\": 52.0}, \"AveragePrice\": {\"count\": 18249.0, \"mean\": 1.405978409775878, \"std\": 0.40267655549555065, \"min\": 0.44, \"25%\": 1.1, \"50%\": 1.37, \"75%\": 1.66, \"max\": 3.25}, \"Total Volume\": {\"count\": 18249.0, \"mean\": 850644.0130089321, \"std\": 3453545.3553994712, \"min\": 84.56, \"25%\": 10838.58, \"50%\": 107376.76, \"75%\": 432962.29, \"max\": 62505646.52}, \"4046\": {\"count\": 18249.0, \"mean\": 293008.4245306592, \"std\": 1264989.0817627772, \"min\": 0.0, \"25%\": 854.07, \"50%\": 8645.3, \"75%\": 111020.2, \"max\": 22743616.17}, \"4225\": {\"count\": 18249.0, \"mean\": 295154.56835607433, \"std\": 1204120.4011350507, \"min\": 0.0, \"25%\": 3008.78, \"50%\": 29061.02, \"75%\": 150206.86, \"max\": 20470572.61}, \"4770\": {\"count\": 18249.0, \"mean\": 22839.73599265713, \"std\": 107464.06843537073, \"min\": 0.0, \"25%\": 0.0, \"50%\": 184.99, \"75%\": 6243.42, \"max\": 2546439.11}, \"Total Bags\": {\"count\": 18249.0, \"mean\": 239639.20205983886, \"std\": 986242.3992164118, \"min\": 0.0, \"25%\": 5088.64, \"50%\": 39743.83, \"75%\": 110783.37, \"max\": 19373134.37}, \"Small Bags\": {\"count\": 18249.0, \"mean\": 182194.68669570936, \"std\": 746178.5149617889, \"min\": 0.0, \"25%\": 2849.42, \"50%\": 26362.82, \"75%\": 83337.67, \"max\": 13384586.8}, \"Large Bags\": {\"count\": 18249.0, \"mean\": 54338.08814455587, \"std\": 243965.96454740883, \"min\": 0.0, \"25%\": 127.47, \"50%\": 2647.71, \"75%\": 22029.25, \"max\": 5719096.61}, \"XLarge Bags\": {\"count\": 18249.0, \"mean\": 3106.426507205874, \"std\": 17692.894651916486, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 132.5, \"max\": 551693.65}, \"year\": {\"count\": 18249.0, \"mean\": 2016.1478985149872, \"std\": 0.9399384671405984, \"min\": 2015.0, \"25%\": 2015.0, \"50%\": 2016.0, \"75%\": 2017.0, \"max\": 2018.0}}", "examples": "{\"Unnamed: 
0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"Date\":{\"0\":\"2015-12-27\",\"1\":\"2015-12-20\",\"2\":\"2015-12-13\",\"3\":\"2015-12-06\"},\"AveragePrice\":{\"0\":1.33,\"1\":1.35,\"2\":0.93,\"3\":1.08},\"Total Volume\":{\"0\":64236.62,\"1\":54876.98,\"2\":118220.22,\"3\":78992.15},\"4046\":{\"0\":1036.74,\"1\":674.28,\"2\":794.7,\"3\":1132.0},\"4225\":{\"0\":54454.85,\"1\":44638.81,\"2\":109149.67,\"3\":71976.41},\"4770\":{\"0\":48.16,\"1\":58.33,\"2\":130.5,\"3\":72.58},\"Total Bags\":{\"0\":8696.87,\"1\":9505.56,\"2\":8145.35,\"3\":5811.16},\"Small Bags\":{\"0\":8603.62,\"1\":9408.07,\"2\":8042.21,\"3\":5677.4},\"Large Bags\":{\"0\":93.25,\"1\":97.49,\"2\":103.14,\"3\":133.76},\"XLarge Bags\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"type\":{\"0\":\"conventional\",\"1\":\"conventional\",\"2\":\"conventional\",\"3\":\"conventional\"},\"year\":{\"0\":2015,\"1\":2015,\"2\":2015,\"3\":2015},\"region\":{\"0\":\"Albany\",\"1\":\"Albany\",\"2\":\"Albany\",\"3\":\"Albany\"}}"}}]
| true | 1 |
<start_data_description><data_path>avocado-prices/avocado.csv:
<column_names>
['Unnamed: 0', 'Date', 'AveragePrice', 'Total Volume', '4046', '4225', '4770', 'Total Bags', 'Small Bags', 'Large Bags', 'XLarge Bags', 'type', 'year', 'region']
<column_types>
{'Unnamed: 0': 'int64', 'Date': 'object', 'AveragePrice': 'float64', 'Total Volume': 'float64', '4046': 'float64', '4225': 'float64', '4770': 'float64', 'Total Bags': 'float64', 'Small Bags': 'float64', 'Large Bags': 'float64', 'XLarge Bags': 'float64', 'type': 'object', 'year': 'int64', 'region': 'object'}
<dataframe_Summary>
{'Unnamed: 0': {'count': 18249.0, 'mean': 24.232231903117977, 'std': 15.481044753757136, 'min': 0.0, '25%': 10.0, '50%': 24.0, '75%': 38.0, 'max': 52.0}, 'AveragePrice': {'count': 18249.0, 'mean': 1.405978409775878, 'std': 0.40267655549555065, 'min': 0.44, '25%': 1.1, '50%': 1.37, '75%': 1.66, 'max': 3.25}, 'Total Volume': {'count': 18249.0, 'mean': 850644.0130089321, 'std': 3453545.3553994712, 'min': 84.56, '25%': 10838.58, '50%': 107376.76, '75%': 432962.29, 'max': 62505646.52}, '4046': {'count': 18249.0, 'mean': 293008.4245306592, 'std': 1264989.0817627772, 'min': 0.0, '25%': 854.07, '50%': 8645.3, '75%': 111020.2, 'max': 22743616.17}, '4225': {'count': 18249.0, 'mean': 295154.56835607433, 'std': 1204120.4011350507, 'min': 0.0, '25%': 3008.78, '50%': 29061.02, '75%': 150206.86, 'max': 20470572.61}, '4770': {'count': 18249.0, 'mean': 22839.73599265713, 'std': 107464.06843537073, 'min': 0.0, '25%': 0.0, '50%': 184.99, '75%': 6243.42, 'max': 2546439.11}, 'Total Bags': {'count': 18249.0, 'mean': 239639.20205983886, 'std': 986242.3992164118, 'min': 0.0, '25%': 5088.64, '50%': 39743.83, '75%': 110783.37, 'max': 19373134.37}, 'Small Bags': {'count': 18249.0, 'mean': 182194.68669570936, 'std': 746178.5149617889, 'min': 0.0, '25%': 2849.42, '50%': 26362.82, '75%': 83337.67, 'max': 13384586.8}, 'Large Bags': {'count': 18249.0, 'mean': 54338.08814455587, 'std': 243965.96454740883, 'min': 0.0, '25%': 127.47, '50%': 2647.71, '75%': 22029.25, 'max': 5719096.61}, 'XLarge Bags': {'count': 18249.0, 'mean': 3106.426507205874, 'std': 17692.894651916486, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 132.5, 'max': 551693.65}, 'year': {'count': 18249.0, 'mean': 2016.1478985149872, 'std': 0.9399384671405984, 'min': 2015.0, '25%': 2015.0, '50%': 2016.0, '75%': 2017.0, 'max': 2018.0}}
<dataframe_info>
RangeIndex: 18249 entries, 0 to 18248
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 18249 non-null int64
1 Date 18249 non-null object
2 AveragePrice 18249 non-null float64
3 Total Volume 18249 non-null float64
4 4046 18249 non-null float64
5 4225 18249 non-null float64
6 4770 18249 non-null float64
7 Total Bags 18249 non-null float64
8 Small Bags 18249 non-null float64
9 Large Bags 18249 non-null float64
10 XLarge Bags 18249 non-null float64
11 type 18249 non-null object
12 year 18249 non-null int64
13 region 18249 non-null object
dtypes: float64(9), int64(2), object(3)
memory usage: 1.9+ MB
<some_examples>
{'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'Date': {'0': '2015-12-27', '1': '2015-12-20', '2': '2015-12-13', '3': '2015-12-06'}, 'AveragePrice': {'0': 1.33, '1': 1.35, '2': 0.93, '3': 1.08}, 'Total Volume': {'0': 64236.62, '1': 54876.98, '2': 118220.22, '3': 78992.15}, '4046': {'0': 1036.74, '1': 674.28, '2': 794.7, '3': 1132.0}, '4225': {'0': 54454.85, '1': 44638.81, '2': 109149.67, '3': 71976.41}, '4770': {'0': 48.16, '1': 58.33, '2': 130.5, '3': 72.58}, 'Total Bags': {'0': 8696.87, '1': 9505.56, '2': 8145.35, '3': 5811.16}, 'Small Bags': {'0': 8603.62, '1': 9408.07, '2': 8042.21, '3': 5677.4}, 'Large Bags': {'0': 93.25, '1': 97.49, '2': 103.14, '3': 133.76}, 'XLarge Bags': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'type': {'0': 'conventional', '1': 'conventional', '2': 'conventional', '3': 'conventional'}, 'year': {'0': 2015, '1': 2015, '2': 2015, '3': 2015}, 'region': {'0': 'Albany', '1': 'Albany', '2': 'Albany', '3': 'Albany'}}
<end_description>
| 517 | 0 | 2,205 | 517 |
69074147
|
<jupyter_start><jupyter_text>Used Bikes Prices in India
### Context
This dataset contains information about approx 32000 used bikes scraped from www.droom.in
### Content
This dataset comprises a range of used bikes sold on droom.in. It includes features like power, kilometers driven, age of the bike, etc.
Kaggle dataset identifier: used-bikes-prices-in-india
<jupyter_code>import pandas as pd
df = pd.read_csv('used-bikes-prices-in-india/Used_Bikes.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 32648 entries, 0 to 32647
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 bike_name 32648 non-null object
1 price 32648 non-null float64
2 city 32648 non-null object
3 kms_driven 32648 non-null float64
4 owner 32648 non-null object
5 age 32648 non-null float64
6 power 32648 non-null float64
7 brand 32648 non-null object
dtypes: float64(4), object(4)
memory usage: 2.0+ MB
<jupyter_text>Examples:
{
"bike_name": "TVS Star City Plus Dual Tone 110cc",
"price": 35000,
"city": "Ahmedabad",
"kms_driven": 17654,
"owner": "First Owner",
"age": 3,
"power": 110,
"brand": "TVS"
}
{
"bike_name": "Royal Enfield Classic 350cc",
"price": 119900,
"city": "Delhi",
"kms_driven": 11000,
"owner": "First Owner",
"age": 4,
"power": 350,
"brand": "Royal Enfield"
}
{
"bike_name": "Triumph Daytona 675R",
"price": 600000,
"city": "Delhi",
"kms_driven": 110,
"owner": "First Owner",
"age": 8,
"power": 675,
"brand": "Triumph"
}
{
"bike_name": "TVS Apache RTR 180cc",
"price": 65000,
"city": "Bangalore",
"kms_driven": 16329,
"owner": "First Owner",
"age": 4,
"power": 180,
"brand": "TVS"
}
<jupyter_script>import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Modules for EDA
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
plt.style.use("seaborn")
# Machine learning packages
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
import joblib
df = pd.read_csv("../input/used-bikes-prices-in-india/Used_Bikes.csv")
df.shape
df.info()
df.isna().sum()
df.head()
# # **Let's Figure Out unique bike brands**
bikes = df["bike_name"].str.split(" ")
bikes.head()
bike_brand = set()
for bike in bikes:
bike_brand.add(bike[0])
bike_brands = pd.Series(list(bike_brand), name="Brand")
bike_brands
# keep only the brand, i.e. the first token of each bike name
df["bike_name"] = df["bike_name"].str.split(" ").str[0]
df.rename(columns={"bike_name": "Bike Brand"}, inplace=True)
df.head()
df["Bike Brand"].value_counts()
# # **Replacing bike brands with fewer than 1000 listings by "Others"**
brands = df["Bike Brand"].value_counts()
bike_brands_less_than_1000 = brands[brands <= 1000]
bike_brands_less_than_1000
print("Other brands total", sum(bike_brands_less_than_1000))
others = bike_brands_less_than_1000.keys()
others
df["Bike Brand"].replace(others, "Others", inplace=True)
df["Bike Brand"].value_counts().plot(kind="barh")
plt.gca().invert_yaxis()
plt.show()
bike_groups = df.groupby("Bike Brand")
def get_average_plot_data(col, scale=None):
brands = df["Bike Brand"].unique()
avgs = []
for brand in brands:
average = bike_groups.get_group(brand)[col].mean()
avgs.append(average)
df1 = pd.DataFrame({"Brand": brands, f"Average {col}": avgs})
x = df1[df1.columns[1]]
y = df1[df1.columns[0]]
sns.barplot(data=df1, x=x, y=y)
plt.title(f"Average {col} of various brands")
if scale:
plt.xscale(scale)
plt.show()
# # **Average age of each bike brand**
get_average_plot_data("age")
# # **Average price of each bike brand**
get_average_plot_data("price", "symlog")
# # **Average KMs driven of each bike brand**
get_average_plot_data("kms_driven")
# # **Average power of each bike brand**
get_average_plot_data("power")
# # **Pair Plot**
cols_to_plot = ["Bike Brand", "price", "kms_driven", "age", "power"]
plt.figure(figsize=(10, 10))
sns.pairplot(df[cols_to_plot], hue="Bike Brand")
plt.show()
# # **City Counts**
df["city"].value_counts()
# # **Popular cities**
city_counts = df["city"].value_counts()
city_counts[city_counts >= 500]
# # **Setting cities to others where city frequency is < 500**
other_cities = city_counts[city_counts < 500]
df["city"].replace(other_cities.keys(), "Others", inplace=True)
plt.figure(figsize=(10, 10))
df["city"].value_counts().plot(kind="barh")
plt.gca().invert_yaxis()
plt.show()
df.drop("brand", inplace=True, axis=1)
df.head()
df["owner"].value_counts()
df["owner"].replace(
["Second Owner", "Third Owner", "Fourth Owner Or More"],
"Second Owner or more",
inplace=True,
)
df["owner"].value_counts()
# # **Feature Engineering**
# ## **One Hot encoding**
cols_to_encode = ["Bike Brand", "city", "owner"]
dummies = pd.get_dummies(df[cols_to_encode], drop_first=True)
dummies.sample(10)
# ## **Feature Scaling**
cols_to_scale = ["kms_driven", "age", "power"]
scale = MinMaxScaler()
scalled = scale.fit_transform(df[cols_to_scale])
i = 0
for col in cols_to_scale:
df[col] = scalled[:, i]
i += 1
df.head()
df.drop(cols_to_encode, axis=1, inplace=True)
df.head()
new_df = pd.concat([dummies, df], axis=1)
new_df.shape
new_df.head()
sum(new_df.isna().sum())
# # **Splitting and Training data**
x, y = new_df.drop(["price"], axis=1), new_df["price"]
x.shape, y.shape
y.head()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
x_train.shape, x_test.shape
y_train.shape, y_test.shape
# # **Model Building and predictions**
model = LinearRegression()
model.fit(x_train, y_train)
model.score(x_test, y_test)
model.score(x_train, y_train)
# # **That's a decent score**
# # **Cross Validation scores**
models = [
LinearRegression(),
Ridge(),
Lasso(),
KNeighborsRegressor(),
SVR(kernel="linear"),
]
mean_scores = []
# use a separate loop variable so the fitted LinearRegression bound to `model`
# above is not overwritten by unfitted estimators
for reg in models:
    print("Model:", reg)
    cv_scores = cross_val_score(reg, x, y, cv=5)
    print("Cross Val Scores:", cv_scores)
    print("Mean score:", cv_scores.mean())
    mean_scores.append(cv_scores.mean())
    print("\n")
mds = []
for i in range(len(models)):
mds.append(str(models[i]))
mds
mean_df = pd.DataFrame({"Model": mds, "Mean CVScore": mean_scores})
sns.barplot(data=mean_df, y="Model", x="Mean CVScore")
plt.show()
svm_model = SVR()
svm_model.fit(x_train, y_train)
svm_model.score(x_test, y_test)
y_pred_test = model.predict(x_test)
mean_squared_error(y_test, y_pred_test)
# # **Actual vs Predicted**
def actual_vs_predicted(model, data, y_true, title=None):
pred = model.predict(data)
apdf = pd.DataFrame({"Actual": y_true, "Predicted": np.round(pred)})
plt.figure(figsize=(10, 10))
sns.scatterplot(data=apdf, x="Actual", y="Predicted")
plt.title(title)
plt.show()
actual_vs_predicted(model, x_test, y_test, "Linear Regression - Test Data")
actual_vs_predicted(model, x_train, y_train, "Linear Regression - Train Data")
# # **Let's use RandomForestRegressor**
rfr_model = RandomForestRegressor()
rfr_model.fit(x_train, y_train)
rfr_model.score(x_test, y_test)
rfr_model.score(x_train, y_train)
actual_vs_predicted(rfr_model, x_test, y_test, "RandomForestRegressor Test data")
actual_vs_predicted(rfr_model, x_train, y_train, "RandomForestRegressor Train data")
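# GridSearchCV is imported at the top but never used; a small sketch of how it could
# tune the forest (the grid values below are arbitrary illustrative assumptions):
param_grid = {"n_estimators": [100, 200], "max_depth": [None, 10, 20]}
search = GridSearchCV(RandomForestRegressor(), param_grid, cv=3, n_jobs=-1)
search.fit(x_train, y_train)
print("best params:", search.best_params_, "best CV score:", search.best_score_)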
# # **Saving RandomForestRegressor model as file**
joblib.dump(rfr_model, "RFR-Model")
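# A quick usage sketch for the saved model (same file name as used just above):
loaded_model = joblib.load("RFR-Model")
print("Reloaded model test R^2:", loaded_model.score(x_test, y_test))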
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/074/69074147.ipynb
|
used-bikes-prices-in-india
|
saisaathvik
|
[{"Id": 69074147, "ScriptId": 18846583, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4656934, "CreationDate": "07/26/2021 14:33:18", "VersionNumber": 1.0, "Title": "Used bike prediction", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 256.0, "LinesInsertedFromPrevious": 256.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91829751, "KernelVersionId": 69074147, "SourceDatasetVersionId": 2292177}]
|
[{"Id": 2292177, "DatasetId": 1381603, "DatasourceVersionId": 2333407, "CreatorUserId": 5519712, "LicenseName": "CC0: Public Domain", "CreationDate": "06/01/2021 08:57:52", "VersionNumber": 1.0, "Title": "Used Bikes Prices in India", "Slug": "used-bikes-prices-in-india", "Subtitle": "Dataset of ~32000 used Bike data scraped from www.droom.in", "Description": "### Context\n\nThis dataset contains information about approx 32000 used bikes scraped from www.droom.in\n\n\n### Content\n\nThis dataset comprises bikes a range of all used bikes sold on droom.in. It includes features like power, kilometers drive, Age of the bike etc.\n\n\n### Acknowledgements\n\nAll data was scraped from www.droom.in using Webscraper.io and Instant Data Scraper tools\n\n\n### Inspiration\n\nThe aim to model a resale valuation for used bikes and predict the price of used bikes. This can be helpful while selling a used bike or buying a used bike.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1381603, "CreatorUserId": 5519712, "OwnerUserId": 5519712.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2292177.0, "CurrentDatasourceVersionId": 2333407.0, "ForumId": 1400793, "Type": 2, "CreationDate": "06/01/2021 08:57:52", "LastActivityDate": "06/01/2021", "TotalViews": 18119, "TotalDownloads": 2770, "TotalVotes": 47, "TotalKernels": 18}]
|
[{"Id": 5519712, "UserName": "saisaathvik", "DisplayName": "Sai Saathvik Domala", "RegisterDate": "07/24/2020", "PerformanceTier": 1}]
|
# # Dataset: Covid19 Tokyo
# tokyo_covid19_patients.csv
# - date
# tokyo_covid19_positivity.csv
# - date
# - pcr_positives
# - antigen_positives
# - pcr_negatives
# - antigen_negatives
# - examined
# Covid19 open data in Tokyo, Japan: https://stopcovid19.metro.tokyo.lg.jp/
# Kaggle dataset identifier: covid19-tokyo
import os
import numpy as np
import pandas as pd
import random
import seaborn as sns
import datetime as datetime
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from contextlib import contextmanager
from time import time
from tqdm import tqdm
import lightgbm as lgbm
from sklearn.metrics import classification_report, log_loss, accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
# # data : tokyo_covid19_patients.csv
data0 = pd.read_csv("../input/covid19-tokyo/tokyo_covid19_patients - 0726.csv")
data0[-5:]
data0["pcr_positives"] = 1
data1 = data0.groupby("date", as_index=False).sum()
data1[-5:]
data1["positives mean 7-day"] = data1["pcr_positives"].rolling(window=7).mean()
data1["positives max 7-day"] = data1["pcr_positives"].rolling(window=7).max()
data1["positives min 7-day"] = data1["pcr_positives"].rolling(window=7).min()
data1[-5:].T
fig = make_subplots(specs=[[{"secondary_y": False}]])
fig.add_trace(
go.Scatter(
x=data1["date"], y=data1["positives mean 7-day"], name="positives mean 7-day"
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
x=data1["date"], y=data1["positives max 7-day"], name="positives max 7-day"
),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
x=data1["date"], y=data1["positives min 7-day"], name="positives min 7-day"
),
secondary_y=False,
)
fig.update_layout(
autosize=False,
width=700,
height=500,
title_text="Examined Positives (rolling 7-day) in Tokyo",
)
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Cases", secondary_y=False)
fig.show()
col0 = data1.columns.to_list()
col1 = col0 + ["pm-7", "slope"]
data2 = pd.DataFrame(columns=col1)
data2[col0] = data1
n = len(data1)
for i in range(n):
pmi = data2["positives mean 7-day"][i]
data2.loc[i + 7, "pm-7"] = pmi
# The slope is defined by the formula below;
# 'pm-7' is the 'positives mean 7-day' value from 7 days earlier.
data2["slope"] = (data2["positives mean 7-day"] - data2["pm-7"]) / 7
data3 = data2[["date", "pcr_positives", "positives mean 7-day", "pm-7", "slope"]]
data4 = data3[14:-7]
print(data4[243:258]) ### 3rd wave
print()
print(data4[372:387]) ### 4th wave
print()
print(data4[468:483]) ### 5th wave
print()
print(data4[-14:]) ### latest
# # The slope value is a sensitive early indicator of an explosive rise in infections in the near future.
# * On 2020-11-08, the slope value exceeded 3.0. This was the beginning of the 3rd wave.
# * On 2021-03-14, the slope value exceeded 3.0. This was the beginning of the 4th wave.
# * On 2021-06-22, the slope value exceeded 3.0. This was the beginning of the 5th wave.
# A quick programmatic check of these threshold crossings is sketched below.
# # Now in the 5th wave! The slope value has exceeded 20 since 2021/07/08.
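# A minimal check sketch (assuming `data4` as constructed above): list the days on
# which the slope crosses the 3.0 threshold, i.e. days above the threshold whose
# previous day was not.
threshold = 3.0
above = data4["slope"] > threshold
crossings = data4.loc[above & ~above.shift(1, fill_value=False), ["date", "slope"]]
print(crossings)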
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Scatter(x=data4["date"], y=data4["slope"], name="slope"),
secondary_y=False,
)
fig.add_trace(
go.Scatter(
x=data4["date"], y=data4["positives mean 7-day"], name="positives mean 7-day"
),
secondary_y="positives mean 7-day",
)
fig.update_layout(
autosize=False,
width=700,
height=500,
title_text="Slope change of positives mean 7-day in Tokyo",
)
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Slope", secondary_y=False)
fig.update_yaxes(title_text="positives mean 7-day", secondary_y=True)
fig.show()
# # Dataset: lama_whl
# Kaggle dataset identifier: lama-whl
# !python setup.py build > /dev/null
# !python setup.py install > /dev/null
import textstat
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
from sklearn.feature_extraction.text import TfidfVectorizer
import transformers
import torch
from transformers import BertTokenizer
from sklearn.metrics import mean_squared_error as mse
from sklearn.model_selection import KFold
import lightgbm as lgb
from fastprogress.fastprogress import progress_bar
from sklearn.metrics import mean_squared_error
from lightautoml.automl.presets.text_presets import TabularNLPAutoML
from lightautoml.tasks import Task
ss = pd.read_csv("../input/commonlitreadabilityprize/sample_submission.csv")
train_df = pd.read_csv("../input/commonlitreadabilityprize/train.csv")
test_df = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
train_df.target.min(), train_df.target.max()
TIMEOUT = 15000 # Time in seconds for automl run
TARGET_NAME = "target" # Target column name
def rmse(x, y):
return np.sqrt(mean_squared_error(x, y))
task = Task("reg", metric=rmse)
roles = {
"target": TARGET_NAME,
"text": ["excerpt"],
"drop": ["id", "standard_error", "url_legal", "license"],
}
# # preprocess
def preprocess(data):
excerpt_processed = []
for e in progress_bar(data["excerpt"]):
# find alphabets
e = re.sub("[^a-zA-Z]", " ", e)
# convert to lower case
e = e.lower()
# tokenize words
e = nltk.word_tokenize(e)
# remove stopwords
e = [word for word in e if not word in set(stopwords.words("english"))]
# lemmatization
lemma = nltk.WordNetLemmatizer()
e = [lemma.lemmatize(word) for word in e]
e = " ".join(e)
excerpt_processed.append(e)
return excerpt_processed
train_df["excerpt_preprocessed"] = preprocess(train_df)
# test_df["excerpt_preprocessed"] = preprocess(test_df)
# # Handcrafted features from Kaggle notebooks
from textblob.tokenizers import SentenceTokenizer, WordTokenizer
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import random
import os
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import (
train_test_split,
StratifiedShuffleSplit,
StratifiedKFold,
)
# import textstat
plt.style.use("seaborn-talk")
from readcalc import readcalc
from sklearn.preprocessing import StandardScaler
import joblib
import spacy
sp = spacy.load("en_core_web_sm")
def pos_to_id(pos_name):
return sp.vocab[pos_name].orth
content_poss = ["ADJ", "NOUN", "VERB", "ADV"]
def count_poss(text, poss_names):
text = sp(text)
poss_ids = [pos_to_id(pos_name) for pos_name in poss_names]
pos_freq_dict = text.count_by(spacy.attrs.POS)
poss_sum = sum([pos_freq_dict.get(pos_id, 0) for pos_id in poss_ids])
return poss_sum
count_poss("my name is", ["PRON", "NOUN"])
# !pip download textstat ReadabilityCalculator
# !pip install *.whl
sent_tokenizer = SentenceTokenizer()
word_tokenizer = WordTokenizer()
# with open('../input/clrauxdata/dale-chall-3000-words.txt') as f:
# words = f.readlines()[0].split()
# common_words = dict(zip(words, [True] * len(words)))
# # df.sent_cnt.plot(kind='kde')
feats_to_drop = [
"sents_n",
"words_n",
"long_words_n",
#'difficult_words_n',
"content_words_n",
"prons_n",
"chars_n",
"syllables_n",
]
doc_feats = [
"chars_per_word",
"chars_per_sent",
"syllables_per_word",
"syllables_per_sent",
"words_per_sent",
"long_words_doc_ratio",
"difficult_words_doc_ratio",
"prons_doc_ratio",
"flesch_reading_ease",
"flesch_kincaid_grade",
"ari",
"cli",
"gunning_fog",
"lix",
"rix",
"smog",
"dcrs",
"lexical_diversity",
"content_diversity",
"lwf",
]
def create_handcrafted_features(df):
df["sents_n"] = df.excerpt.apply(textstat.sentence_count)
df["words_n"] = df.excerpt.apply(textstat.lexicon_count)
df["long_words_n"] = df.excerpt.apply(
lambda t: readcalc.ReadCalc(t).get_words_longer_than_X(6)
)
# df['difficult_words_n'] = df.excerpt.apply(lambda t: sum([bool(common_words.get(word)) for word in word_tokenizer.tokenize(t, include_punc=False)]))
df["content_words_n"] = df.excerpt.apply(lambda t: count_poss(t, content_poss))
df["prons_n"] = df.excerpt.apply(lambda t: count_poss(t, ["PRON"]))
df["chars_n"] = df.excerpt.str.len()
df["syllables_n"] = df.excerpt.apply(textstat.syllable_count)
print("\tstage 1 finished..")
df["chars_per_word_"] = df.chars_n / df.words_n
df["chars_per_sent_"] = df.chars_n / df.sents_n
df["syllables_per_word_"] = df.syllables_n / df.words_n
df["syllables_per_sent_"] = df.syllables_n / df.sents_n
df["words_per_sent_"] = df.words_n / df.sents_n
df["long_words_doc_ratio_"] = df.long_words_n / df.words_n
# df['difficult_words_doc_ratio'] = df.difficult_words_n / df.words_n
df["prons_doc_ratio"] = df.prons_n / df.words_n
print("\tstage 2 finished..")
df["flesch_reading_ease_"] = df.excerpt.apply(textstat.flesch_reading_ease)
df["flesch_kincaid_grade_"] = df.excerpt.apply(textstat.flesch_kincaid_grade)
df["ari_"] = df.excerpt.apply(textstat.automated_readability_index)
df["cli_"] = df.excerpt.apply(textstat.coleman_liau_index)
df["gunning_fog"] = df.excerpt.apply(textstat.gunning_fog)
df["lix_"] = df.excerpt.apply(lambda t: readcalc.ReadCalc(t).get_lix_index())
df["rix_"] = df.long_words_n / df.sents_n
df["smog_"] = df.excerpt.apply(lambda t: readcalc.ReadCalc(t).get_smog_index())
df["dcrs_"] = df.excerpt.apply(textstat.dale_chall_readability_score)
df["lexical_diversity_"] = len(set(df.words_n)) / df.words_n
df["content_diversity_"] = df.content_words_n / df.words_n
df["lwf_"] = df.excerpt.apply(textstat.linsear_write_formula)
print("\tstage 3 finished..")
return df
# train_df = create_handcrafted_features(train_df)
# train_df.drop(feats_to_drop, inplace=True, axis=1)
# # TextStat
def text_2_statistics(data):
flesch_reading_ease_list, smog_index_list = [], []
flesch_kincaid_grade_list, coleman_liau_index_list = [], []
automated_readability_index_list, dale_chall_readability_score_list = [], []
difficult_words_list, linsear_write_formula_list = [], []
gunning_fog_list, text_standard_list = [], []
fernandez_huerta_list, szigriszt_pazos_list = [], []
gutierrez_polini_list, crawford_list = [], []
for sentence in progress_bar(data["excerpt"]):
flesch_reading_ease_list.append(textstat.flesch_reading_ease(sentence))
smog_index_list.append(textstat.smog_index(sentence))
flesch_kincaid_grade_list.append(textstat.flesch_kincaid_grade(sentence))
coleman_liau_index_list.append(textstat.coleman_liau_index(sentence))
automated_readability_index_list.append(
textstat.automated_readability_index(sentence)
)
dale_chall_readability_score_list.append(
textstat.dale_chall_readability_score(sentence)
)
difficult_words_list.append(textstat.difficult_words(sentence))
linsear_write_formula_list.append(textstat.linsear_write_formula(sentence))
gunning_fog_list.append(textstat.gunning_fog(sentence))
text_standard_list.append(textstat.text_standard(sentence, float_output=True))
fernandez_huerta_list.append(textstat.fernandez_huerta(sentence))
szigriszt_pazos_list.append(textstat.szigriszt_pazos(sentence))
gutierrez_polini_list.append(textstat.gutierrez_polini(sentence))
crawford_list.append(textstat.crawford(sentence))
statistics_dict = {
"flesch_reading_ease": flesch_reading_ease_list,
"smog_index": smog_index_list,
"flesch_kincaid_grade": flesch_kincaid_grade_list,
"coleman_liau_index": coleman_liau_index_list,
"automated_readability_index": automated_readability_index_list,
"dale_chall_readability_score": dale_chall_readability_score_list,
"difficult_words": difficult_words_list,
"linsear_write_formula": linsear_write_formula_list,
"gunning_fog": gunning_fog_list,
"text_standard": text_standard_list,
"fernandez_huerta": fernandez_huerta_list,
"szigriszt_pazos": szigriszt_pazos_list,
"gutierrez_polini": gutierrez_polini_list,
"crawford": crawford_list,
}
return statistics_dict
statistics_dict = text_2_statistics(train_df)
for k, v in statistics_dict.items():
train_df[k] = v
# train_txt_stat = pd.DataFrame(statistics_dict)
# # TF-IDF
vectorizer = TfidfVectorizer(max_features=1000)
train_bags = vectorizer.fit_transform(train_df["excerpt_preprocessed"].values).toarray()
train_bag_of_words_df = pd.DataFrame(train_bags)
train_bag_of_words_df.columns = vectorizer.get_feature_names()
for col in train_bag_of_words_df.columns:
train_df[col] = train_bag_of_words_df[col].values
del train_bag_of_words_df
# train_df.head()
# -------------------
def count_words_in_sentences(data):
counts = []
for sentence in progress_bar(data["excerpt_preprocessed"]):
words = sentence.split()
counts.append(len(words))
return counts
train_df["excerpt_word_counts_by_preprocessed"] = count_words_in_sentences(train_df)
# # NLTK features
from typing import List, Dict, Union
import nltk
import numpy as np
from nltk import ne_chunk, pos_tag, word_tokenize
from nltk.tree import Tree
def get_named_entities(text: str) -> List[str]:
continuous_chunk = []
current_chunk = []
for i in ne_chunk(pos_tag(word_tokenize(text))):
if isinstance(i, Tree):
current_chunk.append(" ".join(token for token, pos in i.leaves()))
elif current_chunk:
named_entity = " ".join(current_chunk)
continuous_chunk.append(named_entity)
current_chunk = []
named_entity = " ".join(current_chunk)
continuous_chunk.append(named_entity)
return continuous_chunk
_raw_tags = frozenset(
{
"LS",
"TO",
"VBN",
"''",
"WP",
"UH",
"VBG",
"JJ",
"VBZ",
"--",
"VBP",
"NN",
"DT",
"PRP",
":",
"WP$",
"NNPS",
"PRP$",
"WDT",
"(",
")",
".",
",",
"``",
"$",
"RB",
"RBR",
"RBS",
"VBD",
"IN",
"FW",
"RP",
"JJR",
"JJS",
"PDT",
"MD",
"VB",
"WRB",
"NNP",
"EX",
"NNS",
"SYM",
"CC",
"CD",
"POS",
}
)
_general_tags = frozenset({"gVB", "gNN", "gPR", "gWP", "gRB", "gJJ"})
_tagset = (*_raw_tags, *_general_tags)
def generate_text_features(text: str) -> Dict[str, Union[int, float]]:
total_count = dict.fromkeys(_tagset, 0)
tokenized_text = nltk.word_tokenize(text)
inv_text_len = 1 / len(tokenized_text)
for word, pos in nltk.pos_tag(tokenized_text):
total_count[pos] += inv_text_len
general_tag = f"g{pos[:2]}"
if general_tag in _general_tags:
total_count[general_tag] += inv_text_len
max_in_sent = dict.fromkeys(_tagset, 0)
min_in_sent = dict.fromkeys(_tagset, 0)
mean_in_sent = dict.fromkeys(_tagset, 0)
general_tags = set()
tags = set()
sentences = nltk.sent_tokenize(text)
num_sentences = len(sentences)
num_words = []
words_len = []
for sentence in map(nltk.word_tokenize, sentences):
cur_sentence_stat = dict.fromkeys(_tagset, 0)
num_words.append(len(sentence))
inv_sent_len = 1 / len(sentence)
for word, pos in nltk.pos_tag(sentence):
words_len.append(len(word))
cur_sentence_stat[pos] += inv_sent_len
tags.add(pos)
general_tag = f"g{pos[:2]}"
if general_tag in _general_tags:
general_tags.add(general_tag)
cur_sentence_stat[general_tag] += inv_sent_len
for tag in _tagset:
max_in_sent[tag] = max(max_in_sent[tag], cur_sentence_stat[tag])
min_in_sent[tag] = min(min_in_sent[tag], cur_sentence_stat[tag])
mean_in_sent[tag] += cur_sentence_stat[tag] / num_sentences
res = {}
for k, v in total_count.items():
res[f"TOTAL_{k}"] = v
for k, v in max_in_sent.items():
res[f"MAX_{k}"] = v
for k, v in min_in_sent.items():
res[f"MIN_{k}"] = v
for k, v in mean_in_sent.items():
res[f"MEAN_{k}"] = v
num_words = np.array(num_words)
words_len = np.array(words_len)
res["NUM_SENTENCES"] = len(num_words)
res["MEAN_NUM_WORDS"] = num_words.mean()
res["STD_NUM_WORDS"] = num_words.std()
res["NUM_WORDS"] = len(words_len)
res["MEAN_WORD_LEN"] = words_len.mean()
res["STD_WORD_LEN"] = words_len.std()
res["TAGS_UNIQUE"] = len(tags)
res["GENERAL_TAGS_UNIQUE"] = len(general_tags)
named_entities = get_named_entities(text)
res["NAMED_ENTITIES_PER_SENTENCE"] = len(named_entities) / num_sentences
res["UNIQUE_NAMED_ENTITIES_PER_SENTENCE"] = len(set(named_entities)) / num_sentences
return res
def max_word_lenght(sentence):
    # length of the longest word in the sentence
    words = sentence.split()
    longest = max(len(word) for word in words)
    return longest
def get_all_nltk_feats(text):
res = generate_text_features(text)
res["number_get_named_entities"] = len(get_named_entities(text))
res["max_word_lenght"] = max_word_lenght(text)
new_res = {}
for k, v in res.items():
new_res[k] = [v]
return new_res
# txt = 'Say hello to my little friend, Bro! I love you, Sarra!'
# nltk_feats = get_all_nltk_feats(txt)
# nltk_feats
# txt = 'Say hello to my little friend, Bro! I love you, Sarra!'
# nltk_feats = count_part_of_speechs(txt)
nltk_feats_df = pd.DataFrame()
for txt in progress_bar(train_df["excerpt"]):
nltk_feats_dict = get_all_nltk_feats(txt)
nltk_feats_df = nltk_feats_df.append(pd.DataFrame(nltk_feats_dict))
for col in nltk_feats_df.columns:
train_df[col] = nltk_feats_df[col].values
# keep nltk_feats_df: it is reused below for the correlation heatmap
# (and `corr` is not defined yet at this point, so deleting it here would fail)
train_df.head()
# # K-Best filtering
# from sklearn.feature_selection import SelectKBest, f_regression
# nltk_feats_df.fillna(0, inplace=True)
# feature_names = list(nltk_feats_df.columns.values)
# kb = SelectKBest(f_regression, k=60)
# kb.fit(nltk_feats_df, train_df['target'])
# mask = kb.get_support() #list of booleans
# new_features = [] # The list of your K best features
# for bool, feature in zip(mask, feature_names):
# if bool:
# new_features.append(feature)
# nltk_feats_df = pd.DataFrame(kb.transform(nltk_feats_df), columns=new_features)
# # nltk_feats_df = pd.DataFrame(kb.transform(X_test))
nltk_feats_df["target"] = train_df["target"]
corr = abs(nltk_feats_df.corr())
import seaborn as sns
# calculate the correlation matrix
# corr = abs(train_df.corr())
from matplotlib.pyplot import figure
figure(figsize=(10, 32), dpi=100)
# plot the heatmap
# show each feature's absolute correlation with the target as a single tall column
sns.heatmap(corr[["target"]], xticklabels=["target"], yticklabels=corr.index)
def preprocess_text(df):
df["len_tokens"] = df["excerpt"].str.strip().str.split(" ").apply(len)
df["len"] = df["excerpt"].str.strip().apply(len)
df["len_sent"] = df["excerpt"].str.strip().str.split(".").apply(len)
df["n_comm"] = df["excerpt"].str.strip().str.split(",").apply(len)
_t = df["excerpt"].str.strip().str.split(" ").values
df["d_mean"] = [np.sum([j.isdigit() for j in i]) for i in _t]
df["u_mean"] = [np.sum([j.isupper() for j in i]) for i in _t]
preprocess_text(train_df)
# Important: double-check the number of rows and columns here!
print(train_df.shape)
train_df.head()
automl = TabularNLPAutoML(
task=task,
timeout=TIMEOUT,
general_params={
"nested_cv": True,
"use_algos": [
[
"linear_l2",
"nn",
"lgb",
"lgb_tuned",
"cb",
]
],
},
text_params={"lang": "en", "bert_model": "../input/roberta-base"},
reader_params={"cv": 5},
linear_pipeline_params={"text_features": "embed"},
autonlp_params={
"model_name": "pooled_bert",
"transformer_params": {
"model_params": {"pooling": "mean"},
"dataset_params": {"max_length": 220}, # поменял max_length. было 220
"loader_params": {"batch_size": 64, "shuffle": False, "num_workers": 4},
},
},
nn_params={
"opt_params": {"lr": 3e-5},
"lang": "en",
"path_to_save": "./models",
"bert_name": "../input/roberta-base",
"snap_params": {"k": 1, "early_stopping": True, "patience": 2, "swa": False},
"init_bias": False,
"pooling": "mean",
"max_length": 220,
"bs": 32,
"n_epochs": 20, # поменял max_length. было 220
"use_cont": False,
"use_cat": False,
},
)
oof_pred = automl.fit_predict(train_df, roles=roles)
print("")
print(rmse(train_df[TARGET_NAME], oof_pred.data[:, 0]))
from lightautoml.addons.interpretation import LimeTextExplainer
lime = LimeTextExplainer(automl, feature_selection="lasso", force_order=False)
df = train_df.iloc[0]
exp = lime.explain_instance(df, perturb_column="excerpt")
exp.visualize_in_notebook()
print(df[TARGET_NAME])
df = train_df.iloc[1]
exp = lime.explain_instance(df, perturb_column="excerpt")
exp.visualize_in_notebook()
print(df[TARGET_NAME])
# df = train_df.iloc[100]
# exp = lime.explain_instance(df, perturb_column='excerpt')
# exp.visualize_in_notebook()
# print(df[TARGET_NAME])
df = train_df.iloc[777]
exp = lime.explain_instance(df, perturb_column="excerpt")
exp.visualize_in_notebook()
print(df[TARGET_NAME])
# df = train_df.iloc[2222]
# exp = lime.explain_instance(df, perturb_column='excerpt')
# exp.visualize_in_notebook()
# print(df[TARGET_NAME])
import pickle
with open("LAMA_model.pkl", "wb") as f:
pickle.dump(automl, f)
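# A minimal reload sketch (assuming the "LAMA_model.pkl" file written above): the
# fitted AutoML pipeline can be unpickled later in a scoring session; note that any
# new data must carry the same engineered columns as train_df before calling predict.
with open("LAMA_model.pkl", "rb") as f:
    automl_loaded = pickle.load(f)
print(type(automl_loaded))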
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
# sns.boxplot(x=df['SibSp'])
# men = df.loc[df.Age <= 2]["Survived"]
# rate_men = sum(men)/len(men)
# print(sum(men),len(men))
# print("% of men who survived:", rate_men)
df.info()
test_df.info()
from sklearn import preprocessing
def getTitle(data):
for dataset in data:
dataset["Title"] = dataset["Name"].str.extract(" ([A-Za-z]+)\.")
dataset["Title"] = dataset["Title"].replace(
[
"Lady",
"Countess",
"Capt",
"Col",
"Don",
"Dr",
"Major",
"Rev",
"Sir",
"Jonkheer",
"Dona",
"Mlle",
"Mme",
],
"Other",
)
dataset["Title"] = dataset["Title"].replace("Ms", "Miss")
return data
def featureEngineer(data):
columns = ["Fare", "Age"]
for col in columns:
data[col].fillna(data[col].mean(), inplace=True)
data.Embarked.fillna("U", inplace=True)
labelEn = preprocessing.LabelEncoder()
columns = ["SibSp", "Parch"]
for col in columns:
data[col] = labelEn.fit_transform(data[col])
oneHotFeatures = ["Sex", "Embarked", "Title"]
data = data.join(pd.get_dummies(data[oneHotFeatures]))
data = data.drop(
["PassengerId", "Name", "Sex", "Ticket", "Cabin", "Embarked", "Title"], axis=1
)
return data
X = df.copy()
Y = test_df.copy()
import matplotlib.pyplot as plt
y = df.pop("Survived")
train_test_data = [X, Y]
train_test_data = getTitle(train_test_data)
X = featureEngineer(X)
X = X.drop(["Survived", "Embarked_U"], axis=1)
Y = featureEngineer(Y)
X.head()
Y.head()
# z_scores = stats.zscore(X)
# z=np.abs(stats.zscore(X))
# abs_z_scores = np.abs(z_scores)
# filtered_entries = (abs_z_scores < 5).all(axis=1)
# X = X[filtered_entries]
# y = X.pop("Survived")
# bins = [0, 5, 10, 20, 28, 40, 90]
# labels=[0,1,2,3,5,6]
# X['AgeBins'] = pd.cut(X['Age'], bins=bins, labels=labels, include_lowest=True)
# Y['AgeBins'] = pd.cut(Y['Age'], bins=bins, labels=labels, include_lowest=True)
# print (X)
# bins = [0, 8.0, 15.0, 32.0, 600.0]
# labels=[0,1,2,3]
# X['FareBins'] = pd.cut(X['Fare'], bins=bins, labels=labels, include_lowest=True)
# Y['FareBins'] = pd.cut(Y['Fare'], bins=bins, labels=labels, include_lowest=True)
# print (X)
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler()
# X['ScaledFare'] =(X['Fare']-X['Fare'].min())/(X['Fare'].max()-X['Fare'].min())
# X.drop(['Fare'],axis=1)
# Y['ScaledFare'] =(Y['Fare']-Y['Fare'].min())/(Y['Fare'].max()-Y['Fare'].min())
# Y.drop(['Fare'],axis=1)
# X['Fare'].plot(kind = 'bar')
# from sklearn.cluster import KMeans
# features = ["Age"]
# X_scaled = X.loc[:, features]
# X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0)
# kmeans = KMeans(n_clusters=5, n_init=10, random_state=0)
# X["AgeCluster"] = kmeans.fit_predict(X_scaled)
# import tensorflow as tf
# resolution_in_degrees = 10.0
# feature_columns = []
# latitude_as_a_numeric_column = tf.feature_column.numeric_column("Age")
# latitude_boundaries = list(np.arange(int(min(X['Age'])),
# int(max(X['Age'])),
# resolution_in_degrees))
# latitude = tf.feature_column.bucketized_column(latitude_as_a_numeric_column,
# latitude_boundaries)
# feature_columns.append(latitude)
# from sklearn.feature_selection import mutual_info_regression
# def make_mi_scores(X, y):
# X = X.copy()
# for colname in X.select_dtypes(["object", "category"]):
# X[colname], _ = X[colname].factorize()
# # All discrete features should now have integer dtypes
# discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes]
# mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features, random_state=0)
# mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
# mi_scores = mi_scores.sort_values(ascending=False)
# return mi_scores
# mi_scores = make_mi_scores(X, y)
# print(mi_scores)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(Y)
output = pd.DataFrame({"PassengerId": test_df.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
#
#
# # Introduction 🦾
# This notebook addresses the prediction of emission values from three key pollutants - carbon monoxide ($CO$), benzene ($C_6H_6$) and nitrogen oxides ($NO_X$) - using sensor readouts as well as date and time at measurement, relative and absolute humidity and temperature. We will employ some feature engineering to encode a few temporal components, conduct cross-validation (CV) and fit a Gradient Boosting Regression (GBR) model. This dataset is part of the Tabular Playground Series - July 2021 competition.
# It came to my attention that most top entries in this competition exploit some leaked test data using pseudo-labeling, thereby cutting down error by a substantial margin. I am not particularly fond of leveraging leaked information and as such **no external data is used in this analysis**.
# We will go over feature engineering, CV and model building using the optimal CV hyperparameter values. To start things off we will load some important utilities, set random seed and a bunch of other constants and load the CSV files.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import RepeatedKFold, GridSearchCV
# Path to the files, set seed
PATH = "../input/tabular-playground-series-jul-2021/"
SEED = 999
N_FOLDS = 3
N_REPEATS = 5
TARGET_VARS = ["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"]
np.random.seed(SEED)
# Load CSV files
train = pd.read_csv(PATH + "train.csv")
test = pd.read_csv(PATH + "test.csv")
subm = pd.read_csv(PATH + "sample_submission.csv", index_col="date_time")
# # Feature engineering 🔨
# The training predictor set is of size 7111 x 9, whereas the target set is 7111 x 3. The test predictor set is of size 2247 x 9. Given the relative paucity of predictors in the dataset we will make the best of it by investigating ways of engineering the underlying predictors. In the present analysis I consider the following:
# * Capture periodicity over month and hour using $sin$ and $cos$ encodings. If we had picked a categorical encoding instead, the model would be oblivious to the fact that January and December are consecutive months. The same holds for hour of the day. As for weekdays, since there are only seven values, this cyclical encoding might degrade model performance.
# * Identify weekends using a single binary feature. One might expect weekends to associate with lower emissions.
# To extract these features we will process both train and test data using a custom utility defined underneath, `extract_dt_feats`.
def sin_cos_encoding(df, dt, feat_name, max_val):
# Encode variable using sin and cos
df["sin_" + feat_name] = np.sin(2 * np.pi * (dt / max_val))
df["cos_" + feat_name] = np.cos(2 * np.pi * (dt / max_val))
return None
def extract_dt_feats(df):
# Extract month and hour
date_enc = pd.to_datetime(df.date_time)
month = date_enc.dt.month
hour = date_enc.dt.hour
# Add features, compute and add is_weekend
sin_cos_encoding(df, month, "month", 12)
sin_cos_encoding(df, hour, "hour", 23)
df["is_weekend"] = date_enc.dt.day_name().isin(["Saturday", "Sunday"]) * 1
return df
# Expand features from train and test
x_train = extract_dt_feats(train.copy())
x_test = extract_dt_feats(test.copy())
# Visualize relationship between is_weekend and targets
sns.pairplot(
x_train, hue="is_weekend", vars=TARGET_VARS, corner=True, plot_kws={"alpha": 0.1}
)
# With this simple feature engineering step we increased the number of available features from 9 to 14, although `date_time` will not be used. The above scatterplots, which depict the bivariate distributions of the three target variables - highly intercorrelated - suggest that indeed weekends associate with lower emissions. We note, however, just over a hundred odd measurements of high $CO$ and $NO_X$ but comparatively low $C_6H_6$; these we might regard as outliers to be excluded from the analysis. Finally, the three target variables appear to be right-skewed and could therefore be better modeled following a log-transformation.
# I propose *i)* filtering out the said outliers using arbitrary cutoffs for $C_6H_6$ and $NO_X$, *ii)* log-transforming the three target variables and *iii)* splitting predictors and targets from `train` into `x_train` and `y_train`, accordingly; in the case of `test` we simply produce the corresponding `x_test`.
# Identify and discard odd datapoints / outliers
outliers = np.where(
(x_train.target_benzene == x_train.target_benzene.min())
& (x_train.target_nitrogen_oxides > 50)
)[0]
x_train = x_train.drop(outliers)
# Log-transform target vars
x_train[TARGET_VARS] = np.log(x_train[TARGET_VARS] + 1)
# Plot again, in log-scale
sns.pairplot(
x_train, hue="is_weekend", vars=TARGET_VARS, corner=True, plot_kws={"alpha": 0.1}
)
# Split train X and Y, drop date_time from train and test
y_train = pd.concat([x_train.pop(target) for target in TARGET_VARS], axis=1)
x_train.drop(columns="date_time", inplace=True)
x_test.drop(columns="date_time", inplace=True)
# Upon log-transformation, the correlation among the three emission targets is more evident. Also, few outliers were apparently left behind after our filtering.
# # Repeated *k*-Fold Cross-Validation ⏳
# In order to select appropriate hyperparameters for the GBR model, 5x repeated three-fold cross-validation (CV) will be employed first. We will experiment with different values of `learning_rate`, `max_depth` and `subsample`.
# Define hyperparameter and CV parameter values
pars = {
"estimator__learning_rate": [0.01, 0.05, 0.1],
"estimator__max_depth": [3, 5, 10],
"estimator__subsample": [0.5, 0.75, 1.0],
"estimator__n_estimators": [500],
}
cv_pars = RepeatedKFold(n_splits=N_FOLDS, n_repeats=N_REPEATS)
# Build and initialize CV
cv_model = MultiOutputRegressor(GradientBoostingRegressor())
crossval = GridSearchCV(
cv_model, pars, scoring="neg_mean_squared_error", cv=cv_pars, n_jobs=-1
)
crossval.fit(x_train, y_train)
# Visualize CV error
error = np.vstack(
[
crossval.cv_results_["split{}_test_score".format(str(i))]
for i in range(N_FOLDS * N_REPEATS)
]
)
plt.figure(figsize=(16, 4))
plt.boxplot(error)
plt.ylabel("neg_MSE")
# # Fit model 🧠
# Next, we take the optimal hyperparameters to fit the GBR model to the entire training set - technically, a GBR model will be fit on each of the three targets. These optimal values are contained in `crossval.best_params_`.
# Final model using optimal cross-validation parameters
opt_pars = crossval.best_params_
model = MultiOutputRegressor(
GradientBoostingRegressor(
learning_rate=opt_pars["estimator__learning_rate"],
max_depth=opt_pars["estimator__max_depth"],
        subsample=opt_pars["estimator__subsample"],
n_estimators=opt_pars["estimator__n_estimators"],
)
)
model.fit(x_train, y_train)
print("The optimal hyperparameter values are:\n", opt_pars)
# # Test set predictions ✍️
# Finally, we predict on the test set and write our predictions for all three emission targets. To revert the log-transformation applied to the targets during training, we simply take $e^{\hat{y}} - 1$.
# Get predictions
preds = model.predict(x_test)
# Recover original units
inv_preds = np.exp(preds) - 1
# Write to submission table, export
subm.iloc[:, :] = inv_preds
subm.to_csv("submission.csv")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/074/69074743.ipynb
| null | null |
[{"Id": 69074743, "ScriptId": 18695350, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 568438, "CreationDate": "07/26/2021 14:40:36", "VersionNumber": 34.0, "Title": "TPS Jul 2021 - Gradient Boosting", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 142.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 141.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
#
#
# # Introduction 🦾
# This notebook addresses the prediction of emission values from three key pollutants - carbon monoxide ($CO$), benzene ($C_6H_6$) and nitrogen oxides ($NO_X$) - using sensor readouts as well as date and time at measurement, relative and absolute humidity and temperature. We will employ some feature engineering to encode a few temporal components, conduct cross-validation (CV) and fit a Gradient Boosting Regression (GBR) model. This dataset is part of the Tabular Playground Series - July 2021 competition.
# It came to my attention that most top entries in this competition exploit some leaked test data using pseudo-labeling, thereby cutting down error by a substantial margin. I am not particularly fond of leveraging leaked information and as such **no external data is used in this analysis**.
# We will go over feature engineering, CV and model building using the optimal CV hyperparameter values. To start things off we will load some important utilities, set random seed and a bunch of other constants and load the CSV files.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import RepeatedKFold, GridSearchCV
# Path to the files, set seed
PATH = "../input/tabular-playground-series-jul-2021/"
SEED = 999
N_FOLDS = 3
N_REPEATS = 5
TARGET_VARS = ["target_carbon_monoxide", "target_benzene", "target_nitrogen_oxides"]
np.random.seed(SEED)
# Load CSV files
train = pd.read_csv(PATH + "train.csv")
test = pd.read_csv(PATH + "test.csv")
subm = pd.read_csv(PATH + "sample_submission.csv", index_col="date_time")
# # Feature engineering 🔨
# The training predictor set is of size 7111 x 9, whereas the target set is 7111 x 3. The test predictor set is of size 2247 x 9. Given the relative paucity of predictors in the dataset we will make the best of it by investigating ways of engineering the underlying predictors. In the present analysis I consider the following:
# * Capture periodicity over month and hour using $sin$ and $cos$ encodings. If we had picked a categorical encoding instead, the model would be oblivious to the fact that January and December are consecutive months. The same holds for hour of the day. As for weekdays, since there are only seven values, a $sin$/$cos$ encoding might degrade model performance, so it is not used here.
# * Identify weekends using a single binary feature. One might expect weekends to associate with lower emissions.
# To extract these features we will process both train and test data using a custom utility defined underneath, `extract_dt_feats`.
def sin_cos_encoding(df, dt, feat_name, max_val):
# Encode variable using sin and cos
df["sin_" + feat_name] = np.sin(2 * np.pi * (dt / max_val))
df["cos_" + feat_name] = np.cos(2 * np.pi * (dt / max_val))
return None
def extract_dt_feats(df):
# Extract month and hour
date_enc = pd.to_datetime(df.date_time)
month = date_enc.dt.month
hour = date_enc.dt.hour
# Add features, compute and add is_weekend
sin_cos_encoding(df, month, "month", 12)
    sin_cos_encoding(df, hour, "hour", 24)  # use the 24-hour period so hour 23 and hour 0 stay distinct
df["is_weekend"] = date_enc.dt.day_name().isin(["Saturday", "Sunday"]) * 1
return df
# Expand features from train and test
x_train = extract_dt_feats(train.copy())
x_test = extract_dt_feats(test.copy())
# Visualize relationship between is_weekend and targets
sns.pairplot(
x_train, hue="is_weekend", vars=TARGET_VARS, corner=True, plot_kws={"alpha": 0.1}
)
# With this simple feature engineering step we increased the number of available features from 9 to 14, although `date_time` will not be used. The above scatterplots, which depict the bivariate distributions of the three target variables - highly intercorrelated - suggest that indeed weekends associate with lower emissions. We note, however, just over a hundred odd measurements of high $CO$ and $NO_X$ but comparatively low $C_6H_6$; these we might regard as outliers to be excluded from the analysis. Finally, the three target variables appear to be right-skewed and could therefore be better modeled following a log-transformation.
# I propose *i)* filtering out the said outliers using arbitrary cutoffs for $C_6H_6$ and $NO_X$, *ii)* log-transforming the three target variables and *iii)* splitting predictors and targets from `train` into `x_train` and `y_train`, accordingly; in the case of `test` we simply produce the corresponding `x_test`.
# Identify and discard odd datapoints / outliers
outliers = np.where(
(x_train.target_benzene == x_train.target_benzene.min())
& (x_train.target_nitrogen_oxides > 50)
)[0]
x_train = x_train.drop(outliers)
# Log-transform target vars
x_train[TARGET_VARS] = np.log(x_train[TARGET_VARS] + 1)
# Plot again, in log-scale
sns.pairplot(
x_train, hue="is_weekend", vars=TARGET_VARS, corner=True, plot_kws={"alpha": 0.1}
)
# Split train X and Y, drop date_time from train and test
y_train = pd.concat([x_train.pop(target) for target in TARGET_VARS], axis=1)
x_train.drop(columns="date_time", inplace=True)
x_test.drop(columns="date_time", inplace=True)
# Upon log-transformation, the correlation among the three emission targets is more evident. Also, few outliers were apparently left behind after our filtering.
# # Repeated *k*-Fold Cross-Validation ⏳
# In order to select appropriate hyperparameters for the GBR model, 5x repeated three-fold cross-validation (CV) will be employed first. We will experiment with different values of `learning_rate`, `max_depth` and `subsample`.
# Define hyperparameter and CV parameter values
pars = {
"estimator__learning_rate": [0.01, 0.05, 0.1],
"estimator__max_depth": [3, 5, 10],
"estimator__subsample": [0.5, 0.75, 1.0],
"estimator__n_estimators": [500],
}
cv_pars = RepeatedKFold(n_splits=N_FOLDS, n_repeats=N_REPEATS)
# Build and initialize CV
cv_model = MultiOutputRegressor(GradientBoostingRegressor())
crossval = GridSearchCV(
cv_model, pars, scoring="neg_mean_squared_error", cv=cv_pars, n_jobs=-1
)
crossval.fit(x_train, y_train)
# Visualize CV error
error = np.vstack(
[
crossval.cv_results_["split{}_test_score".format(str(i))]
for i in range(N_FOLDS * N_REPEATS)
]
)
plt.figure(figsize=(16, 4))
plt.boxplot(error)
plt.ylabel("neg_MSE")
# # Fit model 🧠
# Next, we take the optimal hyperparameters to fit the GBR model to the entire training set - technically, a GBR model will be fit on each of the three targets. These optimal values are contained in `crossval.best_params_`.
# Final model using optimal cross-validation parameters
opt_pars = crossval.best_params_
model = MultiOutputRegressor(
GradientBoostingRegressor(
learning_rate=opt_pars["estimator__learning_rate"],
max_depth=opt_pars["estimator__max_depth"],
        subsample=opt_pars["estimator__subsample"],
n_estimators=opt_pars["estimator__n_estimators"],
)
)
model.fit(x_train, y_train)
print("The optimal hyperparameter values are:\n", opt_pars)
# # Test set predictions ✍️
# Finally, we predict on the test set and write our predictions for all three emission targets. To revert the log-transformation applied to the targets during training, we simply take $e^{\hat{y}} - 1$.
# Get predictions
preds = model.predict(x_test)
# Recover original units
inv_preds = np.exp(preds) - 1
# Write to submission table, export
subm.iloc[:, :] = inv_preds
subm.to_csv("submission.csv")
| false | 0 | 2,170 | 0 | 2,170 | 2,170 |
||
69074214
|
<jupyter_start><jupyter_text>Trivago RecSys Challenge Data 2019
### Acknowledgements
https://recsys.trivago.cloud/challenge/dataset/
Kaggle dataset identifier: trivagorecsyschallengedata2019
<jupyter_code>import pandas as pd
df = pd.read_csv('trivagorecsyschallengedata2019/item_metadata.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 927142 entries, 0 to 927141
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 item_id 927142 non-null int64
1 properties 927142 non-null object
dtypes: int64(1), object(1)
memory usage: 14.1+ MB
<jupyter_text>Examples:
{
"item_id": 5101,
"properties": "Satellite TV|Golf Course|Airport Shuttle|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Sitting Area (Rooms)|Reception (24/7)|Air Conditioning|Hypoallergenic Rooms|Cable TV|Hotel Bar|Pool Table|Bathtub|Satisfactory Rating|Room Service|Luxury Hotel|Terrace (Hotel)|Television|Minigolf...(truncated)",
}
{
"item_id": 5416,
"properties": "Satellite TV|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Sitting Area (Rooms)|Reception (24/7)|Wheelchair Accessible|Hypoallergenic Rooms|Hotel Bar|Bathtub|Satisfactory Rating|Luxury Hotel|Terrace (Hotel)|Very Good Rating|Television|Business Hotel|Shower|Cot|Hairdryer|From 3 Star...(truncated)",
}
{
"item_id": 5834,
"properties": "Satellite TV|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Reception (24/7)|Satisfactory Rating|Hiking Trail|Luxury Hotel|Terrace (Hotel)|Very Good Rating|Minigolf|Business Hotel|Shower|Cot|Hairdryer|Beach|From 3 Stars|Good Rating|Family Friendly|Desk|WiFi (Public Areas)|Openable W...(truncated)",
}
{
"item_id": 5910,
"properties": "Satellite TV|Sailing|Cosmetic Mirror|Telephone|Hotel|Cable TV|Hotel Bar|Bathtub|Satisfactory Rating|Room Service|Luxury Hotel|Terrace (Hotel)|Television|Business Hotel|Shower|From 3 Stars|Good Rating|Radio|4 Star|From 4 Stars|Family Friendly|Tennis Court (Indoor)|WiFi (Public Area...(truncated)",
}
<jupyter_code>import pandas as pd
df = pd.read_csv('trivagorecsyschallengedata2019/train.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 15932992 entries, 0 to 15932991
Data columns (total 12 columns):
# Column Dtype
--- ------ -----
0 user_id object
1 session_id object
2 timestamp int64
3 step int64
4 action_type object
5 reference object
6 platform object
7 city object
8 device object
9 current_filters object
10 impressions object
11 prices object
dtypes: int64(2), object(10)
memory usage: 1.4+ GB
<jupyter_text>Examples:
{
"user_id": "00RL8Z82B2Z1",
"session_id": "aff3928535f48",
"timestamp": "2018-11-01 01:57:40",
"step": 1,
"action_type": "search for poi",
"reference": "Newtown",
"platform": "AU",
"city": "Sydney, Australia",
"device": "mobile",
"current_filters": NaN,
"impressions": NaN,
"prices": NaN
}
{
"user_id": "00RL8Z82B2Z1",
"session_id": "aff3928535f48",
"timestamp": "2018-11-01 01:58:42",
"step": 2,
"action_type": "interaction item image",
"reference": "666856",
"platform": "AU",
"city": "Sydney, Australia",
"device": "mobile",
"current_filters": NaN,
"impressions": NaN,
"prices": NaN
}
{
"user_id": "00RL8Z82B2Z1",
"session_id": "aff3928535f48",
"timestamp": "2018-11-01 01:58:42",
"step": 3,
"action_type": "interaction item image",
"reference": "666856",
"platform": "AU",
"city": "Sydney, Australia",
"device": "mobile",
"current_filters": NaN,
"impressions": NaN,
"prices": NaN
}
{
"user_id": "00RL8Z82B2Z1",
"session_id": "aff3928535f48",
"timestamp": "2018-11-01 01:58:52",
"step": 4,
"action_type": "interaction item image",
"reference": "666856",
"platform": "AU",
"city": "Sydney, Australia",
"device": "mobile",
"current_filters": NaN,
"impressions": NaN,
"prices": NaN
}
<jupyter_script>import numpy as np
import pandas as pd
import os
from tqdm import tqdm
triv = pd.read_csv("../input/trivagorecsyschallengedata2019/train.csv")
triv.head()
len(triv)
triv.value_counts("action_type")
user_group = triv.groupby("user_id").count().reset_index()
user_group.head()
user_group.sort_values("step", ascending=False)
triv[triv.user_id == "6JWWFFNUMY6Y"].value_counts("action_type")
triv["is_clickout"] = triv.action_type == "clickout item"
triv
"i"
test = pd.read_csv("../input/trivagorecsyschallengedata2019/train.csv")
len(test) / (len(test) + len(triv))
len(triv[triv.action_type == "clickout item"])
test.head()
triv.action_type.nunique()
rating = triv[triv.reference.str.isnumeric()][
["user_id", "action_type", "reference", "timestamp"]
]
rating.action_type.value_counts()
rating.action_type.astype("category").cat.codes.value_counts()
# interaction item image=2,
# clickout item=0,
# interaction item info=3,
# interaction item rating=4,
# interaction item deals=1,
# search for item=5,
rating.action_type = rating.action_type.astype("category").cat.codes
rating = rating[["user_id", "reference", "action_type", "timestamp"]]
rating.isnull().sum()
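# Flag the first ~1/6 of users and ~1/5 of items (in order of first appearance)
# as "warm"; every remaining user/item is treated as "cold".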
wuser_dict = {}
witem_dict = {}
u = rating["user_id"].nunique() // 6
it = rating["reference"].nunique() // 5
for i, userid in tqdm(enumerate(rating["user_id"].unique())):
if i < u:
wuser_dict[userid] = True
else:
wuser_dict[userid] = False
for i, itemid in tqdm(enumerate(rating["reference"].unique())):
if i < it:
witem_dict[itemid] = True
else:
witem_dict[itemid] = False
from collections import defaultdict
warm_state = defaultdict(list)
warm_state_y = defaultdict(list)
user_cold_state = defaultdict(list)
user_cold_state_y = defaultdict(list)
item_cold_state = defaultdict(list)
item_cold_state_y = defaultdict(list)
user_and_item_cold_state = defaultdict(list)
user_and_item_cold_state_y = defaultdict(list)
npdata = rating.to_numpy()
npdata
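# Route each interaction into one of four buckets: warm user & warm item,
# cold user & warm item, warm user & cold item, or both cold.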
for rat in npdata:
if wuser_dict[rat[0]] and witem_dict[rat[1]]:
warm_state[rat[0]].append(rat[1])
warm_state_y[rat[0]].append(rat[2])
elif not wuser_dict[rat[0]] and witem_dict[rat[1]]:
user_cold_state[rat[0]].append(rat[1])
user_cold_state_y[rat[0]].append(rat[2])
elif wuser_dict[rat[0]] and not witem_dict[rat[1]]:
item_cold_state[rat[0]].append(rat[1])
item_cold_state_y[rat[0]].append(rat[2])
else:
user_and_item_cold_state[rat[0]].append(rat[1])
user_and_item_cold_state_y[rat[0]].append(rat[2])
import json
with open("warm_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(warm_state))
with open("warm_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(warm_state_y))
with open("user_cold_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_cold_state))
with open("user_cold_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_cold_state_y))
with open("item_cold_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(item_cold_state))
with open("item_cold_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(item_cold_state_y))
with open("user_and_item_cold_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_and_item_cold_state))
with open("user_and_item_cold_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_and_item_cold_state_y))
df = triv
features = ["action_type", "reference", "platform", "city", "device", "current_filters"]
for feature in features:
with open(f"m_{feature}.txt", "w") as f:
f.writelines(["%s\n" % item for item in triv[feature].unique()])
print(len(df[feature].unique()))
df[["user_id", "city", "device", "platform"]].to_csv("users.csv")
"done"
rating.head()
rating.to_csv("ratings.csv")
import datetime
rating["timestamp"].map(lambda x: datetime.datetime.fromtimestamp(x))
triv.info()
item_meta = pd.read_csv("../input/trivagorecsyschallengedata2019/item_metadata.csv")
item_meta.value_counts("properties")
items = set()
for item in item_meta.properties:
listy = set(item.split("|"))
items.update(listy)
len(items)
with open(f"m_item_properties.txt", "w") as f:
f.writelines(["%s\n" % item for item in items])
items
item_meta.to_csv("item_info.csv")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/074/69074214.ipynb
|
trivagorecsyschallengedata2019
|
pranavmahajan725
|
[{"Id": 69074214, "ScriptId": 18538582, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7864611, "CreationDate": "07/26/2021 14:34:11", "VersionNumber": 2.0, "Title": "Trivago Meta Learning", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 164.0, "LinesInsertedFromPrevious": 26.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 138.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91829899, "KernelVersionId": 69074214, "SourceDatasetVersionId": 498231}]
|
[{"Id": 498231, "DatasetId": 233749, "DatasourceVersionId": 514327, "CreatorUserId": 1293201, "LicenseName": "Unknown", "CreationDate": "06/16/2019 14:10:20", "VersionNumber": 1.0, "Title": "Trivago RecSys Challenge Data 2019", "Slug": "trivagorecsyschallengedata2019", "Subtitle": NaN, "Description": "### Acknowledgements\n\nhttps://recsys.trivago.cloud/challenge/dataset/", "VersionNotes": "Initial release", "TotalCompressedBytes": 488022369.0, "TotalUncompressedBytes": 488022369.0}]
|
[{"Id": 233749, "CreatorUserId": 1293201, "OwnerUserId": 1293201.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 498231.0, "CurrentDatasourceVersionId": 514327.0, "ForumId": 244910, "Type": 2, "CreationDate": "06/16/2019 14:10:20", "LastActivityDate": "06/16/2019", "TotalViews": 7258, "TotalDownloads": 590, "TotalVotes": 9, "TotalKernels": 4}]
|
[{"Id": 1293201, "UserName": "pranavmahajan725", "DisplayName": "Pranav Mahajan", "RegisterDate": "09/26/2017", "PerformanceTier": 1}]
|
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
triv = pd.read_csv("../input/trivagorecsyschallengedata2019/train.csv")
triv.head()
len(triv)
triv.value_counts("action_type")
user_group = triv.groupby("user_id").count().reset_index()
user_group.head()
user_group.sort_values("step", ascending=False)
triv[triv.user_id == "6JWWFFNUMY6Y"].value_counts("action_type")
triv["is_clickout"] = triv.action_type == "clickout item"
triv
"i"
test = pd.read_csv("../input/trivagorecsyschallengedata2019/train.csv")
len(test) / (len(test) + len(triv))
len(triv[triv.action_type == "clickout item"])
test.head()
triv.action_type.nunique()
rating = triv[triv.reference.str.isnumeric()][
["user_id", "action_type", "reference", "timestamp"]
]
rating.action_type.value_counts()
rating.action_type.astype("category").cat.codes.value_counts()
# interaction item image=2,
# clickout item=0,
# interaction item info=3,
# interaction item rating=4,
# interaction item deals=1,
# search for item=5,
rating.action_type = rating.action_type.astype("category").cat.codes
rating = rating[["user_id", "reference", "action_type", "timestamp"]]
rating.isnull().sum()
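# Flag the first ~1/6 of users and ~1/5 of items (in order of first appearance)
# as "warm"; every remaining user/item is treated as "cold".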
wuser_dict = {}
witem_dict = {}
u = rating["user_id"].nunique() // 6
it = rating["reference"].nunique() // 5
for i, userid in tqdm(enumerate(rating["user_id"].unique())):
if i < u:
wuser_dict[userid] = True
else:
wuser_dict[userid] = False
for i, itemid in tqdm(enumerate(rating["reference"].unique())):
if i < it:
witem_dict[itemid] = True
else:
witem_dict[itemid] = False
from collections import defaultdict
warm_state = defaultdict(list)
warm_state_y = defaultdict(list)
user_cold_state = defaultdict(list)
user_cold_state_y = defaultdict(list)
item_cold_state = defaultdict(list)
item_cold_state_y = defaultdict(list)
user_and_item_cold_state = defaultdict(list)
user_and_item_cold_state_y = defaultdict(list)
npdata = rating.to_numpy()
npdata
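# Route each interaction into one of four buckets: warm user & warm item,
# cold user & warm item, warm user & cold item, or both cold.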
for rat in npdata:
if wuser_dict[rat[0]] and witem_dict[rat[1]]:
warm_state[rat[0]].append(rat[1])
warm_state_y[rat[0]].append(rat[2])
elif not wuser_dict[rat[0]] and witem_dict[rat[1]]:
user_cold_state[rat[0]].append(rat[1])
user_cold_state_y[rat[0]].append(rat[2])
elif wuser_dict[rat[0]] and not witem_dict[rat[1]]:
item_cold_state[rat[0]].append(rat[1])
item_cold_state_y[rat[0]].append(rat[2])
else:
user_and_item_cold_state[rat[0]].append(rat[1])
user_and_item_cold_state_y[rat[0]].append(rat[2])
import json
with open("warm_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(warm_state))
with open("warm_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(warm_state_y))
with open("user_cold_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_cold_state))
with open("user_cold_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_cold_state_y))
with open("item_cold_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(item_cold_state))
with open("item_cold_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(item_cold_state_y))
with open("user_and_item_cold_state.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_and_item_cold_state))
with open("user_and_item_cold_state_y.json", "w", encoding="utf-8") as outfile:
outfile.write(json.dumps(user_and_item_cold_state_y))
df = triv
features = ["action_type", "reference", "platform", "city", "device", "current_filters"]
for feature in features:
with open(f"m_{feature}.txt", "w") as f:
f.writelines(["%s\n" % item for item in triv[feature].unique()])
print(len(df[feature].unique()))
df[["user_id", "city", "device", "platform"]].to_csv("users.csv")
"done"
rating.head()
rating.to_csv("ratings.csv")
import datetime
rating["timestamp"].map(lambda x: datetime.datetime.fromtimestamp(x))
triv.info()
item_meta = pd.read_csv("../input/trivagorecsyschallengedata2019/item_metadata.csv")
item_meta.value_counts("properties")
items = set()
for item in item_meta.properties:
listy = set(item.split("|"))
items.update(listy)
len(items)
with open(f"m_item_properties.txt", "w") as f:
f.writelines(["%s\n" % item for item in items])
items
item_meta.to_csv("item_info.csv")
|
[{"trivagorecsyschallengedata2019/item_metadata.csv": {"column_names": "[\"item_id\", \"properties\"]", "column_data_types": "{\"item_id\": \"int64\", \"properties\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 927142 entries, 0 to 927141\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 item_id 927142 non-null int64 \n 1 properties 927142 non-null object\ndtypes: int64(1), object(1)\nmemory usage: 14.1+ MB\n", "summary": "{\"item_id\": {\"count\": 927142.0, \"mean\": 5324387.229661692, \"std\": 3498382.4633818315, \"min\": 5001.0, \"25%\": 2127068.5, \"50%\": 5155462.0, \"75%\": 8662521.0, \"max\": 11284432.0}}", "examples": "{\"item_id\":{\"0\":5101,\"1\":5416,\"2\":5834,\"3\":5910},\"properties\":{\"0\":\"Satellite TV|Golf Course|Airport Shuttle|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Sitting Area (Rooms)|Reception (24\\/7)|Air Conditioning|Hypoallergenic Rooms|Cable TV|Hotel Bar|Pool Table|Bathtub|Satisfactory Rating|Room Service|Luxury Hotel|Terrace (Hotel)|Television|Minigolf|Business Hotel|Shower|Cot|Gym|Hairdryer|Hypoallergenic Bedding|Accessible Parking|From 3 Stars|Good Rating|Radio|4 Star|From 4 Stars|Family Friendly|Desk|Tennis Court (Indoor)|Balcony|WiFi (Public Areas)|Openable Windows|Express Check-In \\/ Check-Out|Restaurant|Laundry Service|Ironing Board|Tennis Court|From 2 Stars|Business Centre|Bowling|Conference Rooms|Electric Kettle|Accessible Hotel|Porter|Bike Rental|Non-Smoking Rooms|Car Park|Safe (Rooms)|Fitness|Fan|Flatscreen TV|Computer with Internet|WiFi (Rooms)|Lift|Central Heating\",\"1\":\"Satellite TV|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Sitting Area (Rooms)|Reception (24\\/7)|Wheelchair Accessible|Hypoallergenic Rooms|Hotel Bar|Bathtub|Satisfactory Rating|Luxury Hotel|Terrace (Hotel)|Very Good Rating|Television|Business Hotel|Shower|Cot|Hairdryer|From 3 Stars|Good Rating|Radio|4 Star|From 4 Stars|Family Friendly|Desk|WiFi (Public Areas)|Openable Windows|Spa (Wellness Facility)|Laundry Service|Free WiFi (Combined)|From 2 Stars|Conference Rooms|Sauna|Bike Rental|Free WiFi (Rooms)|Non-Smoking Rooms|Car Park|Flatscreen TV|Excellent Rating|Computer with Internet|Pet Friendly|WiFi (Rooms)|Free WiFi (Public Areas)|Lift\",\"2\":\"Satellite TV|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Reception (24\\/7)|Satisfactory Rating|Hiking Trail|Luxury Hotel|Terrace (Hotel)|Very Good Rating|Minigolf|Business Hotel|Shower|Cot|Hairdryer|Beach|From 3 Stars|Good Rating|Family Friendly|Desk|WiFi (Public Areas)|Openable Windows|Free WiFi (Combined)|Boat Rental|Gay-friendly|From 2 Stars|Bowling|3 Star|Free WiFi (Rooms)|Non-Smoking Rooms|Car Park|Safe (Rooms)|Flatscreen TV|Singles|Computer with Internet|WiFi (Rooms)|Free WiFi (Public Areas)|Lift|Central Heating\",\"3\":\"Satellite TV|Sailing|Cosmetic Mirror|Telephone|Hotel|Cable TV|Hotel Bar|Bathtub|Satisfactory Rating|Room Service|Luxury Hotel|Terrace (Hotel)|Television|Business Hotel|Shower|From 3 Stars|Good Rating|Radio|4 Star|From 4 Stars|Family Friendly|Tennis Court (Indoor)|WiFi (Public Areas)|Openable Windows|Restaurant|Laundry Service|Free WiFi (Combined)|Tennis Court|From 2 Stars|Solarium|Conference Rooms|Bike Rental|Non-Smoking Rooms|Car Park|Concierge|Safe (Rooms)|Computer with Internet|Pet Friendly|Free WiFi (Public Areas)|Lift|Central Heating\"}}"}}, {"trivagorecsyschallengedata2019/train.csv": {"column_names": "[\"user_id\", \"session_id\", \"timestamp\", \"step\", \"action_type\", \"reference\", \"platform\", \"city\", 
\"device\", \"current_filters\", \"impressions\", \"prices\"]", "column_data_types": "{\"user_id\": \"object\", \"session_id\": \"object\", \"timestamp\": \"int64\", \"step\": \"int64\", \"action_type\": \"object\", \"reference\": \"object\", \"platform\": \"object\", \"city\": \"object\", \"device\": \"object\", \"current_filters\": \"object\", \"impressions\": \"object\", \"prices\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15932992 entries, 0 to 15932991\nData columns (total 12 columns):\n # Column Dtype \n--- ------ ----- \n 0 user_id object\n 1 session_id object\n 2 timestamp int64 \n 3 step int64 \n 4 action_type object\n 5 reference object\n 6 platform object\n 7 city object\n 8 device object\n 9 current_filters object\n 10 impressions object\n 11 prices object\ndtypes: int64(2), object(10)\nmemory usage: 1.4+ GB\n", "summary": "{\"timestamp\": {\"count\": 15932992.0, \"mean\": 1541304041.0163977, \"std\": 150309.10171553047, \"min\": 1541030408.0, \"25%\": 1541173676.0, \"50%\": 1541319766.0, \"75%\": 1541436748.0, \"max\": 1541548799.0}, \"step\": {\"count\": 15932992.0, \"mean\": 75.58612186587428, \"std\": 144.5524397817697, \"min\": 1.0, \"25%\": 8.0, \"50%\": 28.0, \"75%\": 81.0, \"max\": 3522.0}}", "examples": "{\"user_id\":{\"0\":\"00RL8Z82B2Z1\",\"1\":\"00RL8Z82B2Z1\",\"2\":\"00RL8Z82B2Z1\",\"3\":\"00RL8Z82B2Z1\"},\"session_id\":{\"0\":\"aff3928535f48\",\"1\":\"aff3928535f48\",\"2\":\"aff3928535f48\",\"3\":\"aff3928535f48\"},\"timestamp\":{\"0\":1541037460,\"1\":1541037522,\"2\":1541037522,\"3\":1541037532},\"step\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"action_type\":{\"0\":\"search for poi\",\"1\":\"interaction item image\",\"2\":\"interaction item image\",\"3\":\"interaction item image\"},\"reference\":{\"0\":\"Newtown\",\"1\":\"666856\",\"2\":\"666856\",\"3\":\"666856\"},\"platform\":{\"0\":\"AU\",\"1\":\"AU\",\"2\":\"AU\",\"3\":\"AU\"},\"city\":{\"0\":\"Sydney, Australia\",\"1\":\"Sydney, Australia\",\"2\":\"Sydney, Australia\",\"3\":\"Sydney, Australia\"},\"device\":{\"0\":\"mobile\",\"1\":\"mobile\",\"2\":\"mobile\",\"3\":\"mobile\"},\"current_filters\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"impressions\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"prices\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}]
| true | 2 |
<start_data_description><data_path>trivagorecsyschallengedata2019/item_metadata.csv:
<column_names>
['item_id', 'properties']
<column_types>
{'item_id': 'int64', 'properties': 'object'}
<dataframe_Summary>
{'item_id': {'count': 927142.0, 'mean': 5324387.229661692, 'std': 3498382.4633818315, 'min': 5001.0, '25%': 2127068.5, '50%': 5155462.0, '75%': 8662521.0, 'max': 11284432.0}}
<dataframe_info>
RangeIndex: 927142 entries, 0 to 927141
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 item_id 927142 non-null int64
1 properties 927142 non-null object
dtypes: int64(1), object(1)
memory usage: 14.1+ MB
<some_examples>
{'item_id': {'0': 5101, '1': 5416, '2': 5834, '3': 5910}, 'properties': {'0': 'Satellite TV|Golf Course|Airport Shuttle|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Sitting Area (Rooms)|Reception (24/7)|Air Conditioning|Hypoallergenic Rooms|Cable TV|Hotel Bar|Pool Table|Bathtub|Satisfactory Rating|Room Service|Luxury Hotel|Terrace (Hotel)|Television|Minigolf|Business Hotel|Shower|Cot|Gym|Hairdryer|Hypoallergenic Bedding|Accessible Parking|From 3 Stars|Good Rating|Radio|4 Star|From 4 Stars|Family Friendly|Desk|Tennis Court (Indoor)|Balcony|WiFi (Public Areas)|Openable Windows|Express Check-In / Check-Out|Restaurant|Laundry Service|Ironing Board|Tennis Court|From 2 Stars|Business Centre|Bowling|Conference Rooms|Electric Kettle|Accessible Hotel|Porter|Bike Rental|Non-Smoking Rooms|Car Park|Safe (Rooms)|Fitness|Fan|Flatscreen TV|Computer with Internet|WiFi (Rooms)|Lift|Central Heating', '1': 'Satellite TV|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Sitting Area (Rooms)|Reception (24/7)|Wheelchair Accessible|Hypoallergenic Rooms|Hotel Bar|Bathtub|Satisfactory Rating|Luxury Hotel|Terrace (Hotel)|Very Good Rating|Television|Business Hotel|Shower|Cot|Hairdryer|From 3 Stars|Good Rating|Radio|4 Star|From 4 Stars|Family Friendly|Desk|WiFi (Public Areas)|Openable Windows|Spa (Wellness Facility)|Laundry Service|Free WiFi (Combined)|From 2 Stars|Conference Rooms|Sauna|Bike Rental|Free WiFi (Rooms)|Non-Smoking Rooms|Car Park|Flatscreen TV|Excellent Rating|Computer with Internet|Pet Friendly|WiFi (Rooms)|Free WiFi (Public Areas)|Lift', '2': 'Satellite TV|Cosmetic Mirror|Safe (Hotel)|Telephone|Hotel|Reception (24/7)|Satisfactory Rating|Hiking Trail|Luxury Hotel|Terrace (Hotel)|Very Good Rating|Minigolf|Business Hotel|Shower|Cot|Hairdryer|Beach|From 3 Stars|Good Rating|Family Friendly|Desk|WiFi (Public Areas)|Openable Windows|Free WiFi (Combined)|Boat Rental|Gay-friendly|From 2 Stars|Bowling|3 Star|Free WiFi (Rooms)|Non-Smoking Rooms|Car Park|Safe (Rooms)|Flatscreen TV|Singles|Computer with Internet|WiFi (Rooms)|Free WiFi (Public Areas)|Lift|Central Heating', '3': 'Satellite TV|Sailing|Cosmetic Mirror|Telephone|Hotel|Cable TV|Hotel Bar|Bathtub|Satisfactory Rating|Room Service|Luxury Hotel|Terrace (Hotel)|Television|Business Hotel|Shower|From 3 Stars|Good Rating|Radio|4 Star|From 4 Stars|Family Friendly|Tennis Court (Indoor)|WiFi (Public Areas)|Openable Windows|Restaurant|Laundry Service|Free WiFi (Combined)|Tennis Court|From 2 Stars|Solarium|Conference Rooms|Bike Rental|Non-Smoking Rooms|Car Park|Concierge|Safe (Rooms)|Computer with Internet|Pet Friendly|Free WiFi (Public Areas)|Lift|Central Heating'}}
<end_description>
<start_data_description><data_path>trivagorecsyschallengedata2019/train.csv:
<column_names>
['user_id', 'session_id', 'timestamp', 'step', 'action_type', 'reference', 'platform', 'city', 'device', 'current_filters', 'impressions', 'prices']
<column_types>
{'user_id': 'object', 'session_id': 'object', 'timestamp': 'int64', 'step': 'int64', 'action_type': 'object', 'reference': 'object', 'platform': 'object', 'city': 'object', 'device': 'object', 'current_filters': 'object', 'impressions': 'object', 'prices': 'object'}
<dataframe_Summary>
{'timestamp': {'count': 15932992.0, 'mean': 1541304041.0163977, 'std': 150309.10171553047, 'min': 1541030408.0, '25%': 1541173676.0, '50%': 1541319766.0, '75%': 1541436748.0, 'max': 1541548799.0}, 'step': {'count': 15932992.0, 'mean': 75.58612186587428, 'std': 144.5524397817697, 'min': 1.0, '25%': 8.0, '50%': 28.0, '75%': 81.0, 'max': 3522.0}}
<dataframe_info>
RangeIndex: 15932992 entries, 0 to 15932991
Data columns (total 12 columns):
# Column Dtype
--- ------ -----
0 user_id object
1 session_id object
2 timestamp int64
3 step int64
4 action_type object
5 reference object
6 platform object
7 city object
8 device object
9 current_filters object
10 impressions object
11 prices object
dtypes: int64(2), object(10)
memory usage: 1.4+ GB
<some_examples>
{'user_id': {'0': '00RL8Z82B2Z1', '1': '00RL8Z82B2Z1', '2': '00RL8Z82B2Z1', '3': '00RL8Z82B2Z1'}, 'session_id': {'0': 'aff3928535f48', '1': 'aff3928535f48', '2': 'aff3928535f48', '3': 'aff3928535f48'}, 'timestamp': {'0': 1541037460, '1': 1541037522, '2': 1541037522, '3': 1541037532}, 'step': {'0': 1, '1': 2, '2': 3, '3': 4}, 'action_type': {'0': 'search for poi', '1': 'interaction item image', '2': 'interaction item image', '3': 'interaction item image'}, 'reference': {'0': 'Newtown', '1': '666856', '2': '666856', '3': '666856'}, 'platform': {'0': 'AU', '1': 'AU', '2': 'AU', '3': 'AU'}, 'city': {'0': 'Sydney, Australia', '1': 'Sydney, Australia', '2': 'Sydney, Australia', '3': 'Sydney, Australia'}, 'device': {'0': 'mobile', '1': 'mobile', '2': 'mobile', '3': 'mobile'}, 'current_filters': {'0': None, '1': None, '2': None, '3': None}, 'impressions': {'0': None, '1': None, '2': None, '3': None}, 'prices': {'0': None, '1': None, '2': None, '3': None}}
<end_description>
| 1,535 | 0 | 3,046 | 1,535 |
69074344
|
<jupyter_start><jupyter_text>Palmer Archipelago (Antarctica) penguin data
Please refer to the official [Github page](https://github.com/allisonhorst/penguins/blob/master/README.md) for details and license information. The details below have also been taken from there.
Artwork: @allison_horst
# Palmer Archipelago (Antarctica) penguin data
Data were collected and made available by [Dr. Kristen Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) and the [Palmer Station, Antarctica LTER](https://pal.lternet.edu/), a member of the [Long Term Ecological Research Network](https://lternet.edu/).
**Thank you** to Dr. Gorman, Palmer Station LTER and the LTER Network! Special thanks to Marty Downs (Director, LTER Network Office) for help regarding the data license & use.
## License & citation
- **Data are available by** [CC-0](https://creativecommons.org/share-your-work/public-domain/cc0/) license in accordance with the [Palmer Station LTER Data Policy](http://pal.lternet.edu/data/policies) and the [LTER Data Access Policy for Type I data](https://lternet.edu/data-access-policy/).
- **Please cite this data using:** Gorman KB, Williams TD, Fraser WR (2014) Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus *Pygoscelis*). PLoS ONE 9(3): e90081. doi:10.1371/journal.pone.0090081
## Summary:
The data folder contains two CSV files. For intro courses/examples, you probably want to use the first one (penguins_size.csv).
- **penguins_size.csv**: Simplified data from original penguin data sets. Contains variables:
- `species`: penguin species (Chinstrap, Adélie, or Gentoo)
- `culmen_length_mm`: culmen length (mm)
- `culmen_depth_mm`: culmen depth (mm)
- `flipper_length_mm`: flipper length (mm)
- `body_mass_g`: body mass (g)
- `island`: island name (Dream, Torgersen, or Biscoe) in the Palmer Archipelago (Antarctica)
- `sex`: penguin sex
- **penguins_lter.csv**: Original combined data for 3 penguin species (aggregated from individual links below)
#### Meet the penguins:

#### What are culmen length & depth?
The culmen is "the upper ridge of a bird's beak" (definition from Oxford Languages).
For this penguin data, the culmen length and culmen depth are measured as shown below (thanks Kristen Gorman for clarifying!):

<hr>
## Data:
These data are originally published in:
[**Gorman KB, Williams TD, Fraser WR** (2014) Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus *Pygoscelis*). PLoS ONE 9(3): e90081. doi:10.1371/journal.pone.0090081](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0090081)
Anyone interested in publishing the data should contact [Dr. Kristen Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) about analysis and working together on any final products.
From Gorman et al. (2014): "Data reported here are publicly available within the PAL-LTER data system (datasets #219, 220, and 221): http://oceaninformatics.ucsd.edu/datazoo/data/pallter/datasets. These data are additionally archived within the United States (US) LTER Network’s Information System Data Portal: https://portal.lternet.edu/. Individuals interested in using these data are therefore expected to follow the US LTER Network’s Data Access Policy, Requirements and Use Agreement: https://lternet.edu/data-access-policy/."
From the LTER data access policy: "The consumer of these data (“Data User” herein) has an ethical obligation to cite it appropriately in any publication that results from its use. The Data User should realize that these data may be actively used by others for ongoing research and that coordination may be necessary to prevent duplicate publication. The Data User is urged to contact the authors of these data if any questions about methodology or results occur. Where appropriate, the Data User is encouraged to consider collaboration or coauthorship with the authors. The Data User should realize that misinterpretation of data may occur if used out of context of the original study. While substantial efforts are made to ensure the accuracy of data and associated documentation, complete accuracy of data sets cannot be guaranteed. All data are made available “as is.” The Data User should be aware, however, that data are updated periodically and it is the responsibility of the Data User to check for new versions of the data. The data authors and the repository where these data were obtained shall not be liable for damages resulting from any use or misinterpretation of the data. Thank you."
## Links to original data & metadata:
Original data accessed via the [Environmental Data Initiative](https://environmentaldatainitiative.org/):
**Adélie penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Adélie penguins (*Pygoscelis adeliae*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 5. Environmental Data Initiative. https://doi.org/10.6073/pasta/98b16d7d563f265cb52372c8ca99e60f (Accessed 2020-06-08).
**Gentoo penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Gentoo penguin (*Pygoscelis papua*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 5. Environmental Data Initiative. https://doi.org/10.6073/pasta/7fca67fb28d56ee2ffa3d9370ebda689 (Accessed 2020-06-08).
**Chinstrap penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Chinstrap penguin (*Pygoscelis antarcticus*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 6. Environmental Data Initiative. https://doi.org/10.6073/pasta/c14dfcfada8ea13a17536e73eb6fbe9e (Accessed 2020-06-08).
Kaggle dataset identifier: palmer-archipelago-antarctica-penguin-data
<jupyter_script># # 20 Burning XGBoost FAQs Answered to Use the Library Like a Pro
# ## Gradient-boost your XGBoost knowledge by learning these crucial lessons
# [Header image] Photo by Haithem Ferdi on Unsplash. All images are by the author unless specified otherwise.
#
# ## Setup
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
from matplotlib import rcParams
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
rcParams["font.size"] = 15
iris = sns.load_dataset("iris").dropna()
penguins = sns.load_dataset("penguins").dropna()
i_input, i_target = iris.drop("species", axis=1), iris[["species"]]
p_input, p_target = penguins.drop("body_mass_g", axis=1), penguins[["body_mass_g"]]
p_input = pd.get_dummies(p_input)
le = LabelEncoder()
i_target = le.fit_transform(i_target.values.ravel())
X_train_i, X_test_i, y_train_i, y_test_i = train_test_split(
i_input, i_target, test_size=0.2, random_state=1121218
)
X_train_p, X_test_p, y_train_p, y_test_p = train_test_split(
p_input, p_target, test_size=0.2, random_state=1121218
)
# XGBoost is a real beast.
# It is a tree-based powerhouse that is behind the winning solutions of many tabular competitions and datathons. Currently, it is the “hottest” ML framework of the “sexiest” job in the world.
# While basic modeling with XGBoost can be straightforward, you need to master the nitty-gritty to achieve maximum performance.
# With that said, I present you this article, which is the result of
# - hours of reading the documentation (it wasn't fun)
# - crying through some awful but useful Kaggle kernels
# - hundreds of Google keyword searches
# - completely exhausting my Medium membership by reading a lotta articles
# The post answers the 20 most burning questions on XGBoost and its API. These should be enough to make you look like you have been using the library forever.
# ## 1. Which API should I choose - Scikit-learn or the core learning API?
# XGBoost in Python has two APIs — Scikit-learn compatible (estimators have the familiar `fit/predict` pattern) and the core XGBoost-native API (a global `train` function for both classification and regression).
# The majority of the Python community, including Kagglers and myself, use the Scikit-learn API.
import xgboost as xgb
# Regression
reg = xgb.XGBRegressor()
# Classification
clf = xgb.XGBClassifier()
# ```python
# reg.fit(X_train, y_train)
# clf.fit(X_train, y_train)
# ```
# This API enables you to integrate XGBoost estimators into your familiar workflow. The benefits include (but are not limited to):
# - the ability to pass core XGB algorithms into [Sklearn pipelines](https://towardsdatascience.com/how-to-use-sklearn-pipelines-for-ridiculously-neat-code-a61ab66ca90d?source=your_stories_page-------------------------------------)
# - using a more efficient cross-validation workflow
# - avoiding the hassles that come with learning a new API, etc.
# ## 2. How do I completely control the randomness in XGBoost?
# > From here on, references to XGBoost algorithms mainly mean the Sklearn-compatible XGBRegressor and XGBClassifier (or similar) estimators.
# The estimators have the `random_state` parameter (the alternative seed has been deprecated but still works). However, running XGBoost with default parameters will yield identical results even with different seeds.
reg1 = xgb.XGBRegressor(random_state=1).fit(X_train_p, y_train_p)
reg2 = xgb.XGBRegressor(random_state=2).fit(X_train_p, y_train_p)
reg1.score(X_test_p, y_test_p) == reg2.score(X_test_p, y_test_p)
# This behavior is because XGBoost induces randomness only when `subsample` or any other parameter that starts with `colsample_by*` prefix is used. As the names suggest, these parameters have a lot to do with [random sampling](https://towardsdatascience.com/why-bootstrap-sampling-is-the-badass-tool-of-probabilistic-thinking-5d8c7343fb67?source=your_stories_page-------------------------------------).
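# A quick illustrative sketch (the 0.7 value is arbitrary): once `subsample` is
# below 1.0, different seeds generally produce different scores on the same split.
reg3 = xgb.XGBRegressor(subsample=0.7, random_state=1).fit(X_train_p, y_train_p)
reg4 = xgb.XGBRegressor(subsample=0.7, random_state=2).fit(X_train_p, y_train_p)
reg3.score(X_test_p, y_test_p) == reg4.score(X_test_p, y_test_p)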
# ## 3. What are objectives in XGBoost and how to specify them for different tasks?
# Both regression and classification tasks come in several flavors. These differ in the loss (objective) function they optimize and in the target distributions they can handle.
# You can switch between these implementations with the `objective` parameter. It accepts special code strings provided by XGBoost.
# Regression objectives have the `reg:` prefix while classification starts either with `binary:` or `multi:`.
# I will leave it to you to explore the full list of objectives from [this documentation page](https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters) as there are quite a few.
# Also, specifying the correct objective and metric gets rid of that unbelievably annoying warning you get when fitting XGB classifiers.
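# A couple of illustrative objective strings (the choices here are examples, not
# recommendations): squared-error regression and multi-class class probabilities.
reg_obj = xgb.XGBRegressor(objective="reg:squarederror")
clf_obj = xgb.XGBClassifier(objective="multi:softprob", use_label_encoder=False)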
# ## 4. Which booster should I use in XGBoost - gblinear, gbtree, dart?
# > XGBoost has 3 types of gradient boosted learners - these are gradient boosted (GB) linear functions, GB trees and DART trees. You can switch the learners using the `booster` parameter.
# If you ask Kagglers, they will choose boosted trees over linear functions on any day (as do I). The reason is that trees can capture non-linear, complex relationships that linear functions cannot.
# So, the only question is which tree booster should you pass to the `booster` parameter - `gbtree` or `dart`?
# I won’t bother you with the full differences here. The thing you should know is that XGBoost uses an ensemble of decision tree-based models when used with gbtree booster.
# DART trees are an improvement (yet to be fully validated) in which a random subset of the decision trees is dropped during training to prevent overfitting.
# In the few small experiments I did with default parameters for `gbtree` and `dart`, I got slightly better scores with dart when I set the `rate_drop` between 0.1 and 0.3.
# For more details, I refer you to [this page](https://xgboost.readthedocs.io/en/latest/tutorials/dart.html) of the XGB documentation to learn about the nuances and additional hyperparameters.
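# A small sketch of the DART setup described above (the rate_drop value is
# arbitrary within the 0.1-0.3 range I mentioned).
dart_reg = xgb.XGBRegressor(booster="dart", rate_drop=0.2, n_estimators=200)
_ = dart_reg.fit(X_train_p, y_train_p)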
# ## 5. Which tree method should I use in XGBoost?
# There are 5 types of algorithms that control tree construction. You should pass `hist` to `tree_method` if you are doing distributed training.
# For other scenarios, the default (and recommended) is `auto`, which changes from `exact` for small-to-medium datasets to `approx` for large datasets.
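# Illustrative only - explicitly requesting the histogram-based method versus
# keeping the default behavior.
hist_reg = xgb.XGBRegressor(tree_method="hist")
auto_reg = xgb.XGBRegressor(tree_method="auto")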
# ## 6. What is a boosting round in XGBoost?
# As we said, XGBoost is an ensemble of gradient boosted decision trees. Each tree in the ensemble is called a base or weak learner. A weak learner is any algorithm that performs slightly better than random guessing.
# By combining the predictions of multiple weak learners, XGBoost yields a final, robust prediction (skipping a lot of details now).
# Each time we fit a tree to the data, it is called a single boosting round.
# So, to specify the number of trees to be built, pass an integer to `num_boost_round` of the Learning API or to `n_estimators` of the Sklearn API.
# Typically, too few trees lead to underfitting, and too many trees lead to overfitting. You will normally tune this parameter with hyperparameter optimization.
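# Sketch: an ensemble of 500 trees, i.e. 500 boosting rounds, via the Sklearn API.
# With the core learning API the equivalent would be passing num_boost_round=500
# to xgb.train.
reg_500 = xgb.XGBRegressor(n_estimators=500)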
# ## 7. What is `early_stopping_rounds` in XGBoost?
# From one boosting round to the next, XGBoost builds upon the predictions of the last tree.
# If the predictions do not improve after a sequence of rounds, it is sensible to stop training even if we are not at a hard stop for `num_boost_round` or `n_estimators`.
# To achieve this, XGBoost provides the `early_stopping_rounds` parameter. For example, setting it to 50 means we stop the training if the predictions have not improved for the last 50 rounds.
# It is a good practice to set a higher number for `n_estimators` and change early stopping accordingly to achieve better results.
# Before I show an example of how it is done in code, there are two other XGBoost parameters to discuss.
# ## 8. What are `eval_set`s in XGBoost?
# Early stopping is only enabled when you pass a set of evaluation data to the `fit` method. These evaluation sets are used to keep track of the ensemble's performance from one round to the next.
# A tree is trained on the passed training sets at each round, and to see if the score has been improving, it makes predictions on the passed evaluation sets. Here is what it looks like in code:
reg = xgb.XGBRegressor(objective="reg:squarederror", n_estimators=1000)
reg = reg.fit(
X_train_p,
y_train_p,
eval_set=[(X_test_p, y_test_p)],
early_stopping_rounds=5,
)
# > Set `verbose` to False to get rid of the log messages.
# After the 14th iteration, the score starts decreasing. So the training stops at the 19th iteration because an early-stopping patience of 5 rounds is applied.
# It is also possible to pass multiple evaluation sets to `eval_set` as a list of (X, y) pairs, but only the last pair will be used for early stopping.
# > Check out [this post](https://machinelearningmastery.com/avoid-overfitting-by-early-stopping-with-xgboost-in-python/) to learn more about early stopping and evaluation sets.
# ## 9. When do evaluation metrics have effect in XGBoost?
# You can specify various evaluation metrics using the `eval_metric` parameter of the fit method. Passed metrics only have an effect internally - for example, they are used to assess the quality of the predictions during early stopping.
# You should change the metric according to the objective you choose. You can find the full list of objectives and their supported metrics on [this page](https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters) of the documentation.
# Below is an example of an XGBoost classifier with multi-class log loss and ROC AUC as metrics:
clf = xgb.XGBClassifier(
objective="multi:softprob", n_estimators=200, use_label_encoder=False
)
eval_set = [(X_test_i, y_test_i)]
_ = clf.fit(
X_train_i,
y_train_i,
eval_set=eval_set,
eval_metric=["auc", "mlogloss"],
early_stopping_rounds=5,
)
# No matter what metric you pass to `eval_metric`, it only affects the fit function. So, when you call `score()` on the classifier, it will still yield accuracy, which is the default in Sklearn:
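# A quick check on the iris split from above - this reports plain accuracy,
# regardless of the eval_metric values passed to fit.
clf.score(X_test_i, y_test_i)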
# ## 10. What is learning rate (eta) in XGBoost?
# Each time XGBoost adds a new tree to the ensemble, it is used to correct the residual errors of the last group of trees.
# The problem is that this approach is fast and powerful, making the algorithm quickly learn and overfit the training data. So, XGBoost or any other gradient boosting algorithm has a `learning_rate` parameter that controls the speed of fitting and combats overfitting.
# Typical values for `learning_rate` range from 0.1 to 0.3, but it is possible to go beyond these, especially towards 0.
# Whatever value passed to `learning_rate`, it plays as a weighting factor for the corrections made by new trees. So, a lower learning rate means we place less importance on the corrections of the new trees, hence avoiding overfitting.
# A good practice is to set a low number for `learning_rate` and use early stopping with a larger number of estimators (`n_estimators`):
reg = xgb.XGBRegressor(
objective="reg:squaredlogerror", n_estimators=1000, learning_rate=0.01
)
eval_set = [(X_test_p, y_test_p)]
_ = reg.fit(
X_train_p,
y_train_p,
eval_set=eval_set,
early_stopping_rounds=10,
eval_metric="rmsle",
verbose=False,
)
# You will immediately see the effect of the low `learning_rate` because early stopping will kick in much later during training (in the above case, after the 430th iteration).
# However, each dataset is different, so you need to tune this parameter with hyperparameter optimization.
# > Check out [this post](https://machinelearningmastery.com/tune-learning-rate-for-gradient-boosting-with-xgboost-in-python/) on how to tune learning rate.
# ## 11. Should you let XGBoost deal with missing values?
# For this, I will give the advice I've got from two different Kaggle Competition Grandmasters.
# 1. If you give `np.nan` to tree-based models, then, at each node split, the missing values are sent either to the left child or the right child of the node, depending on what's best. So, at each split, missing values get special treatment, which may lead to overfitting. A simple solution that works pretty well with trees is to fill in nulls with a value different from the rest of the samples, like -999.
# 2. Even though packages like XGBoost and LightGBM can treat nulls without preprocessing, it is always a good idea to develop your own imputation strategy.
# For real-world datasets, you should always investigate the type of missingness (MCAR, MAR, MNAR) and choose an imputation strategy (value-based [mean, median, mode] or model-based [KNN imputers or tree-based imputers]).
# If you are not familiar with these terms, I got you covered [here](https://towardsdatascience.com/going-beyond-the-simpleimputer-for-missing-data-imputation-dd8ba168d505?source=your_stories_page-------------------------------------).
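# Two illustrative sketches of the ideas above (the penguin split used here has no
# missing values after dropna, so this is purely for demonstration):
# 1) the "fill with an out-of-range constant" trick for tree models,
# 2) a model-based imputer from scikit-learn.
from sklearn.impute import KNNImputer

X_const_filled = X_train_p.fillna(-999)
X_knn_imputed = KNNImputer(n_neighbors=5).fit_transform(X_train_p)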
# ## 12. What is the best way of doing cross-validation with XGBoost?
# Even though XGBoost comes with built-in CV support, always go for the Sklearn CV splitters.
# When I say Sklearn, I don't mean the basic utility functions like `cross_val_score` or `cross_validate`.
# No one cross-validates that way in 2021 (well, at least not on Kaggle).
# The method that gives more flexibility and control over the CV process is to use the `.split` function of Sklearn CV splitters and implement your own CV logic inside a `for` loop.
# Here is what a 5-fold CV looks like in code:
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import KFold
cv = KFold(
n_splits=5,
shuffle=True,
random_state=1121218,
)
fold = 0
scores = np.empty(5)
for train_idx, test_idx in cv.split(p_input, p_target):
print(f"Started fold {fold}...")
# Create the training sets from training indices
X_cv_train, y_cv_train = p_input.iloc[train_idx], p_target.iloc[train_idx]
# Create the test sets from test indices
X_cv_test, y_cv_test = p_input.iloc[test_idx], p_target.iloc[test_idx]
# Init/fit XGB
model = xgb.XGBRegressor(
objective="reg:squarederror", n_estimators=10000, learning_rate=0.05
)
model.fit(
X_cv_train,
y_cv_train,
eval_set=[(X_cv_test, y_cv_test)],
early_stopping_rounds=50,
verbose=False,
)
# Generate preds, evaluate
preds = model.predict(X_cv_test)
rmsle = np.sqrt(mean_squared_log_error(y_cv_test, preds))
print("RMSLE of fold {}: {:.4f}\n".format(fold, rmsle))
scores[fold] = rmsle
fold += 1
print("Overall RMSLE: {:.4f}".format(np.mean(scores)))
# Doing CV inside a `for` loop enables you to use evaluation sets and early stopping, while simple functions like `cross_val_score` do not.
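# If you also want to record how many trees each fold actually needed, the fitted estimator exposes `best_iteration` and `best_score` once early stopping has been triggered; for example, for the last fold fitted above (you could log these inside the loop as well):
print(model.best_iteration, model.best_score)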
# ## 13. How to use XGBoost in [Sklearn Pipelines](https://towardsdatascience.com/how-to-use-sklearn-pipelines-for-ridiculously-neat-code-a61ab66ca90d)?
# If you use the Sklearn API, you can include XGBoost estimators as the last step to the pipeline (just like other Sklearn classes):
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# Make a simple pipeline
xgb_pipe = Pipeline(
steps=[
("scale", StandardScaler()),
("clf", xgb.XGBClassifier(objective="multi:softmax", use_label_encoder=False)),
]
)
# If you want to use `fit` parameters of XGBoost within pipelines, you can easily pass them to the pipeline's `fit` method. The only difference is that you should use the `stepname__parameter` syntax:
_ = xgb_pipe.fit(
X_train_i.values,
y_train_i, # Make sure to pass the rest after the data
clf__eval_set=[(X_test_i.values, y_test_i)],
clf__eval_metric="mlogloss",
clf__verbose=False,
clf__early_stopping_rounds=10,
)
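# Once fitted, the pipeline behaves like any other Sklearn estimator, so scoring and predicting run the scaler and the XGBoost step in order:
print(xgb_pipe.score(X_test_i.values, y_test_i))  # accuracy of the whole pipeline
pipe_preds = xgb_pipe.predict(X_test_i.values)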
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/074/69074344.ipynb
|
palmer-archipelago-antarctica-penguin-data
|
parulpandey
|
[{"Id": 69074344, "ScriptId": 18852222, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4686011, "CreationDate": "07/26/2021 14:35:43", "VersionNumber": 2.0, "Title": "20 Burning XGBoost FAQs Answered to Use Like a Pro", "EvaluationDate": "07/26/2021", "IsChange": false, "TotalLines": 403.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 403.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 42}]
|
[{"Id": 91830277, "KernelVersionId": 69074344, "SourceDatasetVersionId": 1228604}, {"Id": 91830276, "KernelVersionId": 69074344, "SourceDatasetVersionId": 23404}, {"Id": 91830275, "KernelVersionId": 69074344, "SourceDatasetVersionId": 2368}, {"Id": 91830274, "KernelVersionId": 69074344, "SourceDatasetVersionId": 420}]
|
[{"Id": 1228604, "DatasetId": 703056, "DatasourceVersionId": 1260169, "CreatorUserId": 391404, "LicenseName": "CC0: Public Domain", "CreationDate": "06/09/2020 10:14:54", "VersionNumber": 1.0, "Title": "Palmer Archipelago (Antarctica) penguin data", "Slug": "palmer-archipelago-antarctica-penguin-data", "Subtitle": "Drop in replacement for Iris Dataset", "Description": "Please refer to the official [Github page](https://github.com/allisonhorst/penguins/blob/master/README.md) for details and license information. The details below have also been taken from there.\n\nArtwork: @allison_horst\n\n# Palmer Archipelago (Antarctica) penguin data\n\nData were collected and made available by [Dr. Kristen Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) and the [Palmer Station, Antarctica LTER](https://pal.lternet.edu/), a member of the [Long Term Ecological Research Network](https://lternet.edu/). \n\n**Thank you** to Dr. Gorman, Palmer Station LTER and the LTER Network! Special thanks to Marty Downs (Director, LTER Network Office) for help regarding the data license & use.\n\n## License & citation\n\n- **Data are available by** [CC-0](https://creativecommons.org/share-your-work/public-domain/cc0/) license in accordance with the [Palmer Station LTER Data Policy](http://pal.lternet.edu/data/policies) and the [LTER Data Access Policy for Type I data](https://lternet.edu/data-access-policy/).\n\n- **Please cite this data using:** Gorman KB, Williams TD, Fraser WR (2014) Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus *Pygoscelis*). PLoS ONE 9(3): e90081. doi:10.1371/journal.pone.0090081\n\n\n## Summary:\n\nThe data folder contains two CSV files. For intro courses/examples, you probably want to use the first one (penguins_size.csv). \n\n- **penguins_size.csv**: Simplified data from original penguin data sets. Contains variables:\n\n - `species`: penguin species (Chinstrap, Ad\u00e9lie, or Gentoo)\n - `culmen_length_mm`: culmen length (mm) \n - `culmen_depth_mm`: culmen depth (mm) \n - `flipper_length_mm`: flipper length (mm) \n - `body_mass_g`: body mass (g) \n - `island`: island name (Dream, Torgersen, or Biscoe) in the Palmer Archipelago (Antarctica)\n - `sex`: penguin sex\n\n- **penguins_lter.csv**: Original combined data for 3 penguin species (aggregated from individual links below) \n\n#### Meet the penguins: \n\n\n#### What are culmen length & depth? \n\nThe culmen is \"the upper ridge of a bird's beak\" (definition from Oxford Languages). \n\nFor this penguin data, the culmen length and culmen depth are measured as shown below (thanks Kristen Gorman for clarifying!):\n\n\n<hr>\n## Data: \n\nThese data are originally published in: \n\n[**Gorman KB, Williams TD, Fraser WR** (2014) Ecological Sexual Dimorphism and Environmental Variability within a Community of Antarctic Penguins (Genus *Pygoscelis*). PLoS ONE 9(3): e90081. doi:10.1371/journal.pone.0090081](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0090081)\n\nAnyone interested in publishing the data should contact [Dr. Kristen Gorman](https://www.uaf.edu/cfos/people/faculty/detail/kristen-gorman.php) about analysis and working together on any final products.\n\nFrom Gorman et al. (2014): \"Data reported here are publicly available within the PAL-LTER data system (datasets #219, 220, and 221): http://oceaninformatics.ucsd.edu/datazoo/data/pallter/datasets. 
These data are additionally archived within the United States (US) LTER Network\u2019s Information System Data Portal: https://portal.lternet.edu/. Individuals interested in using these data are therefore expected to follow the US LTER Network\u2019s Data Access Policy, Requirements and Use Agreement: https://lternet.edu/data-access-policy/.\"\n\nFrom the LTER data access policy: \"The consumer of these data (\u201cData User\u201d herein) has an ethical obligation to cite it appropriately in any publication that results from its use. The Data User should realize that these data may be actively used by others for ongoing research and that coordination may be necessary to prevent duplicate publication. The Data User is urged to contact the authors of these data if any questions about methodology or results occur. Where appropriate, the Data User is encouraged to consider collaboration or coauthorship with the authors. The Data User should realize that misinterpretation of data may occur if used out of context of the original study. While substantial efforts are made to ensure the accuracy of data and associated documentation, complete accuracy of data sets cannot be guaranteed. All data are made available \u201cas is.\u201d The Data User should be aware, however, that data are updated periodically and it is the responsibility of the Data User to check for new versions of the data. The data authors and the repository where these data were obtained shall not be liable for damages resulting from any use or misinterpretation of the data. Thank you.\"\n\n## Links to original data & metadata:\n\nOriginal data accessed via the [Environmental Data Initiative](https://environmentaldatainitiative.org/): \n\n**Ad\u00e9lie penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Ad\u00e9lie penguins (*Pygoscelis adeliae*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 5. Environmental Data Initiative. https://doi.org/10.6073/pasta/98b16d7d563f265cb52372c8ca99e60f (Accessed 2020-06-08).\n\n**Gentoo penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Gentoo penguin (*Pygoscelis papua*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 5. Environmental Data Initiative. https://doi.org/10.6073/pasta/7fca67fb28d56ee2ffa3d9370ebda689 (Accessed 2020-06-08).\n\n**Chinstrap penguins:** Palmer Station Antarctica LTER and K. Gorman. 2020. Structural size measurements and isotopic signatures of foraging among adult male and female Chinstrap penguin (*Pygoscelis antarcticus*) nesting along the Palmer Archipelago near Palmer Station, 2007-2009 ver 6. Environmental Data Initiative. https://doi.org/10.6073/pasta/c14dfcfada8ea13a17536e73eb6fbe9e (Accessed 2020-06-08).", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 703056, "CreatorUserId": 391404, "OwnerUserId": 391404.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1228604.0, "CurrentDatasourceVersionId": 1260169.0, "ForumId": 717743, "Type": 2, "CreationDate": "06/09/2020 10:14:54", "LastActivityDate": "06/09/2020", "TotalViews": 158509, "TotalDownloads": 51754, "TotalVotes": 423, "TotalKernels": 239}]
|
[{"Id": 391404, "UserName": "parulpandey", "DisplayName": "Parul Pandey", "RegisterDate": "07/26/2015", "PerformanceTier": 4}]
|
| false | 0 | 4,532 | 42 | 6,457 | 4,532 |
||
69179816
|
# **Here is my notebook to solve this task using simple dnn (dense neural network)!**
# # Import libs
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
train = pd.read_csv("../input/commonlitreadabilityprize/train.csv")
train.head(3)
# Across all my experiments, I found this to be the best way to preprocess the text. Interestingly, non-alphabetic characters don't need to be deleted.
# build the English stopword set once so it is not recomputed for every token
stop_words = set(stopwords.words("english"))
def text_preprocessing(text):
    tokenized = word_tokenize(text)
    text = [i for i in tokenized if i not in stop_words]
    text = " ".join(text)
    return text
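# quick sanity check of the preprocessing on a made-up sentence (assumes the NLTK 'punkt' and 'stopwords' resources are available in this environment)
print(text_preprocessing("This is a simple example of the preprocessing step."))
# prints roughly: "This simple example preprocessing step ." (lower-case stopwords removed, punctuation kept)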
x = train.excerpt.apply(text_preprocessing)
y = train.target
tokenizer = Tokenizer()
# fit only train set
tokenizer.fit_on_texts(x)
# if you have a validation/test dataset, remember to transform it with the tokenizer that was already fitted on the train data
x = tokenizer.texts_to_sequences(x)
# find out the longest sequence length for padding
len_seq_list = [len(s) for s in x]
max_seq_len = np.max(len_seq_list)
# pad sequence for first embedding layer
x = tf.keras.preprocessing.sequence.pad_sequences(
x, padding="post", truncating="post", maxlen=max_seq_len
)
# hyperparameters
voc_size = len(tokenizer.index_word) + 1
epochs = 70
batch_size = 1024
embedding_dim = 512
dropout_rate = 0.5
# # Build model
model = tf.keras.Sequential(
[
tf.keras.layers.Embedding(voc_size, embedding_dim, input_length=max_seq_len),
# gap1d layer shows better accuracy than flatten layer
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(512, activation="relu"),
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(dropout_rate),
tf.keras.layers.Dense(1),
]
)
model.summary()
# Decent callbacks that monitor the train/val loss and keep the model from overfitting.
early_stopping = EarlyStopping(patience=5, restore_best_weights=True, verbose=1)
lr_reduce = ReduceLROnPlateau(patience=2, verbose=1)
callbacks = [early_stopping, lr_reduce]
# # Training
model.compile(optimizer=tf.keras.optimizers.Adam(), loss="mean_squared_error")
history = model.fit(
x,
y,
epochs=epochs,
batch_size=batch_size,
validation_split=0.2,
callbacks=callbacks,
)
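# To see whether the callbacks did their job, it helps to plot the train/validation loss curves from the returned `history` object (matplotlib is not imported above, so it is imported here):
import matplotlib.pyplot as plt
plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="val loss")
plt.xlabel("epoch")
plt.ylabel("MSE loss")
plt.legend()
plt.show()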
# # Make a submission file
sub = pd.read_csv("../input/commonlitreadabilityprize/sample_submission.csv")
test = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
test_data = test.excerpt.apply(text_preprocessing)
test_data = tokenizer.texts_to_sequences(test_data)
test_data = tf.keras.preprocessing.sequence.pad_sequences(
test_data, padding="post", truncating="post", maxlen=max_seq_len
)
test_pred = model.predict(test_data)
sub["target"] = test_pred
sub.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/179/69179816.ipynb
| null | null |
[{"Id": 69179816, "ScriptId": 18582054, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5198228, "CreationDate": "07/27/2021 18:29:15", "VersionNumber": 17.0, "Title": "simple dnn to achieve decent score", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 36.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 81.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 914 | 0 | 914 | 914 |
||
69179557
|
<jupyter_start><jupyter_text>Flight Take Off Data - JFK Airport
### Context
This data was scraped as part of an academic paper under review by IEEE Transportation.
### Content
This file contains data about flights leaving from JFK airport between Nov 2019 and Dec 2020. Taxi-out prediction is an important problem because it helps in calculating runway time and directly impacts the cost of the flight.
Kaggle dataset identifier: flight-take-off-data-jfk-airport
<jupyter_code>import pandas as pd
df = pd.read_csv('flight-take-off-data-jfk-airport/M1_final.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 28820 entries, 0 to 28819
Data columns (total 23 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 MONTH 28820 non-null int64
1 DAY_OF_MONTH 28820 non-null int64
2 DAY_OF_WEEK 28820 non-null int64
3 OP_UNIQUE_CARRIER 28820 non-null object
4 TAIL_NUM 28820 non-null object
5 DEST 28820 non-null object
6 DEP_DELAY 28820 non-null int64
7 CRS_ELAPSED_TIME 28820 non-null int64
8 DISTANCE 28820 non-null int64
9 CRS_DEP_M 28820 non-null int64
10 DEP_TIME_M 28820 non-null int64
11 CRS_ARR_M 28820 non-null int64
12 Temperature 28820 non-null int64
13 Dew Point 28820 non-null object
14 Humidity 28820 non-null int64
15 Wind 28818 non-null object
16 Wind Speed 28820 non-null int64
17 Wind Gust 28820 non-null int64
18 Pressure 28820 non-null float64
19 Condition 28820 non-null object
20 sch_dep 28820 non-null int64
21 sch_arr 28820 non-null int64
22 TAXI_OUT 28820 non-null int64
dtypes: float64(1), int64(16), object(6)
memory usage: 5.1+ MB
<jupyter_text>Examples:
{
"MONTH": 11,
"DAY_OF_MONTH": 1,
"DAY_OF_WEEK": 5,
"OP_UNIQUE_CARRIER": "B6",
"TAIL_NUM": "N828JB",
"DEST": "CHS",
"DEP_DELAY": -1,
"CRS_ELAPSED_TIME": 124,
"DISTANCE": 636,
"CRS_DEP_M": 324,
"DEP_TIME_M": 323,
"CRS_ARR_M": 448,
"Temperature": 48,
"Dew Point": 34,
"Humidity": 58,
"Wind": "W",
"Wind Speed": 25,
"Wind Gust": 38,
"Pressure": 29.86,
"Condition": "Fair / Windy",
"...": "and 3 more columns"
}
{
"MONTH": 11,
"DAY_OF_MONTH": 1,
"DAY_OF_WEEK": 5,
"OP_UNIQUE_CARRIER": "B6",
"TAIL_NUM": "N992JB",
"DEST": "LAX",
"DEP_DELAY": -7,
"CRS_ELAPSED_TIME": 371,
"DISTANCE": 2475,
"CRS_DEP_M": 340,
"DEP_TIME_M": 333,
"CRS_ARR_M": 531,
"Temperature": 48,
"Dew Point": 34,
"Humidity": 58,
"Wind": "W",
"Wind Speed": 25,
"Wind Gust": 38,
"Pressure": 29.86,
"Condition": "Fair / Windy",
"...": "and 3 more columns"
}
{
"MONTH": 11,
"DAY_OF_MONTH": 1,
"DAY_OF_WEEK": 5,
"OP_UNIQUE_CARRIER": "B6",
"TAIL_NUM": "N959JB",
"DEST": "FLL",
"DEP_DELAY": 40,
"CRS_ELAPSED_TIME": 181,
"DISTANCE": 1069,
"CRS_DEP_M": 301,
"DEP_TIME_M": 341,
"CRS_ARR_M": 482,
"Temperature": 48,
"Dew Point": 34,
"Humidity": 58,
"Wind": "W",
"Wind Speed": 25,
"Wind Gust": 38,
"Pressure": 29.86,
"Condition": "Fair / Windy",
"...": "and 3 more columns"
}
{
"MONTH": 11,
"DAY_OF_MONTH": 1,
"DAY_OF_WEEK": 5,
"OP_UNIQUE_CARRIER": "B6",
"TAIL_NUM": "N999JQ",
"DEST": "MCO",
"DEP_DELAY": -2,
"CRS_ELAPSED_TIME": 168,
"DISTANCE": 944,
"CRS_DEP_M": 345,
"DEP_TIME_M": 343,
"CRS_ARR_M": 513,
"Temperature": 48,
"Dew Point": 34,
"Humidity": 58,
"Wind": "W",
"Wind Speed": 25,
"Wind Gust": 38,
"Pressure": 29.86,
"Condition": "Fair / Windy",
"...": "and 3 more columns"
}
<jupyter_script>import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from warnings import filterwarnings
filterwarnings("ignore")
pd.options.display.max_columns = None
pd.options.display.max_rows = None
from sklearn.preprocessing import LabelEncoder
# suppress scientific ('e') notation when displaying floats
pd.options.display.float_format = "{:.6f}".format
from sklearn.model_selection import train_test_split
import statsmodels
import statsmodels.api as sm
import statsmodels.stats.api as sms
import statsmodels.formula.api as smf
from statsmodels.graphics.gofplots import qqplot
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.tsa.api as smt
from scipy import stats
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler
# install mlxtend at runtime ("pip.main" was removed in newer pip releases, so call pip via subprocess)
import subprocess, sys
subprocess.check_call([sys.executable, "-m", "pip", "install", "mlxtend"])
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn import preprocessing
from sklearn.linear_model import SGDRegressor
# import function for ridge regression
from sklearn.linear_model import Ridge
# import function for lasso regression
from sklearn.linear_model import Lasso
# import function for elastic net regression
from sklearn.linear_model import ElasticNet
# import function to perform GridSearchCV
from sklearn.model_selection import GridSearchCV
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("../input/flight-take-off-data-jfk-airport/M1_final.csv")
data.head()
data.shape
data.info()
df_cat = data.select_dtypes(include="object")  # np.object is deprecated; the "object" dtype string is equivalent
df_num = data.select_dtypes(include=np.number)
print("categorical : ", df_cat.columns)
print("numerical : ", df_num.columns)
plt.rcParams["figure.figsize"] = [12, 12]
df_num.hist()
plt.show()
# skewness and kurtosis
j = []
skew = []
kurtosis = []
for i in df_num.columns[3:]:
j.append(i)
skew.append(data[i].skew())
kurtosis.append(data[i].kurt())
skew_kurtosis = pd.DataFrame({"column name": j, "skew": skew, "kurtosis": kurtosis})
skew_kurtosis
k = 1
plt.figure(figsize=(30, 30))
for i in df_num.columns[3:]:
plt.subplot(5, 3, k)
sns.distplot(data[i])
k += 1
# # EDA process
#
plt.figure(figsize=(20, 15))
sns.heatmap(data.corr(), annot=True)
plt.show()
data.isnull().sum()
# we find missing values in Wind
data.describe().T
for i in data.columns:
print(i)
print("---------")
print(data[i].value_counts())
print("+++")
data[data.isnull().any(axis=1)]
data["Wind"].groupby((data["DEST"] == "FLL")).value_counts()
data["Wind"].groupby((data["DEST"] == "PWM")).value_counts()
data["Wind"].value_counts()
data["Wind"].replace(np.nan, "W", inplace=True)
data.isnull().sum()
# null values treated
# #### Outlier Treatment
k = 1
plt.figure(figsize=(20, 20))
for i in df_num.columns[3:]:
if data[i].dtypes != "object":
plt.subplot(4, 4, k)
sns.boxplot(x=data[i])
k += 1
sns.boxplot(x="DAY_OF_WEEK", y="TAXI_OUT", data=data)
Q3 = data["DEP_DELAY"].quantile(0.75)
Q1 = data["DEP_DELAY"].quantile(0.25)
IQR = Q3 - Q1
UL = Q3 + (1.5) * IQR
LL = Q1 - (1.5) * IQR
df = data[(data["DEP_DELAY"] >= LL) & (data["DEP_DELAY"] <= UL)]
df.shape
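# The same IQR rule can be wrapped in a small helper if other columns ever need the same treatment (a sketch; only DEP_DELAY is filtered in this notebook):
def iqr_bounds(series, k=1.5):
    # lower/upper whisker limits used for outlier filtering
    q1, q3 = series.quantile(0.25), series.quantile(0.75)
    iqr = q3 - q1
    return q1 - k * iqr, q3 + k * iqr
low, high = iqr_bounds(data["DEP_DELAY"])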
k = 1
plt.figure(figsize=(20, 20))
for i in df.columns[3:]:
if df[i].dtypes != "object":
plt.subplot(5, 3, k)
sns.boxplot(x=df[i])
k += 1
data.head()
# #### Model on the dataset with outliers (before treatment)
label_encoder = LabelEncoder()
data["OP_UNIQUE_CARRIER"] = label_encoder.fit_transform(
data["OP_UNIQUE_CARRIER"].astype(str)
)
data["DEST"] = label_encoder.fit_transform(data["DEST"].astype(str))
data["Wind"] = label_encoder.fit_transform(data["Wind"].astype(str))
data["Condition"] = label_encoder.fit_transform(data["Condition"].astype(str))
data.head()
data.drop("TAIL_NUM", axis=1, inplace=True)
print(data.shape)
data["Dew Point"] = data["Dew Point"].astype(int)
data.info()
X = data.drop("TAXI_OUT", axis=1)
Y = data["TAXI_OUT"]
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=1)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
# OLS model
Xc = sm.add_constant(X)
model_v1 = sm.OLS(Y, Xc).fit()
model_v1.summary()
model_v1.resid.skew() # Normality of Residuals
# Checking assumptions
# Use original data (To Build the Model)
sns.distplot(model_v1.resid)
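# A Q-Q plot is another standard check of residual normality; qqplot is already imported from statsmodels above:
qqplot(model_v1.resid.values, line="s")
plt.show()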
# Linear Regression
linear_regression = LinearRegression()
linear_regression.fit(X_train.values, Y_train.values)
rmse = mean_squared_error(Y_test, linear_regression.predict(X_test)) ** 0.5
print(rmse)
# Ridge Regression
ridge_regression = Ridge(alpha=0.05, normalize=True)
ridge_regression.fit(X_train, Y_train)
rmse2 = mean_squared_error(Y_test, ridge_regression.predict(X_test)) ** 0.5
print(rmse2)
# Lasso Regression
lasso_regression = Lasso(alpha=1, max_iter=1000, tol=0.01)
lasso_regression.fit(X_train, Y_train)
rmse3 = mean_squared_error(Y_test, lasso_regression.predict(X_test)) ** 0.5
print(rmse3)
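# GridSearchCV is imported above but never used; here is a sketch of how the Ridge/Lasso alphas could be tuned instead of being fixed by hand (the grid values are illustrative):
param_grid = {"alpha": [0.001, 0.01, 0.05, 0.1, 1, 10]}
ridge_cv = GridSearchCV(Ridge(), param_grid, scoring="neg_root_mean_squared_error", cv=5)
ridge_cv.fit(X_train, Y_train)
print(ridge_cv.best_params_, -ridge_cv.best_score_)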
# #### Model after outlier treatment
label_encoder = LabelEncoder()
df["OP_UNIQUE_CARRIER"] = label_encoder.fit_transform(
df["OP_UNIQUE_CARRIER"].astype(str)
)
df["DEST"] = label_encoder.fit_transform(df["DEST"].astype(str))
df["Wind"] = label_encoder.fit_transform(df["Wind"].astype(str))
df["Condition"] = label_encoder.fit_transform(df["Condition"].astype(str))
df.head()
df.drop("TAIL_NUM", axis=1, inplace=True)
print(df.shape)
df["Dew Point"] = df["Dew Point"].astype(int)
df.info()
X = df.drop("TAXI_OUT", axis=1)
Y = df["TAXI_OUT"]
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=1)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
# OLS model
Xc = sm.add_constant(X)
model_v1 = sm.OLS(Y, Xc).fit()
model_v1.summary()
model_v1.resid.skew() # Normality of Residuals
# Checking assumptions
# Use original data (To Build the Model)
sns.distplot(model_v1.resid)
# Linear Regression
linear_regression = LinearRegression()
linear_regression.fit(X_train.values, Y_train.values)
rmse_af = mean_squared_error(Y_test, linear_regression.predict(X_test)) ** 0.5
print(rmse_af)
# Ridge Regression
ridge_regression = Ridge(alpha=0.05, normalize=True)
ridge_regression.fit(X_train, Y_train)
rmse2_af = mean_squared_error(Y_test, ridge_regression.predict(X_test)) ** 0.5
print(rmse2_af)
# Lasso Regression
lasso_regression = Lasso(alpha=1, max_iter=1000, tol=0.01)
lasso_regression.fit(X_train, Y_train)
rmse3_af = mean_squared_error(Y_test, lasso_regression.predict(X_test)) ** 0.5
print(rmse3_af)
models = pd.DataFrame(
{
"Model": ["Linear Regression", "Ridge Regression", "Lasso Regression"],
"rmse_before_outlier treatment": [rmse, rmse2, rmse3],
"rmse_after_outlier treatment": [rmse_af, rmse2_af, rmse3_af],
}
)
models
Model = np.array(["Linear Regression", "Ridge Regression", "Lasso Regression"])
rmse_before_outlier_treatment = np.array([rmse, rmse2, rmse3])
rmse_after_outlier_treatment = np.array([rmse_af, rmse2_af, rmse3_af])
plt.plot(Model, rmse_before_outlier_treatment)
plt.plot(Model, rmse_after_outlier_treatment)
plt.legend(["rmse_before_outlier_treatment", "rmse_after_outlier_treatment"])
plt.xlabel("ML Models")
plt.ylabel("RMSE")
plt.show()
models.to_csv("models.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/179/69179557.ipynb
|
flight-take-off-data-jfk-airport
|
deepankurk
|
[{"Id": 69179557, "ScriptId": 18884100, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1214231, "CreationDate": "07/27/2021 18:24:48", "VersionNumber": 1.0, "Title": "EDA and regression model", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 314.0, "LinesInsertedFromPrevious": 314.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92034205, "KernelVersionId": 69179557, "SourceDatasetVersionId": 2322935}]
|
[{"Id": 2322935, "DatasetId": 1402100, "DatasourceVersionId": 2364429, "CreatorUserId": 3757755, "LicenseName": "Community Data License Agreement - Sharing - Version 1.0", "CreationDate": "06/11/2021 05:26:18", "VersionNumber": 1.0, "Title": "Flight Take Off Data - JFK Airport", "Slug": "flight-take-off-data-jfk-airport", "Subtitle": "This data contains flight details taking off from JFK airport.", "Description": "### Context\n\nThis data was scraped under a Academic Paper under Review by IEEE transportation\n\n### Content\n\nThis file contains data about flights leaving from JKF ariport between Nov 2019-Dec-2020. Taxi-Out prediction has been an important concept as it helps in calculating Runway time and directly impact the cost of the flight.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1402100, "CreatorUserId": 3757755, "OwnerUserId": 3757755.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2322935.0, "CurrentDatasourceVersionId": 2364429.0, "ForumId": 1421397, "Type": 2, "CreationDate": "06/11/2021 05:26:18", "LastActivityDate": "06/11/2021", "TotalViews": 20462, "TotalDownloads": 1934, "TotalVotes": 34, "TotalKernels": 49}]
|
[{"Id": 3757755, "UserName": "deepankurk", "DisplayName": "Deepankur Kansal", "RegisterDate": "09/24/2019", "PerformanceTier": 0}]
|
|
[{"flight-take-off-data-jfk-airport/M1_final.csv": {"column_names": "[\"MONTH\", \"DAY_OF_MONTH\", \"DAY_OF_WEEK\", \"OP_UNIQUE_CARRIER\", \"TAIL_NUM\", \"DEST\", \"DEP_DELAY\", \"CRS_ELAPSED_TIME\", \"DISTANCE\", \"CRS_DEP_M\", \"DEP_TIME_M\", \"CRS_ARR_M\", \"Temperature\", \"Dew Point\", \"Humidity\", \"Wind\", \"Wind Speed\", \"Wind Gust\", \"Pressure\", \"Condition\", \"sch_dep\", \"sch_arr\", \"TAXI_OUT\"]", "column_data_types": "{\"MONTH\": \"int64\", \"DAY_OF_MONTH\": \"int64\", \"DAY_OF_WEEK\": \"int64\", \"OP_UNIQUE_CARRIER\": \"object\", \"TAIL_NUM\": \"object\", \"DEST\": \"object\", \"DEP_DELAY\": \"int64\", \"CRS_ELAPSED_TIME\": \"int64\", \"DISTANCE\": \"int64\", \"CRS_DEP_M\": \"int64\", \"DEP_TIME_M\": \"int64\", \"CRS_ARR_M\": \"int64\", \"Temperature\": \"int64\", \"Dew Point\": \"object\", \"Humidity\": \"int64\", \"Wind\": \"object\", \"Wind Speed\": \"int64\", \"Wind Gust\": \"int64\", \"Pressure\": \"float64\", \"Condition\": \"object\", \"sch_dep\": \"int64\", \"sch_arr\": \"int64\", \"TAXI_OUT\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 28820 entries, 0 to 28819\nData columns (total 23 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MONTH 28820 non-null int64 \n 1 DAY_OF_MONTH 28820 non-null int64 \n 2 DAY_OF_WEEK 28820 non-null int64 \n 3 OP_UNIQUE_CARRIER 28820 non-null object \n 4 TAIL_NUM 28820 non-null object \n 5 DEST 28820 non-null object \n 6 DEP_DELAY 28820 non-null int64 \n 7 CRS_ELAPSED_TIME 28820 non-null int64 \n 8 DISTANCE 28820 non-null int64 \n 9 CRS_DEP_M 28820 non-null int64 \n 10 DEP_TIME_M 28820 non-null int64 \n 11 CRS_ARR_M 28820 non-null int64 \n 12 Temperature 28820 non-null int64 \n 13 Dew Point 28820 non-null object \n 14 Humidity 28820 non-null int64 \n 15 Wind 28818 non-null object \n 16 Wind Speed 28820 non-null int64 \n 17 Wind Gust 28820 non-null int64 \n 18 Pressure 28820 non-null float64\n 19 Condition 28820 non-null object \n 20 sch_dep 28820 non-null int64 \n 21 sch_arr 28820 non-null int64 \n 22 TAXI_OUT 28820 non-null int64 \ndtypes: float64(1), int64(16), object(6)\nmemory usage: 5.1+ MB\n", "summary": "{\"MONTH\": {\"count\": 28820.0, \"mean\": 7.894240111034004, \"std\": 4.9917230630328655, \"min\": 1.0, \"25%\": 1.0, \"50%\": 11.0, \"75%\": 12.0, \"max\": 12.0}, \"DAY_OF_MONTH\": {\"count\": 28820.0, \"mean\": 16.021096460791117, \"std\": 8.750179415215479, \"min\": 1.0, \"25%\": 8.0, \"50%\": 16.0, \"75%\": 24.0, \"max\": 31.0}, \"DAY_OF_WEEK\": {\"count\": 28820.0, \"mean\": 4.0089521165857045, \"std\": 1.9852302615300481, \"min\": 1.0, \"25%\": 2.0, \"50%\": 4.0, \"75%\": 6.0, \"max\": 7.0}, \"DEP_DELAY\": {\"count\": 28820.0, \"mean\": 6.3749826509368495, \"std\": 38.735144439877125, \"min\": -22.0, \"25%\": -6.0, \"50%\": -3.0, \"75%\": 2.0, \"max\": 1276.0}, \"CRS_ELAPSED_TIME\": {\"count\": 28820.0, \"mean\": 225.2882026370576, \"std\": 119.48241695341228, \"min\": 57.0, \"25%\": 124.0, \"50%\": 188.0, \"75%\": 365.0, \"max\": 697.0}, \"DISTANCE\": {\"count\": 28820.0, \"mean\": 1267.746079111728, \"std\": 889.3432459591204, \"min\": 94.0, \"25%\": 483.0, \"50%\": 1029.0, \"75%\": 2248.0, \"max\": 4983.0}, \"CRS_DEP_M\": {\"count\": 28820.0, \"mean\": 831.0038514920194, \"std\": 299.3985247377925, \"min\": 301.0, \"25%\": 545.0, \"50%\": 856.0, \"75%\": 1095.0, \"max\": 1439.0}, \"DEP_TIME_M\": {\"count\": 28820.0, \"mean\": 828.9346981263012, \"std\": 305.864102843798, \"min\": 1.0, \"25%\": 542.0, \"50%\": 854.0, \"75%\": 1097.0, \"max\": 1440.0}, 
\"CRS_ARR_M\": {\"count\": 28820.0, \"mean\": 910.8742886884108, \"std\": 345.41174259565156, \"min\": 1.0, \"25%\": 667.0, \"50%\": 918.0, \"75%\": 1193.0, \"max\": 1439.0}, \"Temperature\": {\"count\": 28820.0, \"mean\": 41.48983344899376, \"std\": 8.043533356428775, \"min\": 17.0, \"25%\": 36.0, \"50%\": 42.0, \"75%\": 47.0, \"max\": 68.0}, \"Humidity\": {\"count\": 28820.0, \"mean\": 57.73261623872311, \"std\": 23.4686764823488, \"min\": 0.0, \"25%\": 46.0, \"50%\": 59.0, \"75%\": 74.0, \"max\": 97.0}, \"Wind Speed\": {\"count\": 28820.0, \"mean\": 12.367626648161, \"std\": 6.2592975243673115, \"min\": 0.0, \"25%\": 8.0, \"50%\": 12.0, \"75%\": 16.0, \"max\": 36.0}, \"Wind Gust\": {\"count\": 28820.0, \"mean\": 5.535322692574601, \"std\": 11.886457297453232, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 49.0}, \"Pressure\": {\"count\": 28820.0, \"mean\": 30.092433032616242, \"std\": 0.29616045801220897, \"min\": 29.2, \"25%\": 29.88, \"50%\": 30.11, \"75%\": 30.32, \"max\": 30.75}, \"sch_dep\": {\"count\": 28820.0, \"mean\": 31.091256072172104, \"std\": 9.510358944535243, \"min\": 0.0, \"25%\": 26.0, \"50%\": 30.0, \"75%\": 37.0, \"max\": 55.0}, \"sch_arr\": {\"count\": 28820.0, \"mean\": 28.43213046495489, \"std\": 8.263043072281194, \"min\": 0.0, \"25%\": 21.0, \"50%\": 30.0, \"75%\": 35.0, \"max\": 46.0}, \"TAXI_OUT\": {\"count\": 28820.0, \"mean\": 20.85857043719639, \"std\": 6.851914541797431, \"min\": 5.0, \"25%\": 16.0, \"50%\": 19.0, \"75%\": 25.0, \"max\": 41.0}}", "examples": "{\"MONTH\":{\"0\":11,\"1\":11,\"2\":11,\"3\":11},\"DAY_OF_MONTH\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"DAY_OF_WEEK\":{\"0\":5,\"1\":5,\"2\":5,\"3\":5},\"OP_UNIQUE_CARRIER\":{\"0\":\"B6\",\"1\":\"B6\",\"2\":\"B6\",\"3\":\"B6\"},\"TAIL_NUM\":{\"0\":\"N828JB\",\"1\":\"N992JB\",\"2\":\"N959JB\",\"3\":\"N999JQ\"},\"DEST\":{\"0\":\"CHS\",\"1\":\"LAX\",\"2\":\"FLL\",\"3\":\"MCO\"},\"DEP_DELAY\":{\"0\":-1,\"1\":-7,\"2\":40,\"3\":-2},\"CRS_ELAPSED_TIME\":{\"0\":124,\"1\":371,\"2\":181,\"3\":168},\"DISTANCE\":{\"0\":636,\"1\":2475,\"2\":1069,\"3\":944},\"CRS_DEP_M\":{\"0\":324,\"1\":340,\"2\":301,\"3\":345},\"DEP_TIME_M\":{\"0\":323,\"1\":333,\"2\":341,\"3\":343},\"CRS_ARR_M\":{\"0\":448,\"1\":531,\"2\":482,\"3\":513},\"Temperature\":{\"0\":48,\"1\":48,\"2\":48,\"3\":48},\"Dew Point\":{\"0\":\"34\",\"1\":\"34\",\"2\":\"34\",\"3\":\"34\"},\"Humidity\":{\"0\":58,\"1\":58,\"2\":58,\"3\":58},\"Wind\":{\"0\":\"W\",\"1\":\"W\",\"2\":\"W\",\"3\":\"W\"},\"Wind Speed\":{\"0\":25,\"1\":25,\"2\":25,\"3\":25},\"Wind Gust\":{\"0\":38,\"1\":38,\"2\":38,\"3\":38},\"Pressure\":{\"0\":29.86,\"1\":29.86,\"2\":29.86,\"3\":29.86},\"Condition\":{\"0\":\"Fair \\/ Windy\",\"1\":\"Fair \\/ Windy\",\"2\":\"Fair \\/ Windy\",\"3\":\"Fair \\/ Windy\"},\"sch_dep\":{\"0\":9,\"1\":9,\"2\":9,\"3\":9},\"sch_arr\":{\"0\":17,\"1\":17,\"2\":17,\"3\":17},\"TAXI_OUT\":{\"0\":14,\"1\":15,\"2\":22,\"3\":12}}"}}]
| true | 1 |
<start_data_description><data_path>flight-take-off-data-jfk-airport/M1_final.csv:
<column_names>
['MONTH', 'DAY_OF_MONTH', 'DAY_OF_WEEK', 'OP_UNIQUE_CARRIER', 'TAIL_NUM', 'DEST', 'DEP_DELAY', 'CRS_ELAPSED_TIME', 'DISTANCE', 'CRS_DEP_M', 'DEP_TIME_M', 'CRS_ARR_M', 'Temperature', 'Dew Point', 'Humidity', 'Wind', 'Wind Speed', 'Wind Gust', 'Pressure', 'Condition', 'sch_dep', 'sch_arr', 'TAXI_OUT']
<column_types>
{'MONTH': 'int64', 'DAY_OF_MONTH': 'int64', 'DAY_OF_WEEK': 'int64', 'OP_UNIQUE_CARRIER': 'object', 'TAIL_NUM': 'object', 'DEST': 'object', 'DEP_DELAY': 'int64', 'CRS_ELAPSED_TIME': 'int64', 'DISTANCE': 'int64', 'CRS_DEP_M': 'int64', 'DEP_TIME_M': 'int64', 'CRS_ARR_M': 'int64', 'Temperature': 'int64', 'Dew Point': 'object', 'Humidity': 'int64', 'Wind': 'object', 'Wind Speed': 'int64', 'Wind Gust': 'int64', 'Pressure': 'float64', 'Condition': 'object', 'sch_dep': 'int64', 'sch_arr': 'int64', 'TAXI_OUT': 'int64'}
<dataframe_Summary>
{'MONTH': {'count': 28820.0, 'mean': 7.894240111034004, 'std': 4.9917230630328655, 'min': 1.0, '25%': 1.0, '50%': 11.0, '75%': 12.0, 'max': 12.0}, 'DAY_OF_MONTH': {'count': 28820.0, 'mean': 16.021096460791117, 'std': 8.750179415215479, 'min': 1.0, '25%': 8.0, '50%': 16.0, '75%': 24.0, 'max': 31.0}, 'DAY_OF_WEEK': {'count': 28820.0, 'mean': 4.0089521165857045, 'std': 1.9852302615300481, 'min': 1.0, '25%': 2.0, '50%': 4.0, '75%': 6.0, 'max': 7.0}, 'DEP_DELAY': {'count': 28820.0, 'mean': 6.3749826509368495, 'std': 38.735144439877125, 'min': -22.0, '25%': -6.0, '50%': -3.0, '75%': 2.0, 'max': 1276.0}, 'CRS_ELAPSED_TIME': {'count': 28820.0, 'mean': 225.2882026370576, 'std': 119.48241695341228, 'min': 57.0, '25%': 124.0, '50%': 188.0, '75%': 365.0, 'max': 697.0}, 'DISTANCE': {'count': 28820.0, 'mean': 1267.746079111728, 'std': 889.3432459591204, 'min': 94.0, '25%': 483.0, '50%': 1029.0, '75%': 2248.0, 'max': 4983.0}, 'CRS_DEP_M': {'count': 28820.0, 'mean': 831.0038514920194, 'std': 299.3985247377925, 'min': 301.0, '25%': 545.0, '50%': 856.0, '75%': 1095.0, 'max': 1439.0}, 'DEP_TIME_M': {'count': 28820.0, 'mean': 828.9346981263012, 'std': 305.864102843798, 'min': 1.0, '25%': 542.0, '50%': 854.0, '75%': 1097.0, 'max': 1440.0}, 'CRS_ARR_M': {'count': 28820.0, 'mean': 910.8742886884108, 'std': 345.41174259565156, 'min': 1.0, '25%': 667.0, '50%': 918.0, '75%': 1193.0, 'max': 1439.0}, 'Temperature': {'count': 28820.0, 'mean': 41.48983344899376, 'std': 8.043533356428775, 'min': 17.0, '25%': 36.0, '50%': 42.0, '75%': 47.0, 'max': 68.0}, 'Humidity': {'count': 28820.0, 'mean': 57.73261623872311, 'std': 23.4686764823488, 'min': 0.0, '25%': 46.0, '50%': 59.0, '75%': 74.0, 'max': 97.0}, 'Wind Speed': {'count': 28820.0, 'mean': 12.367626648161, 'std': 6.2592975243673115, 'min': 0.0, '25%': 8.0, '50%': 12.0, '75%': 16.0, 'max': 36.0}, 'Wind Gust': {'count': 28820.0, 'mean': 5.535322692574601, 'std': 11.886457297453232, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 49.0}, 'Pressure': {'count': 28820.0, 'mean': 30.092433032616242, 'std': 0.29616045801220897, 'min': 29.2, '25%': 29.88, '50%': 30.11, '75%': 30.32, 'max': 30.75}, 'sch_dep': {'count': 28820.0, 'mean': 31.091256072172104, 'std': 9.510358944535243, 'min': 0.0, '25%': 26.0, '50%': 30.0, '75%': 37.0, 'max': 55.0}, 'sch_arr': {'count': 28820.0, 'mean': 28.43213046495489, 'std': 8.263043072281194, 'min': 0.0, '25%': 21.0, '50%': 30.0, '75%': 35.0, 'max': 46.0}, 'TAXI_OUT': {'count': 28820.0, 'mean': 20.85857043719639, 'std': 6.851914541797431, 'min': 5.0, '25%': 16.0, '50%': 19.0, '75%': 25.0, 'max': 41.0}}
<dataframe_info>
RangeIndex: 28820 entries, 0 to 28819
Data columns (total 23 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 MONTH 28820 non-null int64
1 DAY_OF_MONTH 28820 non-null int64
2 DAY_OF_WEEK 28820 non-null int64
3 OP_UNIQUE_CARRIER 28820 non-null object
4 TAIL_NUM 28820 non-null object
5 DEST 28820 non-null object
6 DEP_DELAY 28820 non-null int64
7 CRS_ELAPSED_TIME 28820 non-null int64
8 DISTANCE 28820 non-null int64
9 CRS_DEP_M 28820 non-null int64
10 DEP_TIME_M 28820 non-null int64
11 CRS_ARR_M 28820 non-null int64
12 Temperature 28820 non-null int64
13 Dew Point 28820 non-null object
14 Humidity 28820 non-null int64
15 Wind 28818 non-null object
16 Wind Speed 28820 non-null int64
17 Wind Gust 28820 non-null int64
18 Pressure 28820 non-null float64
19 Condition 28820 non-null object
20 sch_dep 28820 non-null int64
21 sch_arr 28820 non-null int64
22 TAXI_OUT 28820 non-null int64
dtypes: float64(1), int64(16), object(6)
memory usage: 5.1+ MB
<some_examples>
{'MONTH': {'0': 11, '1': 11, '2': 11, '3': 11}, 'DAY_OF_MONTH': {'0': 1, '1': 1, '2': 1, '3': 1}, 'DAY_OF_WEEK': {'0': 5, '1': 5, '2': 5, '3': 5}, 'OP_UNIQUE_CARRIER': {'0': 'B6', '1': 'B6', '2': 'B6', '3': 'B6'}, 'TAIL_NUM': {'0': 'N828JB', '1': 'N992JB', '2': 'N959JB', '3': 'N999JQ'}, 'DEST': {'0': 'CHS', '1': 'LAX', '2': 'FLL', '3': 'MCO'}, 'DEP_DELAY': {'0': -1, '1': -7, '2': 40, '3': -2}, 'CRS_ELAPSED_TIME': {'0': 124, '1': 371, '2': 181, '3': 168}, 'DISTANCE': {'0': 636, '1': 2475, '2': 1069, '3': 944}, 'CRS_DEP_M': {'0': 324, '1': 340, '2': 301, '3': 345}, 'DEP_TIME_M': {'0': 323, '1': 333, '2': 341, '3': 343}, 'CRS_ARR_M': {'0': 448, '1': 531, '2': 482, '3': 513}, 'Temperature': {'0': 48, '1': 48, '2': 48, '3': 48}, 'Dew Point': {'0': '34', '1': '34', '2': '34', '3': '34'}, 'Humidity': {'0': 58, '1': 58, '2': 58, '3': 58}, 'Wind': {'0': 'W', '1': 'W', '2': 'W', '3': 'W'}, 'Wind Speed': {'0': 25, '1': 25, '2': 25, '3': 25}, 'Wind Gust': {'0': 38, '1': 38, '2': 38, '3': 38}, 'Pressure': {'0': 29.86, '1': 29.86, '2': 29.86, '3': 29.86}, 'Condition': {'0': 'Fair / Windy', '1': 'Fair / Windy', '2': 'Fair / Windy', '3': 'Fair / Windy'}, 'sch_dep': {'0': 9, '1': 9, '2': 9, '3': 9}, 'sch_arr': {'0': 17, '1': 17, '2': 17, '3': 17}, 'TAXI_OUT': {'0': 14, '1': 15, '2': 22, '3': 12}}
<end_description>
| 2,830 | 0 | 4,454 | 2,830 |
69179538
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random as rnd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from numpy import mean
from numpy import std
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
combine = [train, test]
train.head()
train.describe()
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap="viridis")
sns.displot(train["Age"].dropna(), kde=False, bins=40)
def ageCal(cols):
    # Impute missing ages with an approximate per-class median age
    # (about 37 / 29 / 24 for Pclass 1 / 2 / 3 in the training data).
    age = cols["Age"]
    pclass = cols["Pclass"]
    if pd.isnull(age):
        if pclass == 1:
            return 37
        elif pclass == 2:
            return 29
        else:
            return 24
    else:
        return age
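# Hedged aside (not part of the original notebook): the constants used in
# ageCal above roughly match the per-class median ages, which could also be
# read straight from the data instead of being hardcoded:
print(train.groupby("Pclass")["Age"].median())  # approximately 37 / 29 / 24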
train["Age"] = train[["Age", "Pclass"]].apply(ageCal, axis=1)
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap="viridis")
train.drop("Cabin", axis=1, inplace=True)
test.drop("Cabin", axis=1, inplace=True)
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap="viridis")
train.drop("PassengerId", axis=1, inplace=True)
for dataset in combine:
dataset["Sex"] = dataset["Sex"].map({"female": 1, "male": 0}).astype(int)
for dataset in combine:
dataset["FamilySize"] = dataset["SibSp"] + dataset["Parch"] + 1
train = train.drop(["Name", "Ticket"], axis=1)
test = test.drop(["Name", "Ticket"], axis=1)
combine = [train, test]
freq_port = train.Embarked.dropna().mode()[0]
for dataset in combine:
dataset["Embarked"] = dataset["Embarked"].fillna(freq_port)
for dataset in combine:
dataset["Embarked"] = dataset["Embarked"].map({"S": 0, "C": 1, "Q": 2}).astype(int)
train["Embarked"].fillna(value=train["Embarked"].mode(), inplace=True)
test["Embarked"].fillna(value=test["Embarked"].mode(), inplace=True)
train["Fare"].fillna(value=train["Fare"].mean(), inplace=True)
test["Fare"].fillna(value=test["Fare"].mean(), inplace=True)
X_train = train.drop("Survived", axis=1)
Y_train = train["Survived"]
X_test = test.drop("PassengerId", axis=1)
X_train.head()
def score_dataset(
X, y, model=RandomForestClassifier(n_estimators=100, random_state=2, max_depth=5)
):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=0)
# Metric is Accuracy
score = cross_val_score(
model, X, y, scoring="accuracy", cv=cv, n_jobs=-1, error_score="raise"
)
print("Accuracy: %.3f (%.3f)" % (mean(score), std(score)))
score_dataset(X_train, Y_train)
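# Hedged extension (not part of the original notebook): the same CV helper can
# be reused to compare a few hypothetical max_depth settings before fitting the
# final model below.
for depth in (3, 5, 7):
    print("max_depth =", depth, end="  ")
    score_dataset(
        X_train,
        Y_train,
        model=RandomForestClassifier(n_estimators=100, random_state=2, max_depth=depth),
    )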
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=2)
model.fit(X_train, Y_train)
# predictions = model.predict(X_test)
# output = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': predictions})
# output.to_csv('my_submission.csv', index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/179/69179538.ipynb
| null | null |
[{"Id": 69179538, "ScriptId": 18884069, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890626, "CreationDate": "07/27/2021 18:24:32", "VersionNumber": 4.0, "Title": "170576J_Titanic", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 111.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 110.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,145 | 0 | 1,145 | 1,145 |
||
69179705
|
<jupyter_start><jupyter_text>deapdata
Kaggle dataset identifier: deapdata
<jupyter_code>import pandas as pd
df = pd.read_csv('deapdata/(S01)/Raw EEG Data/.csv format/S01G3AllRawChannels.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<jupyter_text>Examples:
{
"AF3": 55.8972,
"AF4": -35.8975,
"F3": 96.9231,
"F4": -20.5129,
"F7": -61.5386,
"F8": 51.282,
"FC5": 2.0515,
"FC6": -11.7947,
"O1": 45.1282,
"O2": -2.0515,
"P7": 78.9744,
"P8": -6.6667000000000005,
"T7": 23.5896,
"T8": -5.1282,
"Unnamed: 14": NaN
}
{
"AF3": 55.3845,
"AF4": -33.3333,
"F3": 93.3333,
"F4": -14.8718,
"F7": -69.7437,
"F8": 42.5642,
"FC5": 3.0769,
"FC6": -11.7947,
"O1": 34.8718,
"O2": -6.6667000000000005,
"P7": 74.3591,
"P8": -9.7434,
"T7": 10.2566,
"T8": -3.0769,
"Unnamed: 14": NaN
}
{
"AF3": 52.3081,
"AF4": -30.2561,
"F3": 95.3848,
"F4": -7.1792,
"F7": -66.6665,
"F8": 36.4106,
"FC5": 0.0,
"FC6": -11.282,
"O1": 41.5386,
"O2": 0.0,
"P7": 75.8975,
"P8": -13.3333,
"T7": 8.2051,
"T8": -6.1538,
"Unnamed: 14": NaN
}
{
"AF3": 55.3848,
"AF4": -28.718,
"F3": 101.0259,
"F4": -2.564,
"F7": -53.3333,
"F8": 45.1284,
"FC5": 2.0513,
"FC6": -11.2822,
"O1": 52.3076,
"O2": -2.0513,
"P7": 74.3589,
"P8": -17.436,
"T7": 14.8716,
"T8": -5.6411,
"Unnamed: 14": NaN
}
<jupyter_code>import pandas as pd
df = pd.read_csv('deapdata/(S01)/Raw EEG Data/.csv format/S01G4AllRawChannels.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<jupyter_text>Examples:
{
"AF3": -3.8462,
"AF4": 0.7690400000000001,
"F3": 1.2822,
"F4": 5.3848,
"F7": -5.3848,
"F8": -5.3848,
"FC5": -17.6924,
"FC6": -1.2822,
"O1": 27.9487,
"O2": 7.9487000000000005,
"P7": 13.5898,
"P8": 13.0771,
"T7": -0.7690400000000001,
"T8": -4.3589,
"Unnamed: 14": NaN
}
{
"AF3": -10.5129,
"AF4": -4.3591,
"F3": -2.3079,
"F4": -0.25659000000000004,
"F7": -13.0769,
"F8": -9.9998,
"FC5": -27.4358,
"FC6": 0.25659000000000004,
"O1": 12.9487,
"O2": 9.4871,
"P7": 8.9744,
"P8": 10.5129,
"T7": 1.282,
"T8": 0.76929,
"Unnamed: 14": NaN
}
{
"AF3": -5.8975,
"AF4": -2.8205999999999998,
"F3": 0.25659000000000004,
"F4": -0.25659000000000004,
"F7": -12.5642,
"F8": -13.0769,
"FC5": -32.5642,
"FC6": 2.8205999999999998,
"O1": -2.0513,
"O2": 11.0256,
"P7": 14.6155,
"P8": 9.4871,
"T7": 2.8205999999999998,
"T8": 0.25659000000000004,
"Unnamed: 14": NaN
}
{
"AF3": 0.5129400000000001,
"AF4": 3.0769,
"F3": 3.5896,
"F4": 12.3079,
"F7": -6.6667000000000005,
"F8": -2.5642,
"FC5": -21.0256,
"FC6": -2.051,
"O1": -0.5129400000000001,
"O2": -1.0256,
"P7": 4.6155,
"P8": 6.1541,
"T7": -3.5896,
"T8": 1.5383,
"Unnamed: 14": NaN
}
<jupyter_code>import pandas as pd
df = pd.read_csv('deapdata/(S01)/Raw EEG Data/.csv format/S01G1AllRawChannels.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<jupyter_text>Examples:
{
"AF3": -35.1282,
"AF4": -16.1538,
"F3": -44.8718,
"F4": 1.7949000000000002,
"F7": 44.8716,
"F8": -1.7949000000000002,
"FC5": -5.8975,
"FC6": -3.8462,
"O1": 27.436,
"O2": -10.5127,
"P7": 5.8975,
"P8": 7.9487000000000005,
"T7": 12.564,
"T8": 18.2051,
"Unnamed: 14": NaN
}
{
"AF3": -32.5642,
"AF4": -22.3079,
"F3": -47.9487,
"F4": -11.0256,
"F7": 40.2561,
"F8": -16.7949,
"FC5": -20.8975,
"FC6": -2.8205999999999998,
"O1": 20.2561,
"O2": 2.8205999999999998,
"P7": 20.2561,
"P8": 5.8972,
"T7": 19.2307,
"T8": 4.3591,
"Unnamed: 14": NaN
}
{
"AF3": -27.1794,
"AF4": -13.8459,
"F3": -39.4871,
"F4": -3.0769,
"F7": 47.1794,
"F8": -1.7949000000000002,
"FC5": -5.8975,
"FC6": -1.5383,
"O1": 31.282,
"O2": -4.1028,
"P7": 10.2566,
"P8": 9.7434,
"T7": 17.9485,
"T8": 11.282,
"Unnamed: 14": NaN
}
{
"AF3": -32.0515,
"AF4": -14.6157,
"F3": -42.3079,
"F4": -4.3589,
"F7": 42.3076,
"F8": 3.8462,
"FC5": -11.0259,
"FC6": -2.3076,
"O1": 22.3076,
"O2": -11.0259,
"P7": 3.333,
"P8": 2.3076,
"T7": 7.4355,
"T8": 10.5127,
"Unnamed: 14": NaN
}
<jupyter_code>import pandas as pd
df = pd.read_csv('deapdata/(S01)/Raw EEG Data/.csv format/S01G2AllRawChannels.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<jupyter_text>Examples:
{
"AF3": 49.4872,
"AF4": 9.9999,
"F3": 51.5385,
"F4": -41.2821,
"F7": -30.0001,
"F8": -58.7179,
"FC5": -73.5897,
"FC6": -8.9745,
"O1": -3.3334,
"O2": 44.3588,
"P7": 140.2562,
"P8": 54.6151,
"T7": 3.3334,
"T8": -31.5385,
"Unnamed: 14": NaN
}
{
"AF3": 46.4104,
"AF4": 24.9999,
"F3": 66.5385,
"F4": -26.2821,
"F7": -15.0001,
"F8": -43.7179,
"FC5": -58.5897,
"FC6": -13.5898,
"O1": 11.6666,
"O2": 29.3588,
"P7": 128.4617,
"P8": 50.5129,
"T7": -3.3333,
"T8": -16.5385,
"Unnamed: 14": NaN
}
{
"AF3": 31.4104,
"AF4": 39.9999,
"F3": 66.6665,
"F4": -11.2821,
"F7": -0.0001225,
"F8": -28.7179,
"FC5": -46.1538,
"FC6": -28.5898,
"O1": 26.6666,
"O2": 14.3588,
"P7": 113.4617,
"P8": 35.5129,
"T7": -18.3333,
"T8": -11.7949,
"Unnamed: 14": NaN
}
{
"AF3": 16.4104,
"AF4": 40.0,
"F3": 73.3335,
"F4": 0.0,
"F7": -3.5898,
"F8": -13.7179,
"FC5": -49.2307,
"FC6": -43.5898,
"O1": 41.6666,
"O2": -0.64124,
"P7": 98.4617,
"P8": 31.2822,
"T7": -33.3333,
"T8": 0.0,
"Unnamed: 14": NaN
}
<jupyter_script>import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.image import imread
import seaborn as sns
from scipy import signal
from scipy.fft import fft, fftfreq
from scipy.fft import fftshift
import tensorflow as tf
import keras
from keras.models import Sequential, load_model
from keras.layers import (
Dense,
Conv2D,
LSTM,
MaxPooling2D,
Flatten,
Dropout,
BatchNormalization,
)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
data1_1 = pd.read_csv(
"../input/deapdata/(S01)/Raw EEG Data/.csv format/S01G1AllRawChannels.csv"
)
data2_1 = pd.read_csv(
"../input/deapdata/(S01)/Raw EEG Data/.csv format/S01G2AllRawChannels.csv"
)
data3_1 = pd.read_csv(
"../input/deapdata/(S01)/Raw EEG Data/.csv format/S01G3AllRawChannels.csv"
)
data4_1 = pd.read_csv(
"../input/deapdata/(S01)/Raw EEG Data/.csv format/S01G4AllRawChannels.csv"
)
data1_1.insert(14, "label", 0)
data2_1.insert(14, "label", 1)
data3_1.insert(14, "label", 2)
data4_1.insert(14, "label", 3)
data1_1.drop(["Unnamed: 14"], axis=1, inplace=True)
data2_1.drop(["Unnamed: 14"], axis=1, inplace=True)
data3_1.drop(["Unnamed: 14"], axis=1, inplace=True)
data4_1.drop(["Unnamed: 14"], axis=1, inplace=True)
sv = pd.concat(
[
data1_1,
data2_1,
data3_1,
data4_1,
]
)
sv = sv.sample(frac=1)
sv
data = sv.to_numpy()
def get_train():
    # Split the 14 EEG channels (features) from the label column and reshape
    # each sample to (14, 1) so it can be fed to the Conv1D layers below.
    seq = data
    X, y = seq[:, 0:14], seq[:, 14]
    X = X.reshape((len(X), 14, 1))
    print(X.shape, y.shape, X.dtype)
    return X, y
X, y = get_train()
print(pd.DataFrame(y))
from keras.utils import np_utils
n_classes = 4
Y_t = np_utils.to_categorical(y, n_classes)
Y_t
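# Quick sanity check (addition, not in the original notebook): each of the four
# source files contributes 38252 rows, so the four classes should be perfectly
# balanced before the stratified split below.
print(pd.Series(y).value_counts())
print("one-hot shape:", Y_t.shape)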
train_x, test_x, train_y, test_y = train_test_split(X, Y_t, test_size=0.2, stratify=y)
train_x.shape, test_x.shape, train_y.shape, test_y.shape
from tensorflow.keras.layers import Conv1D, MaxPooling1D, BatchNormalization
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
# define baseline model: two Conv1D layers over the 14-channel sample
cnn = Sequential()
cnn.add(Conv1D(64, kernel_size=(3), activation="relu", input_shape=(14, 1)))  # 0.72
cnn.add(Conv1D(64, kernel_size=(2), activation="relu"))  # 0.7103; input_shape is only needed on the first layer
cnn.add(Flatten())
cnn.add(Dense(64, activation="relu"))  # accuracy: 0.7137
cnn.add(Dense(4, activation="softmax"))
adam = Adam(learning_rate=0.0001)
# pass the configured optimizer object; the bare string "adam" would silently
# ignore the learning rate set above
cnn.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
cnn.summary()
cnn.fit(
train_x,
train_y,
batch_size=32,
epochs=10,
verbose=1,
validation_data=(test_x, test_y),
)
cnn.save("cnn.h5")
def cnn_model():
model = Sequential()
model.add(Conv1D(64, kernel_size=(8), activation="relu", input_shape=(14, 1)))
# model.add(MaxPooling1D(pool_size = (1)))
# model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(
Conv1D(
64,
kernel_size=(4),
activation="relu",
)
)
# model.add(MaxPooling1D(pool_size = (1)))
# model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Conv1D(64, kernel_size=(2), activation="relu"))
model.add(Dropout(0.1))
model.add(MaxPooling1D(pool_size=(1)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(4, activation="softmax"))
    adam = Adam(learning_rate=0.0001)  # "lr" is the deprecated alias for learning_rate
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
return model
model = cnn_model()
model.summary()
# keras.utils.plot_model(model, show_shapes=True)
import seaborn as sns
# set early stopping criteria
pat = 1
n_folds = 5
epochs = 10
batch_size = 32
early_stopping = EarlyStopping(monitor="val_loss", patience=pat, verbose=1)
model_checkpoint = ModelCheckpoint("subjek1CNN.h5", verbose=1, save_best_only=True)
def fit_and_evaluate(t_x, val_x, t_y, val_y, EPOCHS=epochs, BATCH_SIZE=batch_size):
model = None
model = cnn_model()
results = model.fit(
t_x,
t_y,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
callbacks=[early_stopping, model_checkpoint],
validation_split=0.2,
)
print("Val Score: ", model.evaluate(val_x, val_y))
return results
model_history = []
for i in range(n_folds):
print("Training on Fold: ", i + 1)
t_x, val_x, t_y, val_y = train_test_split(
train_x, train_y, test_size=0.2, shuffle=True
)
model_history.append(fit_and_evaluate(t_x, val_x, t_y, val_y, epochs, batch_size))
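# Hedged aside (not part of the original notebook): the loop above draws a new
# random 80/20 split on every iteration, so the five "folds" can overlap. For
# non-overlapping folds, sklearn's KFold could index train_x/train_y instead;
# only the split sizes are reported here, but fit_and_evaluate() could be
# called on each index pair in the same way.
from sklearn.model_selection import KFold

for fold_id, (tr_idx, va_idx) in enumerate(
    KFold(n_splits=n_folds, shuffle=True, random_state=0).split(train_x), start=1
):
    print("Fold", fold_id, "-> train:", len(tr_idx), "val:", len(va_idx))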
model_ = load_model("subjek1CNN.h5")
x_pred = model_.predict_classes(val_x)
y_pred = np.argmax(val_y, axis=1)
cm = confusion_matrix(x_pred, y_pred)
print(classification_report(x_pred, y_pred))
plt.figure(figsize=(5, 5))
sign = ["G1", "G2", "G3", "G4"]
sns.heatmap(
cm,
cmap="Blues",
xticklabels=sign,
yticklabels=sign,
linecolor="black",
linewidth=1,
annot=True,
fmt="",
)
plt.show()
plt.rcParams["figure.figsize"] = (15, 5)
fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True)
ax1.plot(model_history[0].history["accuracy"], label="Training Fold 1 accuracy")
ax1.plot(model_history[1].history["accuracy"], label="Training Fold 2 accuracy")
ax1.plot(model_history[2].history["accuracy"], label="Training Fold 3 accuracy")
ax1.plot(model_history[3].history["accuracy"], label="Training Fold 4 accuracy")
ax1.plot(model_history[4].history["accuracy"], label="Training Fold 5 accuracy")
ax1.legend()
ax2.plot(model_history[0].history["loss"], label="Training Fold 1 loss")
ax2.plot(model_history[1].history["loss"], label="Training Fold 2 loss")
ax2.plot(model_history[2].history["loss"], label="Training Fold 3 loss")
ax2.plot(model_history[3].history["loss"], label="Training Fold 4 loss")
ax2.plot(model_history[4].history["loss"], label="Training Fold 5 loss")
ax2.legend()
plt.show()
fig, (plt1, plt2) = plt.subplots(ncols=2, sharex=True)
plt1.plot(
model_history[0].history["accuracy"], label="Train Accuracy Fold 1", color="black"
)
plt1.plot(
model_history[0].history["val_accuracy"],
label="Val Accuracy Fold 1",
color="orange",
linestyle="dashdot",
)
plt1.legend()
plt2.plot(model_history[0].history["loss"], label="Train Loss Fold 1", color="black")
plt2.plot(
model_history[0].history["val_loss"],
label="Val Loss Fold 1",
color="orange",
linestyle="dashdot",
)
plt2.legend()
plt.show()
fig, (plt1, plt2) = plt.subplots(ncols=2, sharex=True)
plt1.plot(
model_history[1].history["accuracy"], label="Train Accuracy Fold 2", color="red"
)
plt1.plot(
model_history[1].history["val_accuracy"],
label="Val Accuracy Fold 2",
color="orange",
linestyle="dashdot",
)
plt1.legend()
plt2.plot(model_history[1].history["loss"], label="Train Loss Fold 2", color="red")
plt2.plot(
model_history[1].history["val_loss"],
label="Val Loss Fold 2",
color="orange",
linestyle="dashdot",
)
plt2.legend()
plt.show()
fig, (plt1, plt2) = plt.subplots(ncols=2, sharex=True)
plt1.plot(
model_history[2].history["accuracy"], label="Train Accuracy Fold 3", color="green"
)
plt1.plot(
model_history[2].history["val_accuracy"],
label="Val Accuracy Fold 3",
color="orange",
linestyle="dashdot",
)
plt1.legend()
plt2.plot(model_history[2].history["loss"], label="Train Loss Fold 3", color="green")
plt2.plot(
model_history[2].history["val_loss"],
label="Val Loss Fold 3",
color="orange",
linestyle="dashdot",
)
plt2.legend()
plt.show()
fig, (plt1, plt2) = plt.subplots(ncols=2, sharex=True)
plt1.plot(
model_history[3].history["accuracy"], label="Train Accuracy Fold 4", color="blue"
)
plt1.plot(
model_history[3].history["val_accuracy"],
label="Val Accuracy Fold 4",
color="orange",
linestyle="dashdot",
)
plt1.legend()
plt2.plot(model_history[3].history["loss"], label="Train Loss Fold 4", color="blue")
plt2.plot(
model_history[3].history["val_loss"],
label="Val Loss Fold 4",
color="orange",
linestyle="dashdot",
)
plt2.legend()
plt.show()
fig, (plt1, plt2) = plt.subplots(ncols=2, sharex=True)
plt1.plot(
model_history[4].history["accuracy"], label="Train Accuracy Fold 5", color="purple"
)
plt1.plot(
model_history[4].history["val_accuracy"],
label="Val Accuracy Fold 5",
color="orange",
linestyle="dashdot",
)
plt1.legend()
plt2.plot(model_history[4].history["loss"], label="Train Loss Fold 5", color="purple")
plt2.plot(
    model_history[4].history["val_loss"],
    label="Val Loss Fold 5",
    color="orange",
    linestyle="dashdot",
)
plt2.legend()
plt.show()
model = load_model("cnn.h5")
a = model.evaluate(test_x, test_y)
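# Minor addition (not in the original notebook): report the held-out metrics
# returned by evaluate() explicitly.
print("cnn.h5 test loss / accuracy:", a)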
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/179/69179705.ipynb
|
deapdata
|
widhiwinata
|
[{"Id": 69179705, "ScriptId": 18872316, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4959048, "CreationDate": "07/27/2021 18:27:20", "VersionNumber": 5.0, "Title": "Klasifikasi Gamemo", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 255.0, "LinesInsertedFromPrevious": 167.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 88.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92034506, "KernelVersionId": 69179705, "SourceDatasetVersionId": 2407063}]
|
[{"Id": 2407063, "DatasetId": 1455929, "DatasourceVersionId": 2449132, "CreatorUserId": 4959048, "LicenseName": "Unknown", "CreationDate": "07/08/2021 15:03:32", "VersionNumber": 1.0, "Title": "deapdata", "Slug": "deapdata", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1455929, "CreatorUserId": 4959048, "OwnerUserId": 4959048.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2407063.0, "CurrentDatasourceVersionId": 2449132.0, "ForumId": 1475502, "Type": 2, "CreationDate": "07/08/2021 15:03:32", "LastActivityDate": "07/08/2021", "TotalViews": 1749, "TotalDownloads": 118, "TotalVotes": 3, "TotalKernels": 5}]
|
[{"Id": 4959048, "UserName": "widhiwinata", "DisplayName": "Widhi Winata Sakti", "RegisterDate": "04/25/2020", "PerformanceTier": 0}]
|
|
[{"deapdata/(S01)/Raw EEG Data/.csv format/S01G3AllRawChannels.csv": {"column_names": "[\"AF3\", \"AF4\", \"F3\", \"F4\", \"F7\", \"F8\", \"FC5\", \"FC6\", \"O1\", \"O2\", \"P7\", \"P8\", \"T7\", \"T8\", \"Unnamed: 14\"]", "column_data_types": "{\"AF3\": \"float64\", \"AF4\": \"float64\", \"F3\": \"float64\", \"F4\": \"float64\", \"F7\": \"float64\", \"F8\": \"float64\", \"FC5\": \"float64\", \"FC6\": \"float64\", \"O1\": \"float64\", \"O2\": \"float64\", \"P7\": \"float64\", \"P8\": \"float64\", \"T7\": \"float64\", \"T8\": \"float64\", \"Unnamed: 14\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 38252 entries, 0 to 38251\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 AF3 38252 non-null float64\n 1 AF4 38252 non-null float64\n 2 F3 38252 non-null float64\n 3 F4 38252 non-null float64\n 4 F7 38252 non-null float64\n 5 F8 38252 non-null float64\n 6 FC5 38252 non-null float64\n 7 FC6 38252 non-null float64\n 8 O1 38252 non-null float64\n 9 O2 38252 non-null float64\n 10 P7 38252 non-null float64\n 11 P8 38252 non-null float64\n 12 T7 38252 non-null float64\n 13 T8 38252 non-null float64\n 14 Unnamed: 14 0 non-null float64\ndtypes: float64(15)\nmemory usage: 4.4 MB\n", "summary": "{\"AF3\": {\"count\": 38252.0, \"mean\": 19.18873041226603, \"std\": 22.275878683562038, \"min\": -203.5896, \"25%\": 9.9998, \"50%\": 20.5128, \"75%\": 30.5128, \"max\": 149.7439}, \"AF4\": {\"count\": 38252.0, \"mean\": 4.216483834047892, \"std\": 19.365397456705626, \"min\": -141.0255, \"25%\": -3.5898499999999998, \"50%\": 3.5897, \"75%\": 13.3335, \"max\": 87.8207}, \"F3\": {\"count\": 38252.0, \"mean\": -0.25683919354543566, \"std\": 27.72974481142927, \"min\": -323.4617, \"25%\": -11.0255, \"50%\": 0.5127, \"75%\": 11.7949, \"max\": 261.5383}, \"F4\": {\"count\": 38252.0, \"mean\": -10.844779922553068, \"std\": 33.49612112398574, \"min\": -176.4103, \"25%\": -25.641, \"50%\": -13.0769, \"75%\": -0.76929, \"max\": 262.564}, \"F7\": {\"count\": 38252.0, \"mean\": -40.43746896707361, \"std\": 86.24070061095826, \"min\": -1140.2563, \"25%\": -64.1025, \"50%\": -39.231, \"75%\": -15.769, \"max\": 1076.0256}, \"F8\": {\"count\": 38252.0, \"mean\": -119.45622850805185, \"std\": 53.118604108549334, \"min\": -295.1282, \"25%\": -152.564, \"50%\": -135.6411, \"75%\": -106.4102, \"max\": 113.8462}, \"FC5\": {\"count\": 38252.0, \"mean\": -49.645873240248875, \"std\": 47.855448753242136, \"min\": -210.7693, \"25%\": -75.1282, \"50%\": -56.4104, \"75%\": -31.5383, \"max\": 331.2822}, \"FC6\": {\"count\": 38252.0, \"mean\": 32.84449653113563, \"std\": 31.55307134126084, \"min\": -99.7434, \"25%\": 15.2565, \"50%\": 32.564, \"75%\": 49.7435, \"max\": 181.5386}, \"O1\": {\"count\": 38252.0, \"mean\": -39.90751475505856, \"std\": 70.80948291443238, \"min\": -518.4614, \"25%\": -64.4871, \"50%\": -39.8717, \"75%\": -5.89745, \"max\": 318.9742}, \"O2\": {\"count\": 38252.0, \"mean\": 45.534384165625326, \"std\": 47.99886475639196, \"min\": -290.1284, \"25%\": 23.0771, \"50%\": 50.1284, \"75%\": 73.4613, \"max\": 187.4359}, \"P7\": {\"count\": 38252.0, \"mean\": 39.98248563493412, \"std\": 63.7088425959553, \"min\": -393.8462, \"25%\": 12.3076, \"50%\": 44.35895, \"75%\": 73.8459, \"max\": 713.5898}, \"P8\": {\"count\": 38252.0, \"mean\": 13.656483186212487, \"std\": 30.16629283981416, \"min\": -257.4358, \"25%\": 1.5386, \"50%\": 15.1282, \"75%\": 29.4873, \"max\": 133.077}, \"T7\": {\"count\": 38252.0, \"mean\": 0.30127110189009754, 
\"std\": 40.39435403564419, \"min\": -198.9744, \"25%\": -16.6665, \"50%\": -0.25647, \"75%\": 16.4104, \"max\": 322.3075}, \"T8\": {\"count\": 38252.0, \"mean\": -0.6924478969857788, \"std\": 19.298092746101943, \"min\": -121.0256, \"25%\": -9.231, \"50%\": -0.5127, \"75%\": 8.4617, \"max\": 80.1279}, \"Unnamed: 14\": {\"count\": 0.0, \"mean\": NaN, \"std\": NaN, \"min\": NaN, \"25%\": NaN, \"50%\": NaN, \"75%\": NaN, \"max\": NaN}}", "examples": "{\"AF3\":{\"0\":55.8972,\"1\":55.3845,\"2\":52.3081,\"3\":55.3848},\"AF4\":{\"0\":-35.8975,\"1\":-33.3333,\"2\":-30.2561,\"3\":-28.718},\"F3\":{\"0\":96.9231,\"1\":93.3333,\"2\":95.3848,\"3\":101.0259},\"F4\":{\"0\":-20.5129,\"1\":-14.8718,\"2\":-7.1792,\"3\":-2.564},\"F7\":{\"0\":-61.5386,\"1\":-69.7437,\"2\":-66.6665,\"3\":-53.3333},\"F8\":{\"0\":51.282,\"1\":42.5642,\"2\":36.4106,\"3\":45.1284},\"FC5\":{\"0\":2.0515,\"1\":3.0769,\"2\":0.0,\"3\":2.0513},\"FC6\":{\"0\":-11.7947,\"1\":-11.7947,\"2\":-11.282,\"3\":-11.2822},\"O1\":{\"0\":45.1282,\"1\":34.8718,\"2\":41.5386,\"3\":52.3076},\"O2\":{\"0\":-2.0515,\"1\":-6.6667,\"2\":0.0,\"3\":-2.0513},\"P7\":{\"0\":78.9744,\"1\":74.3591,\"2\":75.8975,\"3\":74.3589},\"P8\":{\"0\":-6.6667,\"1\":-9.7434,\"2\":-13.3333,\"3\":-17.436},\"T7\":{\"0\":23.5896,\"1\":10.2566,\"2\":8.2051,\"3\":14.8716},\"T8\":{\"0\":-5.1282,\"1\":-3.0769,\"2\":-6.1538,\"3\":-5.6411},\"Unnamed: 14\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}, {"deapdata/(S01)/Raw EEG Data/.csv format/S01G4AllRawChannels.csv": {"column_names": "[\"AF3\", \"AF4\", \"F3\", \"F4\", \"F7\", \"F8\", \"FC5\", \"FC6\", \"O1\", \"O2\", \"P7\", \"P8\", \"T7\", \"T8\", \"Unnamed: 14\"]", "column_data_types": "{\"AF3\": \"float64\", \"AF4\": \"float64\", \"F3\": \"float64\", \"F4\": \"float64\", \"F7\": \"float64\", \"F8\": \"float64\", \"FC5\": \"float64\", \"FC6\": \"float64\", \"O1\": \"float64\", \"O2\": \"float64\", \"P7\": \"float64\", \"P8\": \"float64\", \"T7\": \"float64\", \"T8\": \"float64\", \"Unnamed: 14\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 38252 entries, 0 to 38251\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 AF3 38252 non-null float64\n 1 AF4 38252 non-null float64\n 2 F3 38252 non-null float64\n 3 F4 38252 non-null float64\n 4 F7 38252 non-null float64\n 5 F8 38252 non-null float64\n 6 FC5 38252 non-null float64\n 7 FC6 38252 non-null float64\n 8 O1 38252 non-null float64\n 9 O2 38252 non-null float64\n 10 P7 38252 non-null float64\n 11 P8 38252 non-null float64\n 12 T7 38252 non-null float64\n 13 T8 38252 non-null float64\n 14 Unnamed: 14 0 non-null float64\ndtypes: float64(15)\nmemory usage: 4.4 MB\n", "summary": "{\"AF3\": {\"count\": 38252.0, \"mean\": 31.455274264613614, \"std\": 24.96442567549041, \"min\": -123.3335, \"25%\": 21.6666, \"50%\": 34.166700000000006, \"75%\": 45.3846, \"max\": 127.1794}, \"AF4\": {\"count\": 38252.0, \"mean\": -10.81308230618007, \"std\": 16.59929062635164, \"min\": -116.4104, \"25%\": -19.6154, \"50%\": -8.9744, \"75%\": -0.76916, \"max\": 85.3845}, \"F3\": {\"count\": 38252.0, \"mean\": 4.7450245103524, \"std\": 22.968161009870247, \"min\": -144.8718, \"25%\": -4.8718, \"50%\": 4.1025, \"75%\": 15.641, \"max\": 121.282}, \"F4\": {\"count\": 38252.0, \"mean\": -27.979218421272094, \"std\": 32.09978172405509, \"min\": -190.0001, \"25%\": -44.6154, \"50%\": -32.0511, \"75%\": -17.1794, \"max\": 170.2566}, \"F7\": {\"count\": 38252.0, \"mean\": -49.21957693717975, \"std\": 37.23052415394909, \"min\": 
-186.4102, \"25%\": -71.0258, \"50%\": -54.48715, \"75%\": -33.077, \"max\": 135.3845}, \"F8\": {\"count\": 38252.0, \"mean\": -110.95747487503922, \"std\": 49.281907646822326, \"min\": -239.4873, \"25%\": -138.5896, \"50%\": -121.9231, \"75%\": -102.8206, \"max\": 145.3845}, \"FC5\": {\"count\": 38252.0, \"mean\": -71.91768003267804, \"std\": 54.15804414721969, \"min\": -226.1539, \"25%\": -100.1282, \"50%\": -81.7949, \"75%\": -57.6924, \"max\": 300.7693}, \"FC6\": {\"count\": 38252.0, \"mean\": 78.62727964655443, \"std\": 41.44880176578124, \"min\": -93.3333, \"25%\": 65.6411, \"50%\": 85.1282, \"75%\": 102.6923, \"max\": 210.5128}, \"O1\": {\"count\": 38252.0, \"mean\": -98.40196197657639, \"std\": 84.93562298718871, \"min\": -466.5383, \"25%\": -136.7949, \"50%\": -109.7437, \"75%\": -58.2051, \"max\": 297.9489}, \"O2\": {\"count\": 38252.0, \"mean\": 106.48198168750653, \"std\": 71.79578332792381, \"min\": -297.6924, \"25%\": 75.384875, \"50%\": 122.3079, \"75%\": 152.083425, \"max\": 290.8973}, \"P7\": {\"count\": 38252.0, \"mean\": 107.91409173036703, \"std\": 81.7777861471348, \"min\": -335.6411, \"25%\": 68.81410000000001, \"50%\": 123.077, \"75%\": 158.205, \"max\": 293.2051}, \"P8\": {\"count\": 38252.0, \"mean\": 51.20300944938826, \"std\": 39.520622801321515, \"min\": -228.4617, \"25%\": 40.0, \"50%\": 58.2052, \"75%\": 73.2051, \"max\": 149.1025}, \"T7\": {\"count\": 38252.0, \"mean\": 4.875287061317056, \"std\": 36.12803768798307, \"min\": -124.1027, \"25%\": -13.8462, \"50%\": 2.3076, \"75%\": 21.0256, \"max\": 181.7949}, \"T8\": {\"count\": 38252.0, \"mean\": 2.2655495754470354, \"std\": 34.006675949840144, \"min\": -244.3589, \"25%\": -7.4359, \"50%\": 3.5897, \"75%\": 16.4102, \"max\": 156.1539}, \"Unnamed: 14\": {\"count\": 0.0, \"mean\": NaN, \"std\": NaN, \"min\": NaN, \"25%\": NaN, \"50%\": NaN, \"75%\": NaN, \"max\": NaN}}", "examples": "{\"AF3\":{\"0\":-3.8462,\"1\":-10.5129,\"2\":-5.8975,\"3\":0.51294},\"AF4\":{\"0\":0.76904,\"1\":-4.3591,\"2\":-2.8206,\"3\":3.0769},\"F3\":{\"0\":1.2822,\"1\":-2.3079,\"2\":0.25659,\"3\":3.5896},\"F4\":{\"0\":5.3848,\"1\":-0.25659,\"2\":-0.25659,\"3\":12.3079},\"F7\":{\"0\":-5.3848,\"1\":-13.0769,\"2\":-12.5642,\"3\":-6.6667},\"F8\":{\"0\":-5.3848,\"1\":-9.9998,\"2\":-13.0769,\"3\":-2.5642},\"FC5\":{\"0\":-17.6924,\"1\":-27.4358,\"2\":-32.5642,\"3\":-21.0256},\"FC6\":{\"0\":-1.2822,\"1\":0.25659,\"2\":2.8206,\"3\":-2.051},\"O1\":{\"0\":27.9487,\"1\":12.9487,\"2\":-2.0513,\"3\":-0.51294},\"O2\":{\"0\":7.9487,\"1\":9.4871,\"2\":11.0256,\"3\":-1.0256},\"P7\":{\"0\":13.5898,\"1\":8.9744,\"2\":14.6155,\"3\":4.6155},\"P8\":{\"0\":13.0771,\"1\":10.5129,\"2\":9.4871,\"3\":6.1541},\"T7\":{\"0\":-0.76904,\"1\":1.282,\"2\":2.8206,\"3\":-3.5896},\"T8\":{\"0\":-4.3589,\"1\":0.76929,\"2\":0.25659,\"3\":1.5383},\"Unnamed: 14\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}, {"deapdata/(S01)/Raw EEG Data/.csv format/S01G1AllRawChannels.csv": {"column_names": "[\"AF3\", \"AF4\", \"F3\", \"F4\", \"F7\", \"F8\", \"FC5\", \"FC6\", \"O1\", \"O2\", \"P7\", \"P8\", \"T7\", \"T8\", \"Unnamed: 14\"]", "column_data_types": "{\"AF3\": \"float64\", \"AF4\": \"float64\", \"F3\": \"float64\", \"F4\": \"float64\", \"F7\": \"float64\", \"F8\": \"float64\", \"FC5\": \"float64\", \"FC6\": \"float64\", \"O1\": \"float64\", \"O2\": \"float64\", \"P7\": \"float64\", \"P8\": \"float64\", \"T7\": \"float64\", \"T8\": \"float64\", \"Unnamed: 14\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 38252 entries, 0 to 38251\nData columns (total 
<start_data_description><data_path>deapdata/(S01)/Raw EEG Data/.csv format/S01G3AllRawChannels.csv:
<column_names>
['AF3', 'AF4', 'F3', 'F4', 'F7', 'F8', 'FC5', 'FC6', 'O1', 'O2', 'P7', 'P8', 'T7', 'T8', 'Unnamed: 14']
<column_types>
{'AF3': 'float64', 'AF4': 'float64', 'F3': 'float64', 'F4': 'float64', 'F7': 'float64', 'F8': 'float64', 'FC5': 'float64', 'FC6': 'float64', 'O1': 'float64', 'O2': 'float64', 'P7': 'float64', 'P8': 'float64', 'T7': 'float64', 'T8': 'float64', 'Unnamed: 14': 'float64'}
<dataframe_Summary>
{'AF3': {'count': 38252.0, 'mean': 19.18873041226603, 'std': 22.275878683562038, 'min': -203.5896, '25%': 9.9998, '50%': 20.5128, '75%': 30.5128, 'max': 149.7439}, 'AF4': {'count': 38252.0, 'mean': 4.216483834047892, 'std': 19.365397456705626, 'min': -141.0255, '25%': -3.5898499999999998, '50%': 3.5897, '75%': 13.3335, 'max': 87.8207}, 'F3': {'count': 38252.0, 'mean': -0.25683919354543566, 'std': 27.72974481142927, 'min': -323.4617, '25%': -11.0255, '50%': 0.5127, '75%': 11.7949, 'max': 261.5383}, 'F4': {'count': 38252.0, 'mean': -10.844779922553068, 'std': 33.49612112398574, 'min': -176.4103, '25%': -25.641, '50%': -13.0769, '75%': -0.76929, 'max': 262.564}, 'F7': {'count': 38252.0, 'mean': -40.43746896707361, 'std': 86.24070061095826, 'min': -1140.2563, '25%': -64.1025, '50%': -39.231, '75%': -15.769, 'max': 1076.0256}, 'F8': {'count': 38252.0, 'mean': -119.45622850805185, 'std': 53.118604108549334, 'min': -295.1282, '25%': -152.564, '50%': -135.6411, '75%': -106.4102, 'max': 113.8462}, 'FC5': {'count': 38252.0, 'mean': -49.645873240248875, 'std': 47.855448753242136, 'min': -210.7693, '25%': -75.1282, '50%': -56.4104, '75%': -31.5383, 'max': 331.2822}, 'FC6': {'count': 38252.0, 'mean': 32.84449653113563, 'std': 31.55307134126084, 'min': -99.7434, '25%': 15.2565, '50%': 32.564, '75%': 49.7435, 'max': 181.5386}, 'O1': {'count': 38252.0, 'mean': -39.90751475505856, 'std': 70.80948291443238, 'min': -518.4614, '25%': -64.4871, '50%': -39.8717, '75%': -5.89745, 'max': 318.9742}, 'O2': {'count': 38252.0, 'mean': 45.534384165625326, 'std': 47.99886475639196, 'min': -290.1284, '25%': 23.0771, '50%': 50.1284, '75%': 73.4613, 'max': 187.4359}, 'P7': {'count': 38252.0, 'mean': 39.98248563493412, 'std': 63.7088425959553, 'min': -393.8462, '25%': 12.3076, '50%': 44.35895, '75%': 73.8459, 'max': 713.5898}, 'P8': {'count': 38252.0, 'mean': 13.656483186212487, 'std': 30.16629283981416, 'min': -257.4358, '25%': 1.5386, '50%': 15.1282, '75%': 29.4873, 'max': 133.077}, 'T7': {'count': 38252.0, 'mean': 0.30127110189009754, 'std': 40.39435403564419, 'min': -198.9744, '25%': -16.6665, '50%': -0.25647, '75%': 16.4104, 'max': 322.3075}, 'T8': {'count': 38252.0, 'mean': -0.6924478969857788, 'std': 19.298092746101943, 'min': -121.0256, '25%': -9.231, '50%': -0.5127, '75%': 8.4617, 'max': 80.1279}, 'Unnamed: 14': {'count': 0.0, 'mean': nan, 'std': nan, 'min': nan, '25%': nan, '50%': nan, '75%': nan, 'max': nan}}
<dataframe_info>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<some_examples>
{'AF3': {'0': 55.8972, '1': 55.3845, '2': 52.3081, '3': 55.3848}, 'AF4': {'0': -35.8975, '1': -33.3333, '2': -30.2561, '3': -28.718}, 'F3': {'0': 96.9231, '1': 93.3333, '2': 95.3848, '3': 101.0259}, 'F4': {'0': -20.5129, '1': -14.8718, '2': -7.1792, '3': -2.564}, 'F7': {'0': -61.5386, '1': -69.7437, '2': -66.6665, '3': -53.3333}, 'F8': {'0': 51.282, '1': 42.5642, '2': 36.4106, '3': 45.1284}, 'FC5': {'0': 2.0515, '1': 3.0769, '2': 0.0, '3': 2.0513}, 'FC6': {'0': -11.7947, '1': -11.7947, '2': -11.282, '3': -11.2822}, 'O1': {'0': 45.1282, '1': 34.8718, '2': 41.5386, '3': 52.3076}, 'O2': {'0': -2.0515, '1': -6.6667, '2': 0.0, '3': -2.0513}, 'P7': {'0': 78.9744, '1': 74.3591, '2': 75.8975, '3': 74.3589}, 'P8': {'0': -6.6667, '1': -9.7434, '2': -13.3333, '3': -17.436}, 'T7': {'0': 23.5896, '1': 10.2566, '2': 8.2051, '3': 14.8716}, 'T8': {'0': -5.1282, '1': -3.0769, '2': -6.1538, '3': -5.6411}, 'Unnamed: 14': {'0': None, '1': None, '2': None, '3': None}}
<end_description>
<start_data_description><data_path>deapdata/(S01)/Raw EEG Data/.csv format/S01G4AllRawChannels.csv:
<column_names>
['AF3', 'AF4', 'F3', 'F4', 'F7', 'F8', 'FC5', 'FC6', 'O1', 'O2', 'P7', 'P8', 'T7', 'T8', 'Unnamed: 14']
<column_types>
{'AF3': 'float64', 'AF4': 'float64', 'F3': 'float64', 'F4': 'float64', 'F7': 'float64', 'F8': 'float64', 'FC5': 'float64', 'FC6': 'float64', 'O1': 'float64', 'O2': 'float64', 'P7': 'float64', 'P8': 'float64', 'T7': 'float64', 'T8': 'float64', 'Unnamed: 14': 'float64'}
<dataframe_Summary>
{'AF3': {'count': 38252.0, 'mean': 31.455274264613614, 'std': 24.96442567549041, 'min': -123.3335, '25%': 21.6666, '50%': 34.166700000000006, '75%': 45.3846, 'max': 127.1794}, 'AF4': {'count': 38252.0, 'mean': -10.81308230618007, 'std': 16.59929062635164, 'min': -116.4104, '25%': -19.6154, '50%': -8.9744, '75%': -0.76916, 'max': 85.3845}, 'F3': {'count': 38252.0, 'mean': 4.7450245103524, 'std': 22.968161009870247, 'min': -144.8718, '25%': -4.8718, '50%': 4.1025, '75%': 15.641, 'max': 121.282}, 'F4': {'count': 38252.0, 'mean': -27.979218421272094, 'std': 32.09978172405509, 'min': -190.0001, '25%': -44.6154, '50%': -32.0511, '75%': -17.1794, 'max': 170.2566}, 'F7': {'count': 38252.0, 'mean': -49.21957693717975, 'std': 37.23052415394909, 'min': -186.4102, '25%': -71.0258, '50%': -54.48715, '75%': -33.077, 'max': 135.3845}, 'F8': {'count': 38252.0, 'mean': -110.95747487503922, 'std': 49.281907646822326, 'min': -239.4873, '25%': -138.5896, '50%': -121.9231, '75%': -102.8206, 'max': 145.3845}, 'FC5': {'count': 38252.0, 'mean': -71.91768003267804, 'std': 54.15804414721969, 'min': -226.1539, '25%': -100.1282, '50%': -81.7949, '75%': -57.6924, 'max': 300.7693}, 'FC6': {'count': 38252.0, 'mean': 78.62727964655443, 'std': 41.44880176578124, 'min': -93.3333, '25%': 65.6411, '50%': 85.1282, '75%': 102.6923, 'max': 210.5128}, 'O1': {'count': 38252.0, 'mean': -98.40196197657639, 'std': 84.93562298718871, 'min': -466.5383, '25%': -136.7949, '50%': -109.7437, '75%': -58.2051, 'max': 297.9489}, 'O2': {'count': 38252.0, 'mean': 106.48198168750653, 'std': 71.79578332792381, 'min': -297.6924, '25%': 75.384875, '50%': 122.3079, '75%': 152.083425, 'max': 290.8973}, 'P7': {'count': 38252.0, 'mean': 107.91409173036703, 'std': 81.7777861471348, 'min': -335.6411, '25%': 68.81410000000001, '50%': 123.077, '75%': 158.205, 'max': 293.2051}, 'P8': {'count': 38252.0, 'mean': 51.20300944938826, 'std': 39.520622801321515, 'min': -228.4617, '25%': 40.0, '50%': 58.2052, '75%': 73.2051, 'max': 149.1025}, 'T7': {'count': 38252.0, 'mean': 4.875287061317056, 'std': 36.12803768798307, 'min': -124.1027, '25%': -13.8462, '50%': 2.3076, '75%': 21.0256, 'max': 181.7949}, 'T8': {'count': 38252.0, 'mean': 2.2655495754470354, 'std': 34.006675949840144, 'min': -244.3589, '25%': -7.4359, '50%': 3.5897, '75%': 16.4102, 'max': 156.1539}, 'Unnamed: 14': {'count': 0.0, 'mean': nan, 'std': nan, 'min': nan, '25%': nan, '50%': nan, '75%': nan, 'max': nan}}
<dataframe_info>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<some_examples>
{'AF3': {'0': -3.8462, '1': -10.5129, '2': -5.8975, '3': 0.51294}, 'AF4': {'0': 0.76904, '1': -4.3591, '2': -2.8206, '3': 3.0769}, 'F3': {'0': 1.2822, '1': -2.3079, '2': 0.25659, '3': 3.5896}, 'F4': {'0': 5.3848, '1': -0.25659, '2': -0.25659, '3': 12.3079}, 'F7': {'0': -5.3848, '1': -13.0769, '2': -12.5642, '3': -6.6667}, 'F8': {'0': -5.3848, '1': -9.9998, '2': -13.0769, '3': -2.5642}, 'FC5': {'0': -17.6924, '1': -27.4358, '2': -32.5642, '3': -21.0256}, 'FC6': {'0': -1.2822, '1': 0.25659, '2': 2.8206, '3': -2.051}, 'O1': {'0': 27.9487, '1': 12.9487, '2': -2.0513, '3': -0.51294}, 'O2': {'0': 7.9487, '1': 9.4871, '2': 11.0256, '3': -1.0256}, 'P7': {'0': 13.5898, '1': 8.9744, '2': 14.6155, '3': 4.6155}, 'P8': {'0': 13.0771, '1': 10.5129, '2': 9.4871, '3': 6.1541}, 'T7': {'0': -0.76904, '1': 1.282, '2': 2.8206, '3': -3.5896}, 'T8': {'0': -4.3589, '1': 0.76929, '2': 0.25659, '3': 1.5383}, 'Unnamed: 14': {'0': None, '1': None, '2': None, '3': None}}
<end_description>
<start_data_description><data_path>deapdata/(S01)/Raw EEG Data/.csv format/S01G1AllRawChannels.csv:
<column_names>
['AF3', 'AF4', 'F3', 'F4', 'F7', 'F8', 'FC5', 'FC6', 'O1', 'O2', 'P7', 'P8', 'T7', 'T8', 'Unnamed: 14']
<column_types>
{'AF3': 'float64', 'AF4': 'float64', 'F3': 'float64', 'F4': 'float64', 'F7': 'float64', 'F8': 'float64', 'FC5': 'float64', 'FC6': 'float64', 'O1': 'float64', 'O2': 'float64', 'P7': 'float64', 'P8': 'float64', 'T7': 'float64', 'T8': 'float64', 'Unnamed: 14': 'float64'}
<dataframe_Summary>
{'AF3': {'count': 38252.0, 'mean': 19.81258435199467, 'std': 26.420141594392277, 'min': -108.9744, '25%': 4.3589, '50%': 22.051, '75%': 36.4104, 'max': 132.0511}, 'AF4': {'count': 38252.0, 'mean': 4.605463136764091, 'std': 30.946253308355878, 'min': -195.1282, '25%': -5.3846, '50%': 5.3845, '75%': 18.4614, 'max': 144.1023}, 'F3': {'count': 38252.0, 'mean': 5.755797585263515, 'std': 49.74573399540408, 'min': -275.2565, '25%': -11.0256, '50%': 6.4102, '75%': 27.1797, 'max': 514.6157}, 'F4': {'count': 38252.0, 'mean': -12.333643185833422, 'std': 36.82892138583796, 'min': -323.4615, '25%': -27.6924, '50%': -13.4617, '75%': 0.76904, 'max': 224.1025}, 'F7': {'count': 38252.0, 'mean': -8.05865366144254, 'std': 114.26245670817984, 'min': -1215.0, '25%': -28.5896, '50%': -8.5896, '75%': 7.6924, 'max': 1461.6665}, 'F8': {'count': 38252.0, 'mean': -42.13930906200983, 'std': 34.715775768909616, 'min': -155.641, '25%': -64.8717, '50%': -46.923, '75%': -23.718, 'max': 109.231}, 'FC5': {'count': 38252.0, 'mean': -34.12430673502039, 'std': 56.295547868800014, 'min': -202.1794, '25%': -61.9231, '50%': -41.2821, '75%': -15.1282, 'max': 373.5897}, 'FC6': {'count': 38252.0, 'mean': 18.108150474105926, 'std': 40.87940472619958, 'min': -138.4617, '25%': -8.718, '50%': 16.7949, '75%': 44.3591, 'max': 190.0001}, 'O1': {'count': 38252.0, 'mean': -24.74092157710708, 'std': 66.34201464813205, 'min': -364.3589, '25%': -49.8717, '50%': -26.6667, '75%': 1.410325, 'max': 297.4358}, 'O2': {'count': 38252.0, 'mean': 14.020159083472757, 'std': 53.511912396323076, 'min': -289.7437, '25%': -6.7948, '50%': 18.2052, '75%': 44.4872, 'max': 203.8463}, 'P7': {'count': 38252.0, 'mean': 13.920253396619787, 'std': 74.93193073872077, 'min': -311.7949, '25%': -22.6923, '50%': 13.3334, '75%': 52.307625, 'max': 354.6152}, 'P8': {'count': 38252.0, 'mean': 16.153962072702083, 'std': 34.66924436338991, 'min': -252.0515, '25%': 6.66685, '50%': 20.7695, '75%': 33.5897, 'max': 149.2306}, 'T7': {'count': 38252.0, 'mean': 23.43778887317003, 'std': 60.252733394433314, 'min': -201.0256, '25%': -5.641, '50%': 22.5641, '75%': 54.3589, 'max': 448.7179}, 'T8': {'count': 38252.0, 'mean': -6.894656323355641, 'std': 31.488631983445853, 'min': -184.8721, '25%': -23.5897, '50%': -7.6923, '75%': 5.3846, 'max': 216.9231}, 'Unnamed: 14': {'count': 0.0, 'mean': nan, 'std': nan, 'min': nan, '25%': nan, '50%': nan, '75%': nan, 'max': nan}}
<dataframe_info>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<some_examples>
{'AF3': {'0': -35.1282, '1': -32.5642, '2': -27.1794, '3': -32.0515}, 'AF4': {'0': -16.1538, '1': -22.3079, '2': -13.8459, '3': -14.6157}, 'F3': {'0': -44.8718, '1': -47.9487, '2': -39.4871, '3': -42.3079}, 'F4': {'0': 1.7949, '1': -11.0256, '2': -3.0769, '3': -4.3589}, 'F7': {'0': 44.8716, '1': 40.2561, '2': 47.1794, '3': 42.3076}, 'F8': {'0': -1.7949, '1': -16.7949, '2': -1.7949, '3': 3.8462}, 'FC5': {'0': -5.8975, '1': -20.8975, '2': -5.8975, '3': -11.0259}, 'FC6': {'0': -3.8462, '1': -2.8206, '2': -1.5383, '3': -2.3076}, 'O1': {'0': 27.436, '1': 20.2561, '2': 31.282, '3': 22.3076}, 'O2': {'0': -10.5127, '1': 2.8206, '2': -4.1028, '3': -11.0259}, 'P7': {'0': 5.8975, '1': 20.2561, '2': 10.2566, '3': 3.333}, 'P8': {'0': 7.9487, '1': 5.8972, '2': 9.7434, '3': 2.3076}, 'T7': {'0': 12.564, '1': 19.2307, '2': 17.9485, '3': 7.4355}, 'T8': {'0': 18.2051, '1': 4.3591, '2': 11.282, '3': 10.5127}, 'Unnamed: 14': {'0': None, '1': None, '2': None, '3': None}}
<end_description>
<start_data_description><data_path>deapdata/(S01)/Raw EEG Data/.csv format/S01G2AllRawChannels.csv:
<column_names>
['AF3', 'AF4', 'F3', 'F4', 'F7', 'F8', 'FC5', 'FC6', 'O1', 'O2', 'P7', 'P8', 'T7', 'T8', 'Unnamed: 14']
<column_types>
{'AF3': 'float64', 'AF4': 'float64', 'F3': 'float64', 'F4': 'float64', 'F7': 'float64', 'F8': 'float64', 'FC5': 'float64', 'FC6': 'float64', 'O1': 'float64', 'O2': 'float64', 'P7': 'float64', 'P8': 'float64', 'T7': 'float64', 'T8': 'float64', 'Unnamed: 14': 'float64'}
<dataframe_Summary>
{'AF3': {'count': 38252.0, 'mean': 21.090578115196593, 'std': 17.109663014406895, 'min': -56.4103, '25%': 9.3589, '50%': 20.7693, '75%': 31.923, 'max': 90.6409}, 'AF4': {'count': 38252.0, 'mean': 2.196144924971243, 'std': 23.745828740006736, 'min': -162.3077, '25%': -6.1539, '50%': 2.5641, '75%': 13.718, 'max': 121.0256}, 'F3': {'count': 38252.0, 'mean': 0.30590320886489564, 'std': 33.26786485830204, 'min': -179.2308, '25%': -12.69215, '50%': 2.11545, '75%': 17.436, 'max': 216.5386}, 'F4': {'count': 38252.0, 'mean': -18.867657047644567, 'std': 29.882399480391186, 'min': -140.2565, '25%': -35.6409, '50%': -20.0, '75%': -4.3589, 'max': 118.718}, 'F7': {'count': 38252.0, 'mean': -21.984125614281606, 'std': 30.211778294265958, 'min': -148.9744, '25%': -39.1025, '50%': -18.9745, '75%': -2.3076, 'max': 125.8975}, 'F8': {'count': 38252.0, 'mean': -107.24027274882359, 'std': 43.41858588856923, 'min': -255.3846, '25%': -137.17944999999997, '50%': -114.6155, '75%': -82.564, 'max': 39.231}, 'FC5': {'count': 38252.0, 'mean': -58.38577036913103, 'std': 54.13251804281201, 'min': -216.6666, '25%': -92.6923, '50%': -63.0769, '75%': -25.8973, 'max': 174.8718}, 'FC6': {'count': 38252.0, 'mean': 43.906127060977205, 'std': 37.468994930217725, 'min': -159.4873, '25%': 19.6152, '50%': 42.8204, '75%': 66.2819, 'max': 202.5643}, 'O1': {'count': 38252.0, 'mean': -48.83612592227858, 'std': 67.01090733787751, 'min': -310.5128, '25%': -84.6155, '50%': -49.2307, '75%': -5.1282, 'max': 145.3844}, 'O2': {'count': 38252.0, 'mean': 54.64048271661613, 'std': 58.08526966460737, 'min': -181.7947, '25%': 20.7693, '50%': 62.1797, '75%': 93.3333, 'max': 259.6155}, 'P7': {'count': 38252.0, 'mean': 52.021038014509045, 'std': 87.35705759910468, 'min': -358.7179, '25%': 3.3333, '50%': 50.8975, '75%': 103.974525, 'max': 600.8977}, 'P8': {'count': 38252.0, 'mean': 15.074364157586531, 'std': 31.098505388664396, 'min': -131.7949, '25%': 1.0256, '50%': 17.0513, '75%': 34.1025, 'max': 117.9487}, 'T7': {'count': 38252.0, 'mean': 20.609038134724983, 'std': 82.33943101730816, 'min': -483.0769, '25%': -3.205125, '50%': 20.5129, '75%': 48.333525, 'max': 898.7181}, 'T8': {'count': 38252.0, 'mean': -11.621601354530481, 'std': 18.032634899385897, 'min': -107.0514, '25%': -22.0513, '50%': -9.4871, '75%': 0.0, 'max': 64.1028}, 'Unnamed: 14': {'count': 0.0, 'mean': nan, 'std': nan, 'min': nan, '25%': nan, '50%': nan, '75%': nan, 'max': nan}}
<dataframe_info>
RangeIndex: 38252 entries, 0 to 38251
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 AF3 38252 non-null float64
1 AF4 38252 non-null float64
2 F3 38252 non-null float64
3 F4 38252 non-null float64
4 F7 38252 non-null float64
5 F8 38252 non-null float64
6 FC5 38252 non-null float64
7 FC6 38252 non-null float64
8 O1 38252 non-null float64
9 O2 38252 non-null float64
10 P7 38252 non-null float64
11 P8 38252 non-null float64
12 T7 38252 non-null float64
13 T8 38252 non-null float64
14 Unnamed: 14 0 non-null float64
dtypes: float64(15)
memory usage: 4.4 MB
<some_examples>
{'AF3': {'0': 49.4872, '1': 46.4104, '2': 31.4104, '3': 16.4104}, 'AF4': {'0': 9.9999, '1': 24.9999, '2': 39.9999, '3': 40.0}, 'F3': {'0': 51.5385, '1': 66.5385, '2': 66.6665, '3': 73.3335}, 'F4': {'0': -41.2821, '1': -26.2821, '2': -11.2821, '3': 0.0}, 'F7': {'0': -30.0001, '1': -15.0001, '2': -0.0001225, '3': -3.5898}, 'F8': {'0': -58.7179, '1': -43.7179, '2': -28.7179, '3': -13.7179}, 'FC5': {'0': -73.5897, '1': -58.5897, '2': -46.1538, '3': -49.2307}, 'FC6': {'0': -8.9745, '1': -13.5898, '2': -28.5898, '3': -43.5898}, 'O1': {'0': -3.3334, '1': 11.6666, '2': 26.6666, '3': 41.6666}, 'O2': {'0': 44.3588, '1': 29.3588, '2': 14.3588, '3': -0.64124}, 'P7': {'0': 140.2562, '1': 128.4617, '2': 113.4617, '3': 98.4617}, 'P8': {'0': 54.6151, '1': 50.5129, '2': 35.5129, '3': 31.2822}, 'T7': {'0': 3.3334, '1': -3.3333, '2': -18.3333, '3': -33.3333}, 'T8': {'0': -31.5385, '1': -16.5385, '2': -11.7949, '3': 0.0}, 'Unnamed: 14': {'0': None, '1': None, '2': None, '3': None}}
<end_description>
69179141
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import mutual_info_regression
from sklearn.preprocessing import LabelEncoder
import warnings
warnings.filterwarnings("ignore")
from sklearn.ensemble import RandomForestClassifier
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
def make_mi_scores(X, y):
X = X.copy()
for colname in X.select_dtypes(["object", "category"]):
X[colname], _ = X[colname].factorize()
# All discrete features should now have integer dtypes
discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes]
mi_scores = mutual_info_regression(
X, y, discrete_features=discrete_features, random_state=0
)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data.head()
train_data.select_dtypes(["object"]).nunique().sort_values(ascending=False)
# print(train_data.SibSp.nunique())
# print(train_data.Parch.nunique())
features = ["Sex", "Age", "Pclass", "Fare", "Cabin", "Embarked", "SibSp", "Parch"]
# sns.relplot(
# x="value", y="Survived", col="variable", data=train_data.melt(id_vars="Survived", value_vars=features), facet_kws=dict(sharex=False),
# );
train_data.size
# # Handling Missing Values
# Identify missing values from selected features
features.append("Survived")
X = train_data[features]
X.isnull().sum()
# Since 'Cabin' has a lot of missing values, it's best to drop the column from the training and test data
del X["Cabin"]
del test_data["Cabin"]
features.remove("Cabin")
# Fill the two missing Embarked values with the mode (and the missing test-set Fare as well)
X.Embarked.fillna(X.Embarked.mode()[0], inplace=True)
test_data.Embarked.fillna(test_data.Embarked.mode()[0], inplace=True)
test_data.Fare.fillna(test_data.Fare.mode()[0], inplace=True)
# Extract the titles from the names and add them as a feature
train_names = train_data.Name.values
train_titles = []
test_names = test_data.Name.values
test_titles = []
for name in train_names:
title = name.split(",")[1].split(".")[0]
train_titles.append(title)
for name in test_names:
title = name.split(",")[1].split(".")[0]
test_titles.append(title)
print(len(train_titles))
print(len(test_titles))
# add title as a feature
train_data["Title"] = train_titles
X["Title"] = train_titles
test_data["Title"] = test_titles
# Label encoding for 'Title' feature
labelEncoder = LabelEncoder()  # instantiate here: it was previously used before being defined further down
X["Title"] = labelEncoder.fit_transform(X["Title"].astype("str"))
test_data["Title"] = labelEncoder.fit_transform(test_data["Title"].astype("str"))
# Since there are significant outliers in Age, the mean could give a misleading fill value.
# The mode is a reasonable replacement for the null values in the Age column.
# grpx = X.groupby(['Sex', 'Pclass'])
# grptest = test_data.groupby(['Sex', 'Pclass'])
# grpx.Age.apply(lambda x: x.fillna(x.mode()[0]))
# X.Age.fillna(X.Age.mean, inplace = True)
# grptest.Age.apply(lambda x: x.fillna(x.mode()[0]))
# test_data.Age.fillna(test_data.Age.mean, inplace = True)
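# A minimal sketch (kept commented, like the attempt above) of the group-wise fill that
# the commented code was aiming for: assign the result of groupby(...).transform(...) back,
# so each missing Age gets the mode of its own Sex/Pclass group.
# X["Age"] = X.groupby(["Sex", "Pclass"])["Age"].transform(lambda s: s.fillna(s.mode()[0]))
# test_data["Age"] = test_data.groupby(["Sex", "Pclass"])["Age"].transform(lambda s: s.fillna(s.mode()[0]))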
X["Age"] = X["Age"].fillna(X["Age"].mode()[0])
test_data["Age"] = test_data["Age"].fillna(test_data["Age"].mode()[0])
test_data["Fare"] = test_data["Fare"].fillna(test_data["Fare"].mode()[0])
# X['Fare_Category'] = pd.cut(X['Fare'], bins=[0,7.90,14.45,31.28,120], labels=['Low','Mid','High_Mid','High'])
# test_data['Fare_Category'] = pd.cut(test_data['Fare'], bins=[0,7.90,14.45,31.28,120], labels=['Low','Mid','High_Mid','High'])
bins = [0, 2, 4, 13, 16, 20, 23, 25, 28, 33, 38, 44, 55, 65, 110]
labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
X["AgeGroup"] = pd.cut(X["Age"], bins=bins, labels=labels, right=False)
test_data["AgeGroup"] = pd.cut(test_data["Age"], bins=bins, labels=labels, right=False)
# # Feature Engineering
# It seems worthwhile to add features that combine Age with Sex and Age with Pclass; let's note that for later use (a small sketch follows the FamSize feature below).
# SibSp + Parch gives the size of each passenger's family on the ship, so a new feature called 'FamSize' should be informative.
# Add new feature "FamSize"
X["FamSize"] = X["SibSp"] + X["Parch"]
test_data["FamSize"] = test_data["SibSp"] + test_data["Parch"]
X.isnull().sum()
# # Label Encoding for Categorical values
X.head()
# Label encoding for categorical values
labelEncoder = LabelEncoder()
# Label encoding for Sex
X["Sex"] = labelEncoder.fit_transform(X["Sex"].astype("str"))
test_data["Sex"] = labelEncoder.fit_transform(test_data["Sex"].astype("str"))
# Label encoding for Embarked
X["Embarked"] = labelEncoder.fit_transform(X["Embarked"].astype("str"))
test_data["Embarked"] = labelEncoder.fit_transform(test_data["Embarked"].astype("str"))
# Label encoding for AgeGroup
X["AgeGroup"] = labelEncoder.fit_transform(X["AgeGroup"].astype("str"))
test_data["AgeGroup"] = labelEncoder.fit_transform(test_data["AgeGroup"].astype("str"))
# Label encoding for Is_Alone
# X['Is_Alone'] = labelEncoder.fit_transform(X['Is_Alone'].astype('str'))
# test_data['Is_Alone'] = labelEncoder.fit_transform(test_data['Is_Alone'].astype('str'))
# Label encoding for Fare_Category
# X['Fare_Category'] = labelEncoder.fit_transform(X['Fare_Category'].astype('str'))
# test_data['Fare_Category'] = labelEncoder.fit_transform(test_data['Fare_Category'].astype('str'))
# Let's try to find ways to create new features using the available relationships between features.
sns.relplot(
x="value",
y="Pclass",
hue="Survived",
col="variable",
height=4,
aspect=1,
facet_kws={"sharex": False},
col_wrap=3,
    data=X.melt(  # X still contains Survived, so melt it directly ('Xy' was never defined)
value_vars=["Title"],
id_vars=["Pclass", "Survived"],
),
)
c = sns.countplot(X.Title, hue=X.Survived, data=X)
# Standardize "fare" variable values
from sklearn.preprocessing import StandardScaler
# Standardize the continuous variables
cont = ["Fare"]
scalar = StandardScaler()
X[cont] = scalar.fit_transform(X[cont])
test_data[cont] = scalar.transform(test_data[cont])  # transform only, so the test set reuses the scaling fitted on train
conta = ["Age"]
scalara = StandardScaler()
X[conta] = scalara.fit_transform(X[conta])  # fixed: scale the Age column itself (the original passed the Fare column here)
test_data[conta] = scalara.transform(test_data[conta])
# Add KMeans clustering (kept commented out below)
X.head()
# from sklearn.cluster import KMeans
# features_to_kmean = ['Sex','Pclass','Fare_Category','Age','Title','FamSize','Is_Alone','Embarked']
# X_train_scaled = X.loc[:, features_to_kmean]
# X_test_scaled = test_data.loc[:, features_to_kmean]
# kmeans = KMeans(n_clusters=10, n_init=10, random_state=0)
# #X["Cluster_all"] = kmeans.fit_predict(train_data)
# X["Cluster_selected"] = kmeans.fit_predict(X_train_scaled)
# test_data["Cluster_selected"] = kmeans.fit_predict(X_test_scaled)
X.head()
del test_data["Name"]
del test_data["Ticket"]
test_data.head()
# c = sns.countplot(X.Cluster_selected, hue = X.Survived, data=X)
# # Apply PCA
from sklearn.decomposition import PCA
def apply_pca(X, standardize=True):
# Standardize
if standardize:
X = (X - X.mean(axis=0)) / X.std(axis=0)
# Create principal components
pca = PCA()
X_pca = pca.fit_transform(X)
# Convert to dataframe
component_names = [f"PC{i+1}" for i in range(X_pca.shape[1])]
X_pca = pd.DataFrame(X_pca, columns=component_names)
# Create loadings
loadings = pd.DataFrame(
pca.components_.T, # transpose the matrix of loadings
columns=component_names, # so the columns are the principal components
index=X.columns, # and the rows are the original features
)
return pca, X_pca, loadings
def plot_variance(pca, width=8, dpi=100):
# Create figure
fig, axs = plt.subplots(1, 2)
n = pca.n_components_
grid = np.arange(1, n + 1)
# Explained variance
evr = pca.explained_variance_ratio_
axs[0].bar(grid, evr)
axs[0].set(xlabel="Component", title="% Explained Variance", ylim=(0.0, 1.0))
# Cumulative Variance
cv = np.cumsum(evr)
axs[1].plot(np.r_[0, grid], np.r_[0, cv], "o-")
axs[1].set(xlabel="Component", title="% Cumulative Variance", ylim=(0.0, 1.0))
# Set up figure
fig.set(figwidth=8, dpi=100)
return axs
Xp = X.copy()
del Xp["SibSp"]
del Xp["Parch"]
y = Xp.pop("Survived")
X_scaled_pca = (Xp - Xp.mean(axis=0)) / Xp.std(axis=0)
X_scaled_pca.head()
Xp.head()
testp = test_data.copy()
del testp["SibSp"]
del testp["Parch"]
test_scaled_pca = (testp - testp.mean(axis=0)) / testp.std(axis=0)
pca, X_pca, loadings = apply_pca(Xp)
X.isnull().sum()
test_data.isnull().sum()
tpca, test_pca, test_loading = apply_pca(testp)
# PC6, PC7, PC1, PC2
loadings
# Look at explained variance
plot_variance(pca)
mi_scores = make_mi_scores(X_pca, y)
mi_scores
# PC6, PC7, PC1 and PC2 show high MI scores against the target, so it is worth examining the loadings assigned to those components (a short check follows).
# After exploring the loadings, it seems features like SibSp*Parch and Pclass/Fare could be informative; below, the Sex*AgeGroup and Title/Sex variants are added to the dataset (the others are left commented).
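# A short sketch of that loading check: list the features with the largest absolute
# weights in the high-MI components (uses the 'loadings' frame built by apply_pca above).
for pc in ["PC1", "PC2", "PC6", "PC7"]:
    print(pc, loadings[pc].abs().sort_values(ascending=False).head(3).index.tolist())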
# X['SibSpMParch'] = (X.SibSp + 1) * (X.Parch + 1)
# X['PclassDFare'] = (X.Pclass + 1)/ (X.Fare + 1)
X["SexMAgeGroup"] = (X.Sex + 1) * (X.AgeGroup + 1)
X["TitleDSex"] = (X.Title + 1) / (X.Sex + 1)
# test_data['SibSpMParch'] = (test_data.SibSp + 1) * (test_data.Parch + 1)
# test_data['PclassDFare'] = test_data.Pclass / test_data.Fare
test_data["SexMAgeGroup"] = (test_data.Sex + 1) * (test_data.AgeGroup + 1)
test_data["TitleDSex"] = (test_data.Title + 1) / (test_data.Sex + 1)
# join PCA's to columns space
X = X.join(X_pca)
test_data = test_data.join(test_pca)
# The plots above show some clear, strong relations between some feature columns; the commented lines below sketch new features built from those relationships.
# X['SexVsAgeGrp'] = (X["Sex"] + 1) * (X["AgeGroup"] + 1)
# X['SexVsPclass'] = (X["Sex"] + 1) * (X["Pclass"] + 1)
# X['SexVsTitle'] = (X["Sex"] + 1) * (X["Title"] + 1)
# X['PclassVsFamSize'] = (X["Sex"] + 1) * (X["AgeGroup"] + 1)
# test_data['SexVsAgeGrp'] = (test_data["Sex"] + 1) * (test_data["AgeGroup"] + 1)
# test_data['SexVsPclass'] = (test_data["Sex"] + 1) * (test_data["Pclass"] + 1)
# test_data['SexVsTitle'] = (test_data["Sex"] + 1) * (test_data["Title"] + 1)
# test_data['PclassVsFamSize'] = (test_data["Sex"] + 1) * (test_data["AgeGroup"] + 1)
# # Check mutual information scores
from sklearn.feature_selection import mutual_info_classif
def make_mi_scores(X, y):
X = X.copy()
for colname in X.select_dtypes(["object", "category"]):
X[colname], _ = X[colname].factorize()
# All discrete features should now have integer dtypes
discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes]
mi_scores = mutual_info_regression(
X, y, discrete_features=discrete_features, random_state=0
)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
Xt = X.copy()
y = Xt.pop("Survived")
scores = make_mi_scores(Xt, y)
X_pca.head()
scores
train_features = [
"PC6",
"TitleDSex",
"Title",
"Sex",
"SexMAgeGroup",
"PC2",
"PC1",
"Fare",
"PC5",
"Age",
"PC4",
"PC3",
"PC8",
"Pclass",
"PC7",
]
scores
plot_mi_scores(scores)
# # **Train the model with feature engineered data**
# train_features = ['Sex','Pclass','Fare','AgeGroup','Title','FamSize']
# train_features = ['Sex','Pclass','Fare','AgeGroup','Title','FamSize','Cluster_selected']
# train_features = ['Sex','Pclass','Fare','AgeGroup','Title','FamSize','Cluster_selected','SexVsAgeGrp']
# train_features = ['Sex','Pclass','Fare','AgeGroup','Title','FamSize','Cluster_selected','SexVsAgeGrp','SexVsPclass']
# train_features = ['Sex','Pclass','Fare','AgeGroup','Title','FamSize','Cluster_selected','SexVsAgeGrp','SexVsPclass','SexVsTitle']
# train_features = ['Sex','Pclass','PclassDFare','Fare','Title','Cluster_selected','Embarked']
# train_features = ['Sex','Pclass','Fare','AgeGroup','Title','FamSize']
# train_features = ['PclassDFare','Title','Sex','PC6','PC1','PC8','Fare','PC2']
X[train_features].head()
test_data[train_features].head()
# Splitting the training set into train and cross-validation sets
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
y = train_data["Survived"]
# train_features = ['Sex','Pclass','Fare','AgeGroup','Title','FamSize']
X_final_train = pd.get_dummies(X[train_features])
test = pd.get_dummies(test_data[train_features])
X_train, X_test, y_train, y_test = train_test_split(
X_final_train, y, test_size=0.2, random_state=0
)
# Train on the split training set
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
clf = RandomForestClassifier(
criterion="entropy",
n_estimators=700,
min_samples_split=10,
min_samples_leaf=1,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
)
model.fit(X_train, y_train)
test_predictions = model.predict(X_test)
train_predictions = model.predict(X_train)
clf.fit(X_train, y_train)
test_predictions1 = clf.predict(X_test)
train_predictions1 = clf.predict(X_train)
from sklearn.metrics import accuracy_score
print("train: " + str(accuracy_score(y_train, train_predictions)))
print("test: " + str(accuracy_score(y_test, test_predictions)))
print("train1: " + str(accuracy_score(y_train, train_predictions1)))
print("test1: " + str(accuracy_score(y_test, test_predictions1)))
# Train on all training data and make predictions
model_final = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
clf1 = RandomForestClassifier(
criterion="entropy",
n_estimators=700,
min_samples_split=10,
min_samples_leaf=1,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
)
clf1.fit(X_final_train, y)
test_output = clf1.predict(test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": test_output})
output.to_csv("submission_170217L_8.csv", index=False)
output.head(100)
69179333
# import numpy as np # linear algebra
# import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# # Input data files are available in the read-only "../input/" directory
# # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# from sklearn.preprocessing import StandardScaler
# from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
# from sklearn.model_selection import KFold, cross_val_score, train_test_split
# from sklearn.metrics import mean_squared_error
# from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
# from xgboost import XGBRegressor
# train = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/train.csv")
# test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
# train.SalePrice.describe()
# #Save the 'Id' column
# train_ID = train['Id']
# test_ID = test['Id']
# #drop "id" axis=1 means drop column, axis=0 means drop label
# train.drop("Id", axis = 1, inplace = True)
# test.drop("Id", axis = 1, inplace = True)
# print(train.shape)
# print(test.shape)
# #We use the numpy function log1p, which applies log(1+x) to all elements of the column (predictions must later be inverted with expm1)
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Combine two dataset into one
# ntrain = train.shape[0]
# ntest = test.shape[0]
# print(ntrain)
# print(ntest)
# y_train = train.SalePrice.values
# all_data = pd.concat((train, test)).reset_index(drop=True)
# all_data.drop(['SalePrice'], axis=1, inplace=True)
# print("all_data size is : {}".format(all_data.shape))
# #find out missing data
# all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
# all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
# missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
# missing_data.head(20)
# # 1.PoolQC
# all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
# # 2.MiscFeature
# all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
# # 3.Alley
# all_data["Alley"] = all_data["Alley"].fillna("None")
# # 4.Fence
# all_data["Fence"] = all_data["Fence"].fillna("None")
# # 5.FireplaceQu
# all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
# lambda x: x.fillna(x.median()))
# for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
# all_data[col] = all_data[col].fillna('None')
# for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
# all_data[col] = all_data[col].fillna(0)
# for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
# all_data[col] = all_data[col].fillna(0)
# for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
# all_data[col] = all_data[col].fillna('None')
# all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
# all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
# all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# all_data = all_data.drop(['Utilities'], axis=1)
# all_data["Functional"] = all_data["Functional"].fillna("Typ")
# all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
# all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
# all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
# all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
# missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
# missing_data.head()
# #MSSubClass=The building class
# all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
# #Changing OverallCond into a categorical variable
# all_data['OverallCond'] = all_data['OverallCond'].astype(str)
# #Year and month sold are transformed into categorical features.
# all_data['YrSold'] = all_data['YrSold'].astype(str)
# all_data['MoSold'] = all_data['MoSold'].astype(str)
# from sklearn.preprocessing import LabelEncoder
# cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
# 'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
# 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
# 'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
# 'YrSold', 'MoSold')
# # process columns, apply LabelEncoder to categorical features
# for c in cols:
# lbl = LabelEncoder()
# lbl.fit(list(all_data[c].values))
# all_data[c] = lbl.transform(list(all_data[c].values))
# # shape
# print('Shape all_data: {}'.format(all_data.shape))
# all_data = pd.get_dummies(all_data)
# print(all_data.shape)
# train = all_data[:ntrain]
# test = all_data[ntrain:]
# Setup cross validation folds
# kf = KFold(n_splits=12, random_state=42, shuffle=True)
# Define error metrics
# X = train.values
# n_folds = 5
# def rmsle_cv(model):
#     kf = KFold(n_splits=n_folds, shuffle=True, random_state=42)  # pass the splitter itself; get_n_splits() returned a plain int and dropped the shuffling
# rmse= np.sqrt(-cross_val_score(model, X , y_train, scoring="neg_mean_squared_error", cv = kf))
# return(rmse)
# # Gradient Boosting Regressor
# gbr = GradientBoostingRegressor(n_estimators=6000,
# learning_rate=0.01,
# max_depth=4,
# min_samples_split=10,
# random_state=42)
# # XGBoost Regressor
# xgb = XGBRegressor(learning_rate=0.01,
# n_estimators=6000,
# max_depth=4,
# random_state=42)
# # Stack up all the models above, optimized using xgboost
# # stack_gen = StackingCVRegressor(regressors=(xgb, gbr),
# # meta_regressor=xgb,
# # use_features_in_secondary=True)
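# A minimal sketch of the stacking idea above using scikit-learn's built-in
# StackingRegressor in place of the StackingCVRegressor hinted at (kept commented,
# like the rest of this notebook; passthrough=True plays the role of
# use_features_in_secondary=True):
# from sklearn.ensemble import StackingRegressor
# stack_gen = StackingRegressor(estimators=[('gbr', gbr), ('xgb', xgb)],
#                               final_estimator=xgb,
#                               passthrough=True, cv=5)
# stack_gen.fit(X, y_train)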
# scores = {}
# score = rmsle_cv(xgb)
# print("xgb: {:.4f} ({:.4f})".format(score.mean(), score.std()))
# scores['xgb'] = (score.mean(), score.std())
# score = rmsle_cv(gbr)
# print("gbr: {:.4f} ({:.4f})".format(score.mean(), score.std()))
# scores['gbr'] = (score.mean(), score.std())
# print('xgboost')
# xgb_model_full_data = xgb.fit(X, y_train)
# gbr.fit(train.values, y_train)
# gbr_train_pred = gbr.predict(train.values)
# gbr_pred = np.expm1(gbr.predict(test.values))  # expm1 inverts the log1p applied to SalePrice
# print('Gradient Boosting')
# sub = pd.DataFrame()
# sub['Id'] = test_ID
# sub['SalePrice'] = gbr_pred
# sub.to_csv('submission.csv',index=False)
<jupyter_script># # MLB Engagement Predetion using LightGBM
# This is the first competition where I spent a lot of time conducting research, doing feature engineering, and designing an appropriate CV scheme. I'm a big baseball fan and very glad to have the opportunity to participate in this competition. Below are the method and pipeline of my work.
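# In short, the pipeline is: load the preprocessed tables, merge rosters, box scores,
# games, standings, transactions, awards and the precomputed target statistics into a
# single training frame, engineer cumulative and league-adjusted features, label-encode
# the categorical columns, build season-based CV folds, train one LightGBM regressor per
# target, and finally rebuild the same features on the fly inside the `mlb` inference loop.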
# ## Import Library
import torch
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.metrics import mean_absolute_error
from datetime import timedelta
from functools import reduce
from tqdm import tqdm_notebook
import lightgbm as lgbm
import mlb
# ## Load Dataset
# Shout out to @colum2131 and @Ken Miller. Thanks to their preprocessing of the raw data, I saved a lot of time on this part.
BASE_DIR = Path("../input/mlb-player-digital-engagement-forecasting")
TRAIN_DIR = Path("../input/mlb-pdef-train-dataset")
players = pd.read_csv(BASE_DIR / "players.csv")
seasons = pd.read_csv(BASE_DIR / "seasons.csv")
rosters = pd.read_pickle(TRAIN_DIR / "rosters_train.pkl")
targets = pd.read_pickle(TRAIN_DIR / "nextDayPlayerEngagement_train.pkl")
games = pd.read_pickle(TRAIN_DIR / "games_train.pkl")
scores = pd.read_pickle(TRAIN_DIR / "playerBoxScores_train.pkl")
team_scores = pd.read_pickle(TRAIN_DIR / "teamBoxScores_train.pkl")
transactions = pd.read_pickle(TRAIN_DIR / "transactions_train.pkl")
awards = pd.read_pickle(TRAIN_DIR / "awards_train.pkl")
standings = pd.read_pickle(TRAIN_DIR / "standings_train.pkl")
inseason_player_target_stats = pd.read_csv(
"../input/inseason-target-stats/inseason_target_stats.csv"
)
lastmonth_player_target_stats = pd.read_csv(
"../input/month-target-stats/last_month_target_stats.csv"
)
cumulative_data = pd.read_csv("../input/cumulated-revised-data/train_cumulated.csv")
playoff_cumulative_data = pd.read_csv(
"../input/playoff-cumulated-data/train_cumulated_playoff.csv"
)
last7_cumulative_data = pd.read_csv(
"../input/recently-player-stats/train_cumulated_last7.csv"
)
last_year_award = pd.read_csv("../input/last-month-data/last_year_award.csv")
total_cumulative_data = pd.read_csv(
"../input/last-month-data/total_train_cumulated.csv"
)
# ## Preprocess data
# ### Date
# #### Fix incorrect dates in seasons.csv
seasons["regularSeasonStartDate"][2] = "2019-03-28"
seasons["seasonStartDate"][2] = "2019-03-28"
seasons["seasonStartDate"][4] = "2021-04-01"
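# optional sanity check (not in the original notebook): inspect the corrected rows
seasons.loc[[2, 4], ["seasonId", "seasonStartDate", "regularSeasonStartDate"]]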
# #### Convert the date columns to datetime type
seasons["regularSeasonStartDate"] = pd.to_datetime(seasons["regularSeasonStartDate"])
seasons["postSeasonEndDate"] = pd.to_datetime(seasons["postSeasonEndDate"])
seasons["regularSeasonEndDate"] = pd.to_datetime(seasons["regularSeasonEndDate"])
seasons["allStarDate"][3] = np.nan
seasons["allStarDate"] = pd.to_datetime(seasons["allStarDate"])
seasons.rename(columns={"seasonId": "year"}, inplace=True)
team_col_dict = {}
for i, col in enumerate(team_scores.columns):
if i > 4 and i < len(team_scores.columns) - 2:
team_col_dict[col] = "team_" + col
team_scores.rename(columns=team_col_dict, inplace=True)
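# Team box-score columns are prefixed with "team_" so they can be told apart from the
# player box-score columns that share the same names.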
lastmonth_player_target_stats.rename(
columns={
"target1_median": "target1_last_month_median",
"target2_median": "target2_last_month_median",
"target3_median": "target3_last_month_median",
"target4_median": "target4_last_month_median",
"target1_std": "target1_last_month_std",
"target2_std": "target2_last_month_std",
"target3_std": "target3_last_month_std",
"target4_std": "target4_last_month_std",
"target1_mean": "target1_last_month_mean",
"target2_mean": "target2_last_month_mean",
"target3_mean": "target3_last_month_mean",
"target4_mean": "target4_last_month_mean",
},
inplace=True,
)
last7_cumulative_data.rename(
columns={
"cumulative_hits": "last7_cumulative_hits",
"cumulative_atBats": "last7_cumulative_atBats",
"cumulative_earnedRuns": "last5_cumulative_earnedRuns",
"cumulative_inningsPitched": "last5_cumulative_inningsPitched",
"cumulative_totalBases": "last7_cumulative_totalBases",
"cumulative_baseOnBalls": "last7_cumulative_baseOnBalls",
"cumulative_hitByPitch": "last7_cumulative_hitByPitch",
"cumulative_sacFlies": "last7_cumulative_sacFlies",
"cumulative_baseOnBallsPitching": "last5_cumulative_baseOnBallsPitching",
"cumulative_hitByPitchPitching": "last5_cumulative_hitByPitchPitching",
"cumulative_hitsPitching": "last5_cumulative_hitsPitching",
"cumulative_hr": "last7_cumulative_hr",
"cumulative_rbi": "last7_cumulative_rbi",
"avg": "last7_avg",
"era": "last5_era",
"slg": "last7_slg",
"obp": "last7_obp",
"ops": "last7_ops",
"whip": "last5_whip",
"cumulative_win": "last5_cumulative_win",
"cumulative_hold": "last5_cumulative_hold",
"cumulative_save": "last5_cumulative_save",
"cumulative_loss": "last5_cumulative_loss",
"cumulative_bs": "last5_cumulative_bs",
"cumulative_k": "last5_cumulative_k",
},
inplace=True,
)
lastmonth_player_target_stats
team_scores
# #### Create "year", "month", "days" columns
standings["year"] = pd.to_datetime(standings["gameDate"]).dt.year
standings["month"] = pd.to_datetime(standings["gameDate"]).dt.month
standings["days"] = pd.to_datetime(standings["gameDate"]).dt.day
standings["date"] = (
standings["year"] * 10000 + standings["month"] * 100 + standings["days"]
)
targets["year"] = pd.to_datetime(targets["date"], format="%Y%m%d").dt.year
targets["month"] = pd.to_datetime(targets["date"], format="%Y%m%d").dt.month
targets["days"] = pd.to_datetime(targets["date"], format="%Y%m%d").dt.day
# transactions['datetime_date'] = pd.to_datetime(transactions['date'], format="%Y%m%d")
# transactions['transaction_time'] = 1
# tmp_df = transactions.copy()
# tmp_df['datetime_date'] = transactions['datetime_date'] + pd.DateOffset(1)
# tmp_df['transaction_time'] = 0
# tmp_df['date'] = tmp_df['datetime_date'].dt.year * 10000 + tmp_df['datetime_date'].dt.month * 100 + tmp_df['datetime_date'].dt.day
# transactions = pd.concat([transactions, tmp_df], axis=0)
# ### Box Score
# #### Combine box scores when a player has two games (a doubleheader) in one day
scores["game_count"] = 1
scores = scores.groupby(["playerId", "date"]).sum().reset_index()
scores
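# optional sanity check (not in the original notebook): players with a doubleheader that
# day now have game_count > 1 and their counting stats summed across both games
scores.loc[
    scores["game_count"] > 1, ["playerId", "date", "game_count", "hits", "atBats"]
].head()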
last_year_award["year"] = last_year_award["year"].apply(lambda x: x + 1)
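# The +1 shift above makes an award won in season Y line up with season Y+1 when this
# table is later merged on ["playerId", "year"], i.e. it becomes a "last year's award" feature.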
# ### Columns Name
targets_cols = [
"playerId",
"target1",
"target2",
"target3",
"target4",
"date",
"year",
"month",
]
players_cols = ["playerId", "primaryPositionName"]
rosters_cols = ["playerId", "teamId", "status", "date"]
scores_cols = [
"playerId",
"flyOuts",
"groundOuts",
"runsScored",
"gamePk",
"doubles",
"triples",
"homeRuns",
"strikeOuts",
"baseOnBalls",
"intentionalWalks",
"hits",
"hitByPitch",
"atBats",
"caughtStealing",
"stolenBases",
"groundIntoDoublePlay",
"groundIntoTriplePlay",
"plateAppearances",
"totalBases",
"rbi",
"leftOnBase",
"sacBunts",
"sacFlies",
"catchersInterference",
"pickoffs",
"gamesPlayedPitching",
"gamesStartedPitching",
"completeGamesPitching",
"shutoutsPitching",
"winsPitching",
"lossesPitching",
"runsPitching",
"doublesPitching",
"triplesPitching",
"homeRunsPitching",
"strikeOutsPitching",
"baseOnBallsPitching",
"intentionalWalksPitching",
"hitsPitching",
"hitByPitchPitching",
"atBatsPitching",
"caughtStealingPitching",
"stolenBasesPitching",
"inningsPitched",
"saveOpportunities",
"earnedRuns",
"battersFaced",
"outsPitching",
"pitchesThrown",
"balls",
"strikes",
"hitBatsmen",
"balks",
"wildPitches",
"pickoffsPitching",
"rbiPitching",
"gamesFinishedPitching",
"inheritedRunners",
"inheritedRunnersScored",
"catchersInterferencePitching",
"sacBuntsPitching",
"sacFliesPitching",
"saves",
"holds",
"blownSaves",
"assists",
"putOuts",
"errors",
"chances",
"date",
]
team_scores_cols = ["teamId", "gamePk", "team_runsScored", "team_runsPitching"]
trans_cols = ["playerId", "date", "typeDesc"]
awards_cols = ["playerId", "date", "awardId"]
games_cols = ["gamePk", "homeId", "awayId", "dayNight", "gameType"]
standings_cols = ["streakCode", "pct", "teamId", "date"]
stats_cols = [
"playerId",
"target1_median",
"target1_std",
"target2_median",
"target2_std",
"target3_median",
"target3_std",
"target4_median",
"target4_std",
"target1_mean",
"target2_mean",
"target3_mean",
"target4_mean",
]
last_month_stats_cols = [
"playerId",
"year",
"month",
"target1_last_month_median",
"target1_last_month_std",
"target2_last_month_median",
"target2_last_month_std",
"target3_last_month_median",
"target3_last_month_std",
"target4_last_month_median",
"target4_last_month_std",
"target1_last_month_mean",
"target2_last_month_mean",
"target3_last_month_mean",
"target4_last_month_mean",
]
feature_cols = [
"label_playerId",
"label_primaryPositionName",
"label_teamId",
"label_status",
"DaysAfterRegularSeason",
"label_typeDesc",
"label_awardId",
"flyOuts",
"groundOuts",
"runsScored",
"label_homeId",
"label_awayId",
"Split",
"label_gameType",
"WinLose",
"Streak",
"pct",
"label_dayNight",
"cumulative_hits",
"cumulative_hr",
"cumulative_rbi",
"cumulative_win",
"cumulative_loss",
"pitch_win_pct",
"cumulative_save",
"cumulative_bs",
"cumulative_h_streak",
"last7_avg",
"last7_ops",
"last7_cumulative_hits",
"last7_cumulative_hr",
"last7_cumulative_rbi",
"last5_era",
"last5_whip",
"last5_cumulative_win",
"last5_cumulative_loss",
"era+",
"avg+",
"whip+",
"ops+",
"label_last_year_award",
"doubles",
"triples",
"homeRuns",
"strikeOuts",
"baseOnBalls",
"intentionalWalks",
"hits",
"hitByPitch",
"atBats",
"caughtStealing",
"stolenBases",
"totalBases",
"rbi",
"leftOnBase",
"catchersInterference",
"pickoffs",
"gamesPlayedPitching",
"gamesStartedPitching",
"completeGamesPitching",
"shutoutsPitching",
"winsPitching",
"lossesPitching",
"runsPitching",
"doublesPitching",
"triplesPitching",
"homeRunsPitching",
"strikeOutsPitching",
"baseOnBallsPitching",
"intentionalWalksPitching",
"hitsPitching",
"hitByPitchPitching",
"atBatsPitching",
"caughtStealingPitching",
"stolenBasesPitching",
"inningsPitched",
"saveOpportunities",
"earnedRuns",
"outsPitching",
"pitchesThrown",
"balls",
"strikes",
"hitBatsmen",
"balks",
"wildPitches",
"pickoffsPitching",
"rbiPitching",
"gamesFinishedPitching",
"inheritedRunners",
"inheritedRunnersScored",
"catchersInterferencePitching",
"saves",
"holds",
"blownSaves",
"assists",
"putOuts",
"errors",
"chances",
"target1_median",
"target1_std",
"target1_mean",
"target1_last_month_median",
"target1_last_month_std",
"target1_last_month_mean",
"target2_median",
"target2_std",
"target2_mean",
"target2_last_month_median",
"target2_last_month_std",
"target2_last_month_mean",
"target3_median",
"target3_std",
"target3_mean",
"target3_last_month_median",
"target3_last_month_std",
"target3_last_month_mean",
"target4_median",
"target4_std",
"target4_mean",
"target4_last_month_median",
"target4_last_month_std",
"target4_last_month_mean",
]
# ### Merge data
train = targets[targets_cols].merge(players[players_cols], on=["playerId"], how="left")
train = train.merge(rosters[rosters_cols], on=["playerId", "date"], how="left")
train = train.merge(scores[scores_cols], on=["playerId", "date"], how="left")
train = train.merge(games[games_cols], on=["gamePk"], how="left")
for i, row in tqdm_notebook(
games[(games["gameType"] == "E") | (games["gameType"] == "S")].iterrows()
):
train.loc[
(train["date"] == row["date"])
& ((train["teamId"] == row["homeId"]) | (train["teamId"] == row["awayId"])),
"gameType",
] = row["gameType"]
train.loc[train["gamePk"] > 700000, "gameType"] = "R"
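# The line above fills in the missing gameType for games with gamePk above 700000,
# treating them as regular-season ("R") games.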
train = train.merge(standings[standings_cols], on=["teamId", "date"], how="left")
# train = train.merge(team_scores[team_scores_cols], on=['gamePk', 'teamId'], how='left')
train = train.merge(
lastmonth_player_target_stats[last_month_stats_cols],
how="inner",
left_on=["playerId", "year", "month"],
right_on=["playerId", "year", "month"],
)
train = train.merge(
inseason_player_target_stats[stats_cols],
how="inner",
left_on=["playerId"],
right_on=["playerId"],
)
train = train.merge(seasons, on=["year"], how="left")
transactions = transactions[trans_cols].drop_duplicates(subset=["playerId", "date"])
train = train.merge(transactions, on=["playerId", "date"], how="left")
awards = awards[awards_cols].drop_duplicates(subset=["playerId", "date"])
train = train.merge(awards, on=["playerId", "date"], how="left")
train = train.drop_duplicates(subset=["playerId", "date"])
train = train.merge(cumulative_data, on=["playerId", "date"], how="left")
train = train.merge(last7_cumulative_data, on=["playerId", "date"], how="left")
train = train.merge(last_year_award, on=["playerId", "year"], how="left")
total_cumulative_data = total_cumulative_data.drop(columns=["playerId"])
train = train.merge(total_cumulative_data, on=["date"], how="left")
train
# ### Label Encoding
player2num = {c: i for i, c in enumerate(train["playerId"].unique())}
position2num = {c: i for i, c in enumerate(train["primaryPositionName"].unique())}
teamid2num = {c: i for i, c in enumerate(train["teamId"].unique())}
status2num = {c: i for i, c in enumerate(train["status"].unique())}
transdesc2num = {c: i for i, c in enumerate(train["typeDesc"].unique())}
awardid2num = {c: i for i, c in enumerate(train["awardId"].unique())}
gametype2num = {c: i for i, c in enumerate(train["gameType"].unique())}
last_year_award2num = {c: i for i, c in enumerate(train["award"].unique())}
# print(gametype2num)
train["label_playerId"] = train["playerId"].map(player2num)
train["label_primaryPositionName"] = train["primaryPositionName"].map(position2num)
train["label_teamId"] = train["teamId"].map(teamid2num)
train["label_status"] = train["status"].map(status2num)
train["label_typeDesc"] = train["typeDesc"].map(transdesc2num)
train["label_awardId"] = train["awardId"].map(awardid2num)
train["label_homeId"] = train["homeId"].map(teamid2num)
train["label_awayId"] = train["awayId"].map(teamid2num)
train["label_dayNight"] = train["dayNight"].map({"day": 0, "night": 1})
train["label_gameType"] = train["gameType"].map(gametype2num)
train["label_last_year_award"] = train["award"].map(last_year_award2num)
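# Note: the plain .map() calls above return NaN for any category that did not appear in
# the training data. A minimal sketch of a fallback (a hypothetical helper, not used in
# the rest of this notebook) that maps unseen categories to a sentinel value instead:
def safe_map(series, mapping, unknown=-1):
    # map known categories; unseen ones become `unknown` instead of NaN
    return series.map(mapping).fillna(unknown).astype(int)


# e.g. test["label_status"] = safe_map(test["status"], status2num)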
# ## Feature Engineering
train["datetime_date"] = pd.to_datetime(train["date"], format="%Y%m%d")
train["streakCode"] = train["streakCode"].fillna("W0")
train["WinLose"] = train["streakCode"].str[0]
train["WinLose"] = train["WinLose"].map({"L": 0, "W": 1})
train["Streak"] = train["streakCode"].str[1].astype(int)
train["pct"] = train["pct"].astype(float)
train["hr_ab"] = train["cumulative_hr"] / train["cumulative_atBats"]
train["pitch_win_pct"] = train["cumulative_win"] / (
train["cumulative_win"] + train["cumulative_loss"]
)
train["era+"] = train["total_era"] / train["era"] * 100
train["whip+"] = train["total_whip"] / train["whip"] * 100
train["ops+"] = (
train["slg"] / train["total_slg"] + train["obp"] / train["total_obp"] - 1
) * 100
train["avg+"] = train["avg"] / train["total_avg"] * 100
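# For reference, the league-adjusted ("plus") stats above follow the usual convention
# (100 = league average, higher = better for the player), using the league-wide
# "total_*" aggregates as the baseline:
#   ERA+  = 100 * league ERA  / player ERA
#   WHIP+ = 100 * league WHIP / player WHIP
#   AVG+  = 100 * player AVG  / league AVG
#   OPS+ ~= 100 * (player OBP / league OBP + player SLG / league SLG - 1)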
# #### CV Split
train["DaysAfterRegularSeason"] = (
train["datetime_date"] - train["regularSeasonStartDate"]
).dt.days
train["DaysAfterAllStar"] = (train["datetime_date"] - train["allStarDate"]).dt.days
train["DaysAfterpostSeasonEnd"] = (
train["datetime_date"] - train["postSeasonEndDate"]
).dt.days
train["DaysAfterRegularSeasonEnd"] = (
train["datetime_date"] - train["regularSeasonEndDate"]
).dt.days
# train['DaysAfterLastSeasonEnd'] = (train['datetime_date'] - train['LastSeasonEndDate']).dt.days
days_df = train[
[
"year",
"month",
"DaysAfterRegularSeason",
"DaysAfterAllStar",
"DaysAfterRegularSeasonEnd",
]
]
def f(x):
if x[2] < 0:
return 0
elif x[0] == 2018:
if x[1] == 5 or x[1] == 6:
return 1
elif x[1] == 8 or x[1] == 9:
return 2
else:
return 0
# elif x[0] == 2019:
# if x[1] == 2 or x[1] == 3 or x[1] == 4 or x[1] == 5 or x[1] == 6:
# return 3
# elif x[1] == 7 or x[1] == 8 or x[1] == 9 or x[1] == 10 or x[1] == 11:
# return 4
# else:
# return 0
# elif x[0] == 2020:
# return 5
elif x[0] == 2019:
if x[1] == 5 or x[1] == 6:
return 3
elif x[1] == 8 or x[1] == 9:
return 4
else:
return 0
else:
return 0
train["Split"] = days_df.apply(f, axis=1)
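# Resulting folds: Split 1 = 2018 May-Jun, Split 2 = 2018 Aug-Sep,
# Split 3 = 2019 May-Jun, Split 4 = 2019 Aug-Sep; Split 0 (all other dates,
# including anything before the regular-season start) always stays in the training part.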
active_players = players.loc[
players["playerForTestSetAndFuturePreds"] == True, "playerId"
]
active_players = active_players.apply(lambda x: player2num[x])
x_train = train[feature_cols].reset_index(drop=True)
NFOLDS = 4
train_y = train[["target1", "target2", "target3", "target4"]].reset_index(drop=True)
x_train
import gc
del train
# del players
# del seasons
# del rosters
# del targets
# del games
# del scores
# del team_scores
# del transactions
# del awards
# del standings
# del inseason_player_target_stats
# del lastmonth_player_target_stats
# del cumulative_data
# del playoff_cumulative_data
# del last7_cumulative_data
# del last_year_award
# del total_cumulative_data
gc.collect()
# ## Training
def fit_lgbm(x_train, y_train, x_valid, y_valid, params: dict = None, verbose=100):
oof_pred = np.zeros(len(y_valid), dtype=np.float32)
model = lgbm.LGBMRegressor(**params)
model.fit(
x_train,
y_train,
eval_set=[(x_valid, y_valid)],
early_stopping_rounds=verbose,
verbose=verbose,
)
oof_pred = model.predict(x_valid)
score = mean_absolute_error(oof_pred, y_valid)
print("mae:", score)
return oof_pred, model, score
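# Note: in fit_lgbm the `verbose` argument doubles as both the logging interval and the
# early-stopping patience (early_stopping_rounds=verbose, i.e. 100 rounds by default).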
cv = 0
params = {
"objective": "mae",
"reg_alpha": 0.1,
"reg_lambda": 0.1,
"n_estimators": 2000,
"learning_rate": 0.1,
"random_state": 208,
"num_leaves": 250,
}
model1_list = []
model2_list = []
model3_list = []
model4_list = []
for idx in range(NFOLDS):
print("FOLD:", idx)
# tr_idx, val_idx = folds[idx]
x_tr = x_train[x_train["Split"] != idx + 1].drop(columns=["Split"])
x_val = x_train[x_train["Split"] == idx + 1][
x_train[x_train["Split"] == idx + 1]["label_playerId"].isin(
active_players.tolist()
)
].drop(columns=["Split"])
y_tr, y_val = (
train_y[x_train["Split"] != idx + 1],
train_y[x_train["Split"] == idx + 1][
x_train[x_train["Split"] == idx + 1]["label_playerId"].isin(
active_players.tolist()
)
],
)
oof1, model1, score1 = fit_lgbm(
x_tr, y_tr["target1"], x_val, y_val["target1"], params
)
oof2, model2, score2 = fit_lgbm(
x_tr, y_tr["target2"], x_val, y_val["target2"], params
)
oof3, model3, score3 = fit_lgbm(
x_tr, y_tr["target3"], x_val, y_val["target3"], params
)
oof4, model4, score4 = fit_lgbm(
x_tr, y_tr["target4"], x_val, y_val["target4"], params
)
score = (score1 + score2 + score3 + score4) / 4
print(f"score: {score}")
cv += score / NFOLDS
model1_list.append(model1)
model2_list.append(model2)
model3_list.append(model3)
model4_list.append(model4)
print("{} Folds Average CV: {}".format(NFOLDS, cv))
# ## Feature Importance
# players = pd.read_csv(BASE_DIR / 'players.csv')
# seasons = pd.read_csv(BASE_DIR / 'seasons.csv')
# rosters = pd.read_pickle(TRAIN_DIR / 'rosters_train.pkl')
# targets = pd.read_pickle(TRAIN_DIR / 'nextDayPlayerEngagement_train.pkl')
# games = pd.read_pickle(TRAIN_DIR / 'games_train.pkl')
# scores = pd.read_pickle(TRAIN_DIR / 'playerBoxScores_train.pkl')
# team_scores = pd.read_pickle(TRAIN_DIR / 'teamBoxScores_train.pkl')
# transactions = pd.read_pickle(TRAIN_DIR / 'transactions_train.pkl')
# awards = pd.read_pickle(TRAIN_DIR / 'awards_train.pkl')
# standings = pd.read_pickle(TRAIN_DIR / 'standings_train.pkl')
# inseason_player_target_stats = pd.read_csv("../input/inseason-target-stats/inseason_target_stats.csv")
# lastmonth_player_target_stats = pd.read_csv("../input/month-target-stats/last_month_target_stats.csv")
# cumulative_data = pd.read_csv("../input/cumulated-revised-data/train_cumulated.csv")
# playoff_cumulative_data = pd.read_csv('../input/playoff-cumulated-data/train_cumulated_playoff.csv')
# last7_cumulative_data = pd.read_csv('../input/recently-player-stats/train_cumulated_last7.csv')
# last_year_award = pd.read_csv('../input/last-month-data/last_year_award.csv')
# total_cumulative_data = pd.read_csv('../input/last-month-data/total_train_cumulated.csv')
# last_year_award['year'] = last_year_award['year'].apply(lambda x: x+1)
# total_cumulative_data = total_cumulative_data.drop(columns=['playerId'])
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
# sorted(zip(clf.feature_importances_, X.columns), reverse=True)
# plot feature importances for each of the four per-target models (from the last fold);
# note the models were fit on x_tr, so its columns are the feature names
for target_idx, model in enumerate([model1, model2, model3, model4], start=1):
    feature_imp = pd.DataFrame(
        sorted(zip(model.feature_importances_, x_tr.columns)),
        columns=["Value", "Feature"],
    )
    plt.figure(figsize=(20, 10))
    sns.barplot(
        x="Value",
        y="Feature",
        data=feature_imp.sort_values(by="Value", ascending=False),
    )
    plt.title(f"LightGBM Features with target {target_idx}")
    plt.tight_layout()
    # save before show so the figure is not cleared by plt.show()
    plt.savefig(f"lgbm_importances-0{target_idx}.png")
    plt.show()
# ## Calculate Cumulative data
cumulative_hits = {}
cumulative_atBats = {}
cumulative_earnedRuns = {}
cumulative_inningsPitched = {}
cumulative_totalBases = {}
cumulative_baseOnBalls = {}
cumulative_hitByPitch = {}
cumulative_sacFlies = {}
cumulative_baseOnBallsPitching = {}
cumulative_hitByPitchPitching = {}
cumulative_hitsPitching = {}
cumulative_hr = {}
cumulative_rbi = {}
cumulative_win = {}
cumulative_loss = {}
cumulative_k = {}
cumulative_save = {}
cumulative_bs = {}
cumulative_h_streak = {}
# cumulative_hr_streak = {}
# cumulative_base_streak = {}
for idx, row in tqdm_notebook(cumulative_data.iterrows()):
if (idx) % 100000 == 0:
print(idx)
date = str(row["date"])
playerId = row["playerId"]
if date[:4] == "2021":
        cumulative_hits[playerId] = row["cumulative_hits"]
        cumulative_atBats[playerId] = row["cumulative_atBats"]
        cumulative_earnedRuns[playerId] = row["cumulative_earnedRuns"]
        cumulative_inningsPitched[playerId] = row["cumulative_inningsPitched"]
        cumulative_totalBases[playerId] = row["cumulative_totalBases"]
        cumulative_baseOnBalls[playerId] = row["cumulative_baseOnBalls"]
        cumulative_hitByPitch[playerId] = row["cumulative_hitByPitch"]
        cumulative_sacFlies[playerId] = row["cumulative_sacFlies"]
        cumulative_baseOnBallsPitching[playerId] = row["cumulative_baseOnBallsPitching"]
        cumulative_hitByPitchPitching[playerId] = row["cumulative_hitByPitchPitching"]
        cumulative_hitsPitching[playerId] = row["cumulative_hitsPitching"]
        cumulative_hr[playerId] = row["cumulative_hr"]
        cumulative_rbi[playerId] = row["cumulative_rbi"]
        cumulative_win[playerId] = row["cumulative_win"]
        cumulative_loss[playerId] = row["cumulative_loss"]
        cumulative_k[playerId] = row["cumulative_k"]
        cumulative_save[playerId] = row["cumulative_save"]
        cumulative_bs[playerId] = row["cumulative_bs"]
        cumulative_h_streak[playerId] = row["cumulative_h_streak"]
# cumulative_hr_streak[playerId] = row['cumulative_hr_streak']
# cumulative_base_streak[playerId] = row['cumulative_base_streak']
total_hits = total_cumulative_data["total_cumulative_hits"]
total_atBats = total_cumulative_data["total_cumulative_atBats"]
total_earnedRuns = total_cumulative_data["total_cumulative_earnedRuns"]
total_inningsPitched = total_cumulative_data["total_cumulative_inningsPitched"]
total_totalBases = total_cumulative_data["total_cumulative_totalBases"]
total_baseOnBalls = total_cumulative_data["total_cumulative_baseOnBalls"]
total_hitByPitch = total_cumulative_data["total_cumulative_hitByPitch"]
total_sacFlies = total_cumulative_data["total_cumulative_sacFlies"]
total_baseOnBallsPitching = total_cumulative_data[
"total_cumulative_baseOnBallsPitching"
]
total_hitByPitchPitching = total_cumulative_data["total_cumulative_hitByPitchPitching"]
total_hitsPitching = total_cumulative_data["total_cumulative_hitsPitching"]
total_hr = total_cumulative_data["total_cumulative_hr"]
total_win = total_cumulative_data["total_cumulative_win"]
total_losses = total_cumulative_data["total_cumulative_loss"]
total_holds = total_cumulative_data["total_cumulative_hold"]
total_saves = total_cumulative_data["total_cumulative_save"]
total_bs = total_cumulative_data["total_cumulative_bs"]
total_k = total_cumulative_data["total_cumulative_k"]
total_rbi = total_cumulative_data["total_cumulative_rbi"]
total_intentionalWalksPitching = total_cumulative_data[
"total_cumulative_intentionalWalksPitching"
]
total_homeRunsPitching = total_cumulative_data["total_cumulative_homeRunsPitching"]
total_atBatsPitching = total_cumulative_data["total_cumulative_atBatsPitching"]
players_cols = ["playerId", "primaryPositionName"]
rosters_cols = ["playerId", "teamId", "status"]
scores_cols = [
"playerId",
"flyOuts",
"gamePk",
"groundOuts",
"runsScored",
"doubles",
"triples",
"homeRuns",
"strikeOuts",
"baseOnBalls",
"intentionalWalks",
"hits",
"hitByPitch",
"atBats",
"caughtStealing",
"stolenBases",
"groundIntoDoublePlay",
"groundIntoTriplePlay",
"plateAppearances",
"totalBases",
"rbi",
"leftOnBase",
"sacBunts",
"sacFlies",
"catchersInterference",
"pickoffs",
"gamesPlayedPitching",
"gamesStartedPitching",
"completeGamesPitching",
"shutoutsPitching",
"winsPitching",
"lossesPitching",
"runsPitching",
"doublesPitching",
"triplesPitching",
"homeRunsPitching",
"strikeOutsPitching",
"baseOnBallsPitching",
"intentionalWalksPitching",
"hitsPitching",
"hitByPitchPitching",
"atBatsPitching",
"caughtStealingPitching",
"stolenBasesPitching",
"inningsPitched",
"saveOpportunities",
"earnedRuns",
"battersFaced",
"outsPitching",
"pitchesThrown",
"balls",
"strikes",
"hitBatsmen",
"balks",
"wildPitches",
"pickoffsPitching",
"rbiPitching",
"gamesFinishedPitching",
"inheritedRunners",
"inheritedRunnersScored",
"catchersInterferencePitching",
"sacBuntsPitching",
"sacFliesPitching",
"saves",
"holds",
"blownSaves",
"assists",
"putOuts",
"errors",
"chances",
]
trans_cols = ["playerId", "typeDesc"]
awards_cols = ["playerId", "awardId"]
games_cols = ["gamePk", "homeId", "awayId", "dayNight", "gameType"]
standings_cols = ["streakCode", "pct", "leagueRank", "teamId"]
team_scores_cols = ["teamId", "gamePk", "team_runsScored", "team_runsPitching"]
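# The aliases below let eval() parse the JSON-style strings delivered in test_df,
# which contain null / true / false literals.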
null = np.nan
true = True
false = False
# player_target_stats = pd.read_csv("../input/player-target-stats/player_target_stats.csv")
env = mlb.make_env() # initialize the environment
iter_test = env.iter_test() # iterator which loops over each date in test set
import torch
player_cumulative_stats_dict = torch.load(
"../input/recently-player-stats/player_stats_last7.pkl"
)
for i, (test_df, sample_prediction_df) in enumerate(iter_test): # make predictions here
sample_prediction_df = sample_prediction_df.reset_index(drop=True)
# creat dataset
sample_prediction_df["playerId"] = sample_prediction_df["date_playerId"].map(
lambda x: int(x.split("_")[1])
)
sample_prediction_df["date"] = pd.to_datetime(
sample_prediction_df["date_playerId"].map(lambda x: int(x.split("_")[0])),
format="%Y%m%d",
) - pd.DateOffset(1)
# Dealing with missing values
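    # (the `field == field` comparisons below are a NaN check: they are False only
    #  when that block of daily data is missing)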
if test_df["rosters"].iloc[0] == test_df["rosters"].iloc[0]:
test_rosters = pd.DataFrame(eval(test_df["rosters"].iloc[0]))
else:
test_rosters = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in rosters.columns:
if col == "playerId":
continue
test_rosters[col] = np.nan
if test_df["playerBoxScores"].iloc[0] == test_df["playerBoxScores"].iloc[0]:
test_scores = pd.DataFrame(eval(test_df["playerBoxScores"].iloc[0]))
else:
test_scores = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in scores.columns:
if col == "playerId":
continue
test_scores[col] = np.nan
# if test_df['teamBoxScores'].iloc[0] == test_df['teamBoxScores'].iloc[0]:
# test_team_scores = pd.DataFrame(eval(test_df['teamBoxScores'].iloc[0]))
# else:
# test_team_scores = pd.DataFrame({'playerId': sample_prediction_df['playerId']})
# for col in team_scores.columns:
# if col == 'playerId': continue
# test_team_scores[col] = np.nan
if test_df["transactions"].iloc[0] == test_df["transactions"].iloc[0]:
test_trans = pd.DataFrame(eval(test_df["transactions"].iloc[0]))
test_trans["transaction_time"] = 1
else:
test_trans = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in transactions.columns:
if col == "playerId":
continue
test_trans[col] = np.nan
test_trans = test_trans.drop_duplicates(subset=["playerId"])
# pre_trans_old = test_trans.copy()
# if i != 0:
# test_trans = pd.concat([test_trans, pre_trans], axis=0)
# test_trans = test_trans.drop_duplicates(subset=['playerId'])
# if pre_trans_old.loc[0, 'transaction_time'] == 1:
# pre_trans_old['transaction_time'] = 0
# pre_trans = pre_trans_old.copy()
if test_df["awards"].iloc[0] == test_df["awards"].iloc[0]:
test_awards = pd.DataFrame(eval(test_df["awards"].iloc[0]))
test_awards["awards_time"] = 1
else:
test_awards = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in awards.columns:
if col == "playerId":
continue
test_awards[col] = np.nan
test_awards = test_awards.drop_duplicates(subset=["playerId"])
if test_df["games"].iloc[0] == test_df["games"].iloc[0]:
test_games = pd.DataFrame(eval(test_df["games"].iloc[0]))
else:
test_games = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in games.columns:
if col == "playerId":
continue
test_games[col] = np.nan
if test_df["standings"].iloc[0] == test_df["standings"].iloc[0]:
test_standings = pd.DataFrame(eval(test_df["standings"].iloc[0]))
else:
test_standings = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in standings.columns:
if col == "playerId":
continue
test_standings[col] = np.nan
test_scores = test_scores.groupby("playerId").sum().reset_index()
test = sample_prediction_df[["playerId", "date"]].copy()
test = test.merge(players[players_cols], on="playerId", how="left")
test = test.merge(test_rosters[rosters_cols], on="playerId", how="left")
test = test.merge(test_scores[scores_cols], on="playerId", how="left")
# test = test.merge(test_team_scores[team_scores_cols], on=['teamId', 'gamePk'], how='left')
test = test.merge(test_games[games_cols], on="gamePk", how="left")
test = test.merge(test_standings[standings_cols], on="teamId", how="left")
test["year"] = test["date"].dt.year
test["month"] = test["date"].dt.month
test["days"] = test["date"].dt.day
test = test.merge(
lastmonth_player_target_stats[last_month_stats_cols],
how="inner",
left_on=["playerId", "year", "month"],
right_on=["playerId", "year", "month"],
)
test = test.merge(
inseason_player_target_stats[stats_cols],
how="inner",
left_on=["playerId"],
right_on=["playerId"],
)
test = test.merge(last_year_award, on=["playerId", "year"], how="left")
test = test.merge(test_trans[trans_cols], on="playerId", how="left")
test = test.merge(test_awards[awards_cols], on="playerId", how="left")
test = test.merge(seasons, on=["year"], how="left")
test.loc[test["gamePk"] > 700000, "gameType"] = "R"
# test.loc[test['gamePk'] > 700000, 'dayNight'] = 'day_night'
test["DaysAfterRegularSeason"] = (
test["date"] - test["regularSeasonStartDate"]
).dt.days
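    # MLB box scores record partial innings in thirds: an inningsPitched value of 5.1
    # means 5 1/3 innings and 5.2 means 5 2/3, so the trailing digit is converted to a
    # real fraction before being added to the league-wide totals below.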
inningsPitched_list = []
for i in range(test.shape[0]):
inningsPitched = test["inningsPitched"][i]
if not np.isnan(inningsPitched):
if (str(inningsPitched)[-1]) == "1":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 1 / 3
elif (str(inningsPitched)[-1]) == "2":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 2 / 3
inningsPitched_list.append(inningsPitched)
total_hits += test["hits"].sum()
total_atBats += test["atBats"].sum()
total_earnedRuns += test["earnedRuns"].sum()
total_inningsPitched += np.sum(inningsPitched_list)
total_totalBases += test["totalBases"].sum()
total_baseOnBalls += test["baseOnBalls"].sum()
total_hitByPitch += test["hitByPitch"].sum()
total_sacFlies += test["sacFlies"].sum()
total_baseOnBallsPitching += test["baseOnBallsPitching"].sum()
total_hitByPitchPitching += test["hitByPitchPitching"].sum()
total_hitsPitching += test["hitsPitching"].sum()
total_hr += test["homeRuns"].sum()
total_win += test["winsPitching"].sum()
total_losses += test["lossesPitching"].sum()
total_holds += test["holds"].sum()
total_saves += test["saves"].sum()
total_bs += test["blownSaves"].sum()
total_k += test["strikeOutsPitching"].sum()
total_rbi += test["rbi"].sum()
total_intentionalWalksPitching += test["intentionalWalksPitching"].sum()
total_homeRunsPitching += test["homeRunsPitching"].sum()
total_atBatsPitching += test["atBatsPitching"].sum()
avg_list = []
era_list = []
whip_list = []
ops_list = []
obp_list = []
slg_list = []
hit_list = []
hr_list = []
rbi_list = []
atBats_list = []
inningsPitched_list = []
win_list = []
loss_list = []
save_list = []
bs_list = []
h_streak_list = []
last7_avg_list = []
last5_era_list = []
last5_whip_list = []
last7_ops_list = []
# last7_obp_list = []
# last7_slg_list = []
last7_hit_list = []
last7_hr_list = []
last7_rbi_list = []
last7_atBats_list = []
# inningsPitched_list = []
last5_win_list = []
last5_loss_list = []
# save_list = []
# bs_list = []
avgplus_list = []
whipplus_list = []
opsplus_list = []
eraplus_list = []
for idx, row in test.iterrows():
playerId = row["playerId"]
if not np.isnan(row["hits"]) and row["gameType"] != "A":
cumulative_hits[playerId] += row["hits"]
cumulative_atBats[playerId] += row["atBats"]
cumulative_totalBases[playerId] += row["totalBases"]
cumulative_baseOnBalls[playerId] += row["baseOnBalls"]
cumulative_hitByPitch[playerId] += row["hitByPitch"]
cumulative_sacFlies[playerId] += row["sacFlies"]
cumulative_hr[playerId] += row["homeRuns"]
cumulative_rbi[playerId] += row["rbi"]
player_cumulative_stats_dict[playerId]["cumulative_hits"].append(
row["hits"]
)
player_cumulative_stats_dict[playerId]["cumulative_atBats"].append(
row["atBats"]
)
player_cumulative_stats_dict[playerId]["cumulative_totalBases"].append(
row["totalBases"]
)
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"].append(
row["hitByPitch"]
)
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"].append(
row["baseOnBalls"]
)
player_cumulative_stats_dict[playerId]["cumulative_sacFlies"].append(
row["sacFlies"]
)
player_cumulative_stats_dict[playerId]["cumulative_hr"].append(
row["homeRuns"]
)
player_cumulative_stats_dict[playerId]["cumulative_rbi"].append(row["rbi"])
if len(player_cumulative_stats_dict[playerId]["cumulative_hits"]) > 7:
player_cumulative_stats_dict[playerId]["cumulative_hits"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_atBats"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_totalBases"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_sacFlies"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_hr"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_rbi"].pop(0)
if row["hits"] != 0:
cumulative_h_streak[playerId] += 1
else:
if not np.isnan(cumulative_h_streak[playerId]):
cumulative_h_streak[playerId] = 0
# if row['homeRuns'] != 0:
# cumulative_hr_streak[playerId] += 1
# else:
# cumulative_hr_streak[playerId] = 0
# if row['hits'] != 0 or row['baseOnBalls'] != 0 or row['hitByPitch'] != 0:
# cumulative_base_streak[playerId] += 1
# else:
# cumulative_base_streak[playerId] = 0
if not np.isnan(row["earnedRuns"]):
inningsPitched = row["inningsPitched"]
if (str(inningsPitched)[-1]) == "1":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 1 / 3
elif (str(inningsPitched)[-1]) == "2":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 2 / 3
cumulative_earnedRuns[playerId] += row["earnedRuns"]
cumulative_inningsPitched[playerId] += inningsPitched
cumulative_hitByPitchPitching[playerId] += row["hitByPitchPitching"]
cumulative_hitsPitching[playerId] += row["hitsPitching"]
cumulative_baseOnBallsPitching[playerId] += row["baseOnBallsPitching"]
cumulative_win[playerId] += row["winsPitching"]
cumulative_loss[playerId] += row["lossesPitching"]
cumulative_k[playerId] += row["strikeOutsPitching"]
cumulative_bs[playerId] += row["blownSaves"]
cumulative_save[playerId] += row["saves"]
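            # playerId 660271 is Shohei Ohtani, the two-way player: his rolling
            # pitching window is only updated for games in which he actually pitched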
if playerId != 660271 or inningsPitched > 0:
player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"].append(
row["earnedRuns"]
)
player_cumulative_stats_dict[playerId][
"cumulative_inningsPitched"
].append(inningsPitched)
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
].append(row["baseOnBallsPitching"])
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
].append(row["hitByPitchPitching"])
player_cumulative_stats_dict[playerId][
"cumulative_hitsPitching"
].append(row["hitsPitching"])
player_cumulative_stats_dict[playerId]["cumulative_saves"].append(
row["saves"]
)
player_cumulative_stats_dict[playerId]["cumulative_blownSaves"].append(
row["blownSaves"]
)
player_cumulative_stats_dict[playerId]["cumulative_win"].append(
row["winsPitching"]
)
player_cumulative_stats_dict[playerId]["cumulative_losses"].append(
row["lossesPitching"]
)
if len(player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"]) > 5:
player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"].pop(
0
)
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
].pop(0)
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_hitsPitching"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_saves"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_blownSaves"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_win"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_losses"].pop(0)
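        # rate stats from the season-to-date totals:
        #   AVG = H / AB, ERA = 9 * ER / IP, SLG = TB / AB, OPS = OBP + SLG,
        #   OBP = (H + BB + HBP) / (AB + BB + HBP + SF),
        #   WHIP (as computed here) = (BB + HBP + H) / IP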
avg_list.append(cumulative_hits[playerId] / cumulative_atBats[playerId])
era_list.append(
cumulative_earnedRuns[playerId] / cumulative_inningsPitched[playerId] * 9
)
slg = cumulative_totalBases[playerId] / cumulative_atBats[playerId]
obp = (
cumulative_hits[playerId]
+ cumulative_baseOnBalls[playerId]
+ cumulative_hitByPitch[playerId]
) / (
cumulative_atBats[playerId]
+ cumulative_baseOnBalls[playerId]
+ cumulative_hitByPitch[playerId]
+ cumulative_sacFlies[playerId]
)
ops_list.append(slg + obp)
whip = (
cumulative_baseOnBallsPitching[playerId]
+ cumulative_hitByPitchPitching[playerId]
+ cumulative_hitsPitching[playerId]
) / cumulative_inningsPitched[playerId]
whip_list.append(whip)
hr_list.append(cumulative_hr[playerId])
hit_list.append(cumulative_hits[playerId])
rbi_list.append(cumulative_rbi[playerId])
inningsPitched_list.append(cumulative_inningsPitched[playerId])
atBats_list.append(cumulative_atBats[playerId])
win_list.append(cumulative_win[playerId])
loss_list.append(cumulative_loss[playerId])
save_list.append(cumulative_save[playerId])
bs_list.append(cumulative_bs[playerId])
h_streak_list.append(cumulative_h_streak[playerId])
total_avg = total_hits / total_atBats
total_era = total_earnedRuns / total_inningsPitched * 9
total_whip = (
total_baseOnBallsPitching + total_hitByPitchPitching + total_hitsPitching
) / total_inningsPitched
total_obp = (total_hits + total_baseOnBalls + total_hitByPitch) / (
total_atBats + total_baseOnBalls + total_hitByPitch + total_sacFlies
)
total_slg = total_totalBases / total_atBats
avgplus_list.append(avg_list[-1] / total_avg * 100)
eraplus_list.append(total_era / era_list[-1] * 100)
whipplus_list.append(total_whip / whip_list[-1] * 100)
opsplus_list.append((slg / total_slg + obp / total_obp - 1) * 100)
if playerId == 660271:
last7_avg_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
/ sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
# era_list.append(cumulative_earnedRuns[playerId] / cumulative_inningsPitched[playerId] * 9)
slg = sum(
player_cumulative_stats_dict[playerId]["cumulative_totalBases"]
) / sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
obp = (
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"])
) / (
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_sacFlies"])
)
last7_ops_list.append(slg + obp)
# whip = (cumulative_baseOnBallsPitching[playerId] + cumulative_hitByPitchPitching[playerId] + cumulative_hitsPitching[playerId]) / cumulative_inningsPitched[playerId]
# whip_list.append(whip)
last7_hr_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hr"])
)
last7_hit_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
)
last7_rbi_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_rbi"])
)
# inningsPitched_list.append(cumulative_inningsPitched[playerId])
last7_atBats_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
last5_era_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"])
/ sum(
player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"]
)
* 9
)
last5_whip_list.append(
(
sum(
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
]
)
)
/ sum(
player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"]
)
)
last5_win_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_win"])
)
last5_loss_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_losses"])
)
elif row["primaryPositionName"] == "Pitcher":
last7_avg_list.append(float("nan"))
last7_ops_list.append(float("nan"))
last7_hr_list.append(float("nan"))
last7_hit_list.append(float("nan"))
last7_rbi_list.append(float("nan"))
last7_atBats_list.append(float("nan"))
if (
sum(player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"])
== 0
):
last5_era_list.append(float("nan"))
last5_whip_list.append(float("nan"))
last5_win_list.append(float("nan"))
last5_loss_list.append(float("nan"))
else:
last5_era_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"])
/ sum(
player_cumulative_stats_dict[playerId][
"cumulative_inningsPitched"
]
)
* 9
)
last5_whip_list.append(
(
sum(
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
]
)
)
/ sum(
player_cumulative_stats_dict[playerId][
"cumulative_inningsPitched"
]
)
)
last5_win_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_win"])
)
last5_loss_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_losses"])
)
else:
last5_era_list.append(float("nan"))
last5_whip_list.append(float("nan"))
last5_win_list.append(float("nan"))
last5_loss_list.append(float("nan"))
if sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"]) == 0:
last7_avg_list.append(float("nan"))
last7_ops_list.append(float("nan"))
last7_hr_list.append(float("nan"))
last7_hit_list.append(float("nan"))
last7_rbi_list.append(float("nan"))
last7_atBats_list.append(float("nan"))
else:
last7_avg_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
/ sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
# era_list.append(cumulative_earnedRuns[playerId] / cumulative_inningsPitched[playerId] * 9)
slg = sum(
player_cumulative_stats_dict[playerId]["cumulative_totalBases"]
) / sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
obp = (
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"]
)
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"]
)
) / (
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"]
)
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"]
)
+ sum(player_cumulative_stats_dict[playerId]["cumulative_sacFlies"])
)
last7_ops_list.append(slg + obp)
# whip = (cumulative_baseOnBallsPitching[playerId] + cumulative_hitByPitchPitching[playerId] + cumulative_hitsPitching[playerId]) / cumulative_inningsPitched[playerId]
# whip_list.append(whip)
last7_hr_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hr"])
)
last7_hit_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
)
last7_rbi_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_rbi"])
)
# inningsPitched_list.append(cumulative_inningsPitched[playerId])
last7_atBats_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
test["avg"] = avg_list
test["era"] = era_list
test["whip"] = whip_list
test["ops"] = ops_list
test["cumulative_hits"] = hit_list
test["cumulative_hr"] = hr_list
test["cumulative_rbi"] = rbi_list
test["cumulative_win"] = win_list
test["cumulative_loss"] = loss_list
test["pitch_win_pct"] = test["cumulative_win"] / (
test["cumulative_win"] + test["cumulative_loss"]
)
test["cumulative_save"] = save_list
test["cumulative_bs"] = bs_list
test["cumulative_h_streak"] = h_streak_list
test["last7_avg"] = last7_avg_list
test["last7_ops"] = last7_ops_list
test["last7_cumulative_hits"] = last7_hit_list
test["last7_cumulative_hr"] = last7_hr_list
test["last7_cumulative_rbi"] = last7_rbi_list
test["last5_era"] = last5_era_list
test["last5_whip"] = last5_whip_list
test["last5_cumulative_win"] = last5_win_list
test["last5_cumulative_loss"] = last5_loss_list
test["era+"] = eraplus_list
test["ops+"] = opsplus_list
test["whip+"] = whipplus_list
test["avg+"] = avgplus_list
# test['cumulative_hr_streak'] = hr_streak_list
# test['cumulative_base_streak'] = base_streak_list
test["label_playerId"] = test["playerId"].map(player2num)
test["label_primaryPositionName"] = test["primaryPositionName"].map(position2num)
test["label_teamId"] = test["teamId"].map(teamid2num)
test["label_homeId"] = test["homeId"].map(teamid2num)
test["label_awayId"] = test["awayId"].map(teamid2num)
test["label_status"] = test["status"].map(status2num)
test["label_typeDesc"] = test["typeDesc"].map(transdesc2num)
test["label_awardId"] = test["awardId"].map(awardid2num)
    test["label_gameType"] = test["gameType"].map(gametype2num)
test["label_dayNight"] = test["dayNight"].map({"day": 0, "night": 1})
test = test.drop_duplicates("playerId")
test["streakCode"] = test["streakCode"].fillna("W0")
test["WinLose"] = test["streakCode"].str[0]
test["WinLose"] = test["WinLose"].map({"L": 0, "W": 1})
test["Streak"] = test["streakCode"].str[1].astype(int)
test["pct"] = test["pct"].astype(float)
test["Split"] = 0
test["label_last_year_award"] = test["award"].map(last_year_award2num)
# test['playoff_avg'] = np.nan
# test['playoff_ops'] = np.nan
# test['cumulative_playoff_hits'] = np.nan
# test['cumulative_playoff_hr'] = np.nan
# test['cumulative_playoff_rbi'] = np.nan
test_X_1 = test[feature_cols].drop(columns=["Split"]).values
# predict
for i in range(NFOLDS):
if i == 0:
pred1 = model1_list[i].predict(test_X_1) / NFOLDS
pred2 = model2_list[i].predict(test_X_1) / NFOLDS
pred3 = model3_list[i].predict(test_X_1) / NFOLDS
pred4 = model4_list[i].predict(test_X_1) / NFOLDS
else:
pred1 += model1_list[i].predict(test_X_1) / NFOLDS
pred2 += model2_list[i].predict(test_X_1) / NFOLDS
pred3 += model3_list[i].predict(test_X_1) / NFOLDS
pred4 += model4_list[i].predict(test_X_1) / NFOLDS
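    # predictions from the NFOLDS per-target models are simply averaged (bagging)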
# merge submission
sample_prediction_df["target1"] = np.clip(pred1, 0, 100)
sample_prediction_df["target2"] = np.clip(pred2, 0, 100)
sample_prediction_df["target3"] = np.clip(pred3, 0, 100)
sample_prediction_df["target4"] = np.clip(pred4, 0, 100)
sample_prediction_df = sample_prediction_df.fillna(0.0)
del sample_prediction_df["playerId"]
del sample_prediction_df["date"]
env.predict(sample_prediction_df)
sample_prediction_df
# # MLB Engagement Prediction using LightGBM
# This is the first competition where I spent a lot of time conducting research, doing feature engineering, and designing appropriate CV methods. I'm a big baseball fan and very glad to have the opportunity to participate in this competition. Below are the method and pipeline of my work.
# ## Import Library
import torch
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.metrics import mean_absolute_error
from datetime import timedelta
from functools import reduce
from tqdm import tqdm_notebook
import lightgbm as lgbm
import mlb
# ## Load Dataset
# Shout out to @colum2131 and @Ken Miller. Thanks to their preprocessing of the raw data, I saved a lot of time on this part.
BASE_DIR = Path("../input/mlb-player-digital-engagement-forecasting")
TRAIN_DIR = Path("../input/mlb-pdef-train-dataset")
players = pd.read_csv(BASE_DIR / "players.csv")
seasons = pd.read_csv(BASE_DIR / "seasons.csv")
rosters = pd.read_pickle(TRAIN_DIR / "rosters_train.pkl")
targets = pd.read_pickle(TRAIN_DIR / "nextDayPlayerEngagement_train.pkl")
games = pd.read_pickle(TRAIN_DIR / "games_train.pkl")
scores = pd.read_pickle(TRAIN_DIR / "playerBoxScores_train.pkl")
team_scores = pd.read_pickle(TRAIN_DIR / "teamBoxScores_train.pkl")
transactions = pd.read_pickle(TRAIN_DIR / "transactions_train.pkl")
awards = pd.read_pickle(TRAIN_DIR / "awards_train.pkl")
standings = pd.read_pickle(TRAIN_DIR / "standings_train.pkl")
inseason_player_target_stats = pd.read_csv(
"../input/inseason-target-stats/inseason_target_stats.csv"
)
lastmonth_player_target_stats = pd.read_csv(
"../input/month-target-stats/last_month_target_stats.csv"
)
cumulative_data = pd.read_csv("../input/cumulated-revised-data/train_cumulated.csv")
playoff_cumulative_data = pd.read_csv(
"../input/playoff-cumulated-data/train_cumulated_playoff.csv"
)
last7_cumulative_data = pd.read_csv(
"../input/recently-player-stats/train_cumulated_last7.csv"
)
last_year_award = pd.read_csv("../input/last-month-data/last_year_award.csv")
total_cumulative_data = pd.read_csv(
"../input/last-month-data/total_train_cumulated.csv"
)
# ## Preprocess data
# ### Date
# #### Revise wrong dates in seasons.csv
seasons["regularSeasonStartDate"][2] = "2019-03-28"
seasons["seasonStartDate"][2] = "2019-03-28"
seasons["seasonStartDate"][4] = "2021-04-01"
# #### Convert "date" feature to datetime type
seasons["regularSeasonStartDate"] = pd.to_datetime(seasons["regularSeasonStartDate"])
seasons["postSeasonEndDate"] = pd.to_datetime(seasons["postSeasonEndDate"])
seasons["regularSeasonEndDate"] = pd.to_datetime(seasons["regularSeasonEndDate"])
seasons["allStarDate"][3] = np.nan
seasons["allStarDate"] = pd.to_datetime(seasons["allStarDate"])
seasons.rename(columns={"seasonId": "year"}, inplace=True)
team_col_dict = {}
for i, col in enumerate(team_scores.columns):
if i > 4 and i < len(team_scores.columns) - 2:
team_col_dict[col] = "team_" + col
team_scores.rename(columns=team_col_dict, inplace=True)
lastmonth_player_target_stats.rename(
columns={
"target1_median": "target1_last_month_median",
"target2_median": "target2_last_month_median",
"target3_median": "target3_last_month_median",
"target4_median": "target4_last_month_median",
"target1_std": "target1_last_month_std",
"target2_std": "target2_last_month_std",
"target3_std": "target3_last_month_std",
"target4_std": "target4_last_month_std",
"target1_mean": "target1_last_month_mean",
"target2_mean": "target2_last_month_mean",
"target3_mean": "target3_last_month_mean",
"target4_mean": "target4_last_month_mean",
},
inplace=True,
)
last7_cumulative_data.rename(
columns={
"cumulative_hits": "last7_cumulative_hits",
"cumulative_atBats": "last7_cumulative_atBats",
"cumulative_earnedRuns": "last5_cumulative_earnedRuns",
"cumulative_inningsPitched": "last5_cumulative_inningsPitched",
"cumulative_totalBases": "last7_cumulative_totalBases",
"cumulative_baseOnBalls": "last7_cumulative_baseOnBalls",
"cumulative_hitByPitch": "last7_cumulative_hitByPitch",
"cumulative_sacFlies": "last7_cumulative_sacFlies",
"cumulative_baseOnBallsPitching": "last5_cumulative_baseOnBallsPitching",
"cumulative_hitByPitchPitching": "last5_cumulative_hitByPitchPitching",
"cumulative_hitsPitching": "last5_cumulative_hitsPitching",
"cumulative_hr": "last7_cumulative_hr",
"cumulative_rbi": "last7_cumulative_rbi",
"avg": "last7_avg",
"era": "last5_era",
"slg": "last7_slg",
"obp": "last7_obp",
"ops": "last7_ops",
"whip": "last5_whip",
"cumulative_win": "last5_cumulative_win",
"cumulative_hold": "last5_cumulative_hold",
"cumulative_save": "last5_cumulative_save",
"cumulative_loss": "last5_cumulative_loss",
"cumulative_bs": "last5_cumulative_bs",
"cumulative_k": "last5_cumulative_k",
},
inplace=True,
)
lastmonth_player_target_stats
team_scores
# #### Create "year", "month", "days" columns
standings["year"] = pd.to_datetime(standings["gameDate"]).dt.year
standings["month"] = pd.to_datetime(standings["gameDate"]).dt.month
standings["days"] = pd.to_datetime(standings["gameDate"]).dt.day
standings["date"] = (
standings["year"] * 10000 + standings["month"] * 100 + standings["days"]
)
targets["year"] = pd.to_datetime(targets["date"], format="%Y%m%d").dt.year
targets["month"] = pd.to_datetime(targets["date"], format="%Y%m%d").dt.month
targets["days"] = pd.to_datetime(targets["date"], format="%Y%m%d").dt.day
# transactions['datetime_date'] = pd.to_datetime(transactions['date'], format="%Y%m%d")
# transactions['transaction_time'] = 1
# tmp_df = transactions.copy()
# tmp_df['datetime_date'] = transactions['datetime_date'] + pd.DateOffset(1)
# tmp_df['transaction_time'] = 0
# tmp_df['date'] = tmp_df['datetime_date'].dt.year * 10000 + tmp_df['datetime_date'].dt.month * 100 + tmp_df['datetime_date'].dt.day
# transactions = pd.concat([transactions, tmp_df], axis=0)
# ### Box Score
# #### Combine two games in one day
scores["game_count"] = 1
scores = scores.groupby(["playerId", "date"]).sum().reset_index()
scores
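# Quick check (hypothetical, not part of the original flow): rows with game_count > 1 are
# doubleheader days whose two box scores were summed into a single row above.
print(scores.loc[scores["game_count"] > 1, ["playerId", "date", "game_count"]].head())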
last_year_award["year"] = last_year_award["year"].apply(lambda x: x + 1)
# ### Columns Name
targets_cols = [
"playerId",
"target1",
"target2",
"target3",
"target4",
"date",
"year",
"month",
]
players_cols = ["playerId", "primaryPositionName"]
rosters_cols = ["playerId", "teamId", "status", "date"]
scores_cols = [
"playerId",
"flyOuts",
"groundOuts",
"runsScored",
"gamePk",
"doubles",
"triples",
"homeRuns",
"strikeOuts",
"baseOnBalls",
"intentionalWalks",
"hits",
"hitByPitch",
"atBats",
"caughtStealing",
"stolenBases",
"groundIntoDoublePlay",
"groundIntoTriplePlay",
"plateAppearances",
"totalBases",
"rbi",
"leftOnBase",
"sacBunts",
"sacFlies",
"catchersInterference",
"pickoffs",
"gamesPlayedPitching",
"gamesStartedPitching",
"completeGamesPitching",
"shutoutsPitching",
"winsPitching",
"lossesPitching",
"runsPitching",
"doublesPitching",
"triplesPitching",
"homeRunsPitching",
"strikeOutsPitching",
"baseOnBallsPitching",
"intentionalWalksPitching",
"hitsPitching",
"hitByPitchPitching",
"atBatsPitching",
"caughtStealingPitching",
"stolenBasesPitching",
"inningsPitched",
"saveOpportunities",
"earnedRuns",
"battersFaced",
"outsPitching",
"pitchesThrown",
"balls",
"strikes",
"hitBatsmen",
"balks",
"wildPitches",
"pickoffsPitching",
"rbiPitching",
"gamesFinishedPitching",
"inheritedRunners",
"inheritedRunnersScored",
"catchersInterferencePitching",
"sacBuntsPitching",
"sacFliesPitching",
"saves",
"holds",
"blownSaves",
"assists",
"putOuts",
"errors",
"chances",
"date",
]
team_scores_cols = ["teamId", "gamePk", "team_runsScored", "team_runsPitching"]
trans_cols = ["playerId", "date", "typeDesc"]
awards_cols = ["playerId", "date", "awardId"]
games_cols = ["gamePk", "homeId", "awayId", "dayNight", "gameType"]
standings_cols = ["streakCode", "pct", "teamId", "date"]
stats_cols = [
"playerId",
"target1_median",
"target1_std",
"target2_median",
"target2_std",
"target3_median",
"target3_std",
"target4_median",
"target4_std",
"target1_mean",
"target2_mean",
"target3_mean",
"target4_mean",
]
last_month_stats_cols = [
"playerId",
"year",
"month",
"target1_last_month_median",
"target1_last_month_std",
"target2_last_month_median",
"target2_last_month_std",
"target3_last_month_median",
"target3_last_month_std",
"target4_last_month_median",
"target4_last_month_std",
"target1_last_month_mean",
"target2_last_month_mean",
"target3_last_month_mean",
"target4_last_month_mean",
]
feature_cols = [
"label_playerId",
"label_primaryPositionName",
"label_teamId",
"label_status",
"DaysAfterRegularSeason",
"label_typeDesc",
"label_awardId",
"flyOuts",
"groundOuts",
"runsScored",
"label_homeId",
"label_awayId",
"Split",
"label_gameType",
"WinLose",
"Streak",
"pct",
"label_dayNight",
"cumulative_hits",
"cumulative_hr",
"cumulative_rbi",
"cumulative_win",
"cumulative_loss",
"pitch_win_pct",
"cumulative_save",
"cumulative_bs",
"cumulative_h_streak",
"last7_avg",
"last7_ops",
"last7_cumulative_hits",
"last7_cumulative_hr",
"last7_cumulative_rbi",
"last5_era",
"last5_whip",
"last5_cumulative_win",
"last5_cumulative_loss",
"era+",
"avg+",
"whip+",
"ops+",
"label_last_year_award",
"doubles",
"triples",
"homeRuns",
"strikeOuts",
"baseOnBalls",
"intentionalWalks",
"hits",
"hitByPitch",
"atBats",
"caughtStealing",
"stolenBases",
"totalBases",
"rbi",
"leftOnBase",
"catchersInterference",
"pickoffs",
"gamesPlayedPitching",
"gamesStartedPitching",
"completeGamesPitching",
"shutoutsPitching",
"winsPitching",
"lossesPitching",
"runsPitching",
"doublesPitching",
"triplesPitching",
"homeRunsPitching",
"strikeOutsPitching",
"baseOnBallsPitching",
"intentionalWalksPitching",
"hitsPitching",
"hitByPitchPitching",
"atBatsPitching",
"caughtStealingPitching",
"stolenBasesPitching",
"inningsPitched",
"saveOpportunities",
"earnedRuns",
"outsPitching",
"pitchesThrown",
"balls",
"strikes",
"hitBatsmen",
"balks",
"wildPitches",
"pickoffsPitching",
"rbiPitching",
"gamesFinishedPitching",
"inheritedRunners",
"inheritedRunnersScored",
"catchersInterferencePitching",
"saves",
"holds",
"blownSaves",
"assists",
"putOuts",
"errors",
"chances",
"target1_median",
"target1_std",
"target1_mean",
"target1_last_month_median",
"target1_last_month_std",
"target1_last_month_mean",
"target2_median",
"target2_std",
"target2_mean",
"target2_last_month_median",
"target2_last_month_std",
"target2_last_month_mean",
"target3_median",
"target3_std",
"target3_mean",
"target3_last_month_median",
"target3_last_month_std",
"target3_last_month_mean",
"target4_median",
"target4_std",
"target4_mean",
"target4_last_month_median",
"target4_last_month_std",
"target4_last_month_mean",
]
# ### Merge data
train = targets[targets_cols].merge(players[players_cols], on=["playerId"], how="left")
train = train.merge(rosters[rosters_cols], on=["playerId", "date"], how="left")
train = train.merge(scores[scores_cols], on=["playerId", "date"], how="left")
train = train.merge(games[games_cols], on=["gamePk"], how="left")
for i, row in tqdm_notebook(
games[(games["gameType"] == "E") | (games["gameType"] == "S")].iterrows()
):
train.loc[
(train["date"] == row["date"])
& ((train["teamId"] == row["homeId"]) | (train["teamId"] == row["awayId"])),
"gameType",
] = row["gameType"]
train.loc[train["gamePk"] > 700000, "gameType"] = "R"
train = train.merge(standings[standings_cols], on=["teamId", "date"], how="left")
# train = train.merge(team_scores[team_scores_cols], on=['gamePk', 'teamId'], how='left')
train = train.merge(
lastmonth_player_target_stats[last_month_stats_cols],
how="inner",
left_on=["playerId", "year", "month"],
right_on=["playerId", "year", "month"],
)
train = train.merge(
inseason_player_target_stats[stats_cols],
how="inner",
left_on=["playerId"],
right_on=["playerId"],
)
train = train.merge(seasons, on=["year"], how="left")
transactions = transactions[trans_cols].drop_duplicates(subset=["playerId", "date"])
train = train.merge(transactions, on=["playerId", "date"], how="left")
awards = awards[awards_cols].drop_duplicates(subset=["playerId", "date"])
train = train.merge(awards, on=["playerId", "date"], how="left")
train = train.drop_duplicates(subset=["playerId", "date"])
train = train.merge(cumulative_data, on=["playerId", "date"], how="left")
train = train.merge(last7_cumulative_data, on=["playerId", "date"], how="left")
train = train.merge(last_year_award, on=["playerId", "year"], how="left")
total_cumulative_data = total_cumulative_data.drop(columns=["playerId"])
train = train.merge(total_cumulative_data, on=["date"], how="left")
train
# ### Label Encoding
player2num = {c: i for i, c in enumerate(train["playerId"].unique())}
position2num = {c: i for i, c in enumerate(train["primaryPositionName"].unique())}
teamid2num = {c: i for i, c in enumerate(train["teamId"].unique())}
status2num = {c: i for i, c in enumerate(train["status"].unique())}
transdesc2num = {c: i for i, c in enumerate(train["typeDesc"].unique())}
awardid2num = {c: i for i, c in enumerate(train["awardId"].unique())}
gametype2num = {c: i for i, c in enumerate(train["gameType"].unique())}
last_year_award2num = {c: i for i, c in enumerate(train["award"].unique())}
# print(gametype2num)
train["label_playerId"] = train["playerId"].map(player2num)
train["label_primaryPositionName"] = train["primaryPositionName"].map(position2num)
train["label_teamId"] = train["teamId"].map(teamid2num)
train["label_status"] = train["status"].map(status2num)
train["label_typeDesc"] = train["typeDesc"].map(transdesc2num)
train["label_awardId"] = train["awardId"].map(awardid2num)
train["label_homeId"] = train["homeId"].map(teamid2num)
train["label_awayId"] = train["awayId"].map(teamid2num)
train["label_dayNight"] = train["dayNight"].map({"day": 0, "night": 1})
train["label_gameType"] = train["gameType"].map(gametype2num)
train["label_last_year_award"] = train["award"].map(last_year_award2num)
# ## Feature Engineering
train["datetime_date"] = pd.to_datetime(train["date"], format="%Y%m%d")
train["streakCode"] = train["streakCode"].fillna("W0")
train["WinLose"] = train["streakCode"].str[0]
train["WinLose"] = train["WinLose"].map({"L": 0, "W": 1})
train["Streak"] = train["streakCode"].str[1].astype(int)
train["pct"] = train["pct"].astype(float)
train["hr_ab"] = train["cumulative_hr"] / train["cumulative_atBats"]
train["pitch_win_pct"] = train["cumulative_win"] / (
train["cumulative_win"] + train["cumulative_loss"]
)
train["era+"] = train["total_era"] / train["era"] * 100
train["whip+"] = train["total_whip"] / train["whip"] * 100
train["ops+"] = (
train["slg"] / train["total_slg"] + train["obp"] / train["total_obp"] - 1
) * 100
train["avg+"] = train["avg"] / train["total_avg"] * 100
# #### CV Split
train["DaysAfterRegularSeason"] = (
train["datetime_date"] - train["regularSeasonStartDate"]
).dt.days
train["DaysAfterAllStar"] = (train["datetime_date"] - train["allStarDate"]).dt.days
train["DaysAfterpostSeasonEnd"] = (
train["datetime_date"] - train["postSeasonEndDate"]
).dt.days
train["DaysAfterRegularSeasonEnd"] = (
train["datetime_date"] - train["regularSeasonEndDate"]
).dt.days
# train['DaysAfterLastSeasonEnd'] = (train['datetime_date'] - train['LastSeasonEndDate']).dt.days
days_df = train[
[
"year",
"month",
"DaysAfterRegularSeason",
"DaysAfterAllStar",
"DaysAfterRegularSeasonEnd",
]
]
def f(x):
if x[2] < 0:
return 0
elif x[0] == 2018:
if x[1] == 5 or x[1] == 6:
return 1
elif x[1] == 8 or x[1] == 9:
return 2
else:
return 0
# elif x[0] == 2019:
# if x[1] == 2 or x[1] == 3 or x[1] == 4 or x[1] == 5 or x[1] == 6:
# return 3
# elif x[1] == 7 or x[1] == 8 or x[1] == 9 or x[1] == 10 or x[1] == 11:
# return 4
# else:
# return 0
# elif x[0] == 2020:
# return 5
elif x[0] == 2019:
if x[1] == 5 or x[1] == 6:
return 3
elif x[1] == 8 or x[1] == 9:
return 4
else:
return 0
else:
return 0
train["Split"] = days_df.apply(f, axis=1)
active_players = players.loc[
players["playerForTestSetAndFuturePreds"] == True, "playerId"
]
active_players = active_players.apply(lambda x: player2num[x])
x_train = train[feature_cols].reset_index(drop=True)
NFOLDS = 4
train_y = train[["target1", "target2", "target3", "target4"]].reset_index(drop=True)
x_train
import gc
del train
# del players
# del seasons
# del rosters
# del targets
# del games
# del scores
# del team_scores
# del transactions
# del awards
# del standings
# del inseason_player_target_stats
# del lastmonth_player_target_stats
# del cumulative_data
# del playoff_cumulative_data
# del last7_cumulative_data
# del last_year_award
# del total_cumulative_data
gc.collect()
# ## Training
def fit_lgbm(x_train, y_train, x_valid, y_valid, params: dict = None, verbose=100):
oof_pred = np.zeros(len(y_valid), dtype=np.float32)
model = lgbm.LGBMRegressor(**params)
model.fit(
x_train,
y_train,
eval_set=[(x_valid, y_valid)],
early_stopping_rounds=verbose,
verbose=verbose,
)
oof_pred = model.predict(x_valid)
score = mean_absolute_error(oof_pred, y_valid)
print("mae:", score)
return oof_pred, model, score
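# Note: early_stopping_rounds is tied to `verbose` (100 here), so each booster stops after 100
# rounds without validation improvement and predicts with its best iteration.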
cv = 0
params = {
"objective": "mae",
"reg_alpha": 0.1,
"reg_lambda": 0.1,
"n_estimators": 2000,
"learning_rate": 0.1,
"random_state": 208,
"num_leaves": 250,
}
model1_list = []
model2_list = []
model3_list = []
model4_list = []
for idx in range(NFOLDS):
print("FOLD:", idx)
# tr_idx, val_idx = folds[idx]
x_tr = x_train[x_train["Split"] != idx + 1].drop(columns=["Split"])
x_val = x_train[x_train["Split"] == idx + 1][
x_train[x_train["Split"] == idx + 1]["label_playerId"].isin(
active_players.tolist()
)
].drop(columns=["Split"])
y_tr, y_val = (
train_y[x_train["Split"] != idx + 1],
train_y[x_train["Split"] == idx + 1][
x_train[x_train["Split"] == idx + 1]["label_playerId"].isin(
active_players.tolist()
)
],
)
oof1, model1, score1 = fit_lgbm(
x_tr, y_tr["target1"], x_val, y_val["target1"], params
)
oof2, model2, score2 = fit_lgbm(
x_tr, y_tr["target2"], x_val, y_val["target2"], params
)
oof3, model3, score3 = fit_lgbm(
x_tr, y_tr["target3"], x_val, y_val["target3"], params
)
oof4, model4, score4 = fit_lgbm(
x_tr, y_tr["target4"], x_val, y_val["target4"], params
)
score = (score1 + score2 + score3 + score4) / 4
print(f"score: {score}")
cv += score / NFOLDS
model1_list.append(model1)
model2_list.append(model2)
model3_list.append(model3)
model4_list.append(model4)
print("{} Folds Average CV: {}".format(NFOLDS, cv))
# ## Feature Importance
# players = pd.read_csv(BASE_DIR / 'players.csv')
# seasons = pd.read_csv(BASE_DIR / 'seasons.csv')
# rosters = pd.read_pickle(TRAIN_DIR / 'rosters_train.pkl')
# targets = pd.read_pickle(TRAIN_DIR / 'nextDayPlayerEngagement_train.pkl')
# games = pd.read_pickle(TRAIN_DIR / 'games_train.pkl')
# scores = pd.read_pickle(TRAIN_DIR / 'playerBoxScores_train.pkl')
# team_scores = pd.read_pickle(TRAIN_DIR / 'teamBoxScores_train.pkl')
# transactions = pd.read_pickle(TRAIN_DIR / 'transactions_train.pkl')
# awards = pd.read_pickle(TRAIN_DIR / 'awards_train.pkl')
# standings = pd.read_pickle(TRAIN_DIR / 'standings_train.pkl')
# inseason_player_target_stats = pd.read_csv("../input/inseason-target-stats/inseason_target_stats.csv")
# lastmonth_player_target_stats = pd.read_csv("../input/month-target-stats/last_month_target_stats.csv")
# cumulative_data = pd.read_csv("../input/cumulated-revised-data/train_cumulated.csv")
# playoff_cumulative_data = pd.read_csv('../input/playoff-cumulated-data/train_cumulated_playoff.csv')
# last7_cumulative_data = pd.read_csv('../input/recently-player-stats/train_cumulated_last7.csv')
# last_year_award = pd.read_csv('../input/last-month-data/last_year_award.csv')
# total_cumulative_data = pd.read_csv('../input/last-month-data/total_train_cumulated.csv')
# last_year_award['year'] = last_year_award['year'].apply(lambda x: x+1)
# total_cumulative_data = total_cumulative_data.drop(columns=['playerId'])
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
# sorted(zip(clf.feature_importances_, X.columns), reverse=True)
# Models were trained without the "Split" fold column, so use the matching feature names;
# save each figure before plt.show() so the written PNG is not blank.
feat_names = x_train.drop(columns=["Split"]).columns
feature_imp = pd.DataFrame(
    sorted(zip(model1.feature_importances_, feat_names)),
    columns=["Value", "Feature"],
)
plt.figure(figsize=(20, 10))
sns.barplot(
    x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False)
)
plt.title("LightGBM Features with target 1")
plt.tight_layout()
plt.savefig("lgbm_importances-01.png")
plt.show()
feature_imp = pd.DataFrame(
    sorted(zip(model2.feature_importances_, feat_names)),
    columns=["Value", "Feature"],
)
plt.figure(figsize=(20, 10))
sns.barplot(
    x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False)
)
plt.title("LightGBM Features with target 2")
plt.tight_layout()
plt.savefig("lgbm_importances-02.png")
plt.show()
feature_imp = pd.DataFrame(
    sorted(zip(model3.feature_importances_, feat_names)),
    columns=["Value", "Feature"],
)
plt.figure(figsize=(20, 10))
sns.barplot(
    x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False)
)
plt.title("LightGBM Features with target 3")
plt.tight_layout()
plt.savefig("lgbm_importances-03.png")
plt.show()
feature_imp = pd.DataFrame(
    sorted(zip(model4.feature_importances_, feat_names)),
    columns=["Value", "Feature"],
)
plt.figure(figsize=(20, 10))
sns.barplot(
    x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False)
)
plt.title("LightGBM Features with target 4")
plt.tight_layout()
plt.savefig("lgbm_importances-04.png")
plt.show()
# ## Calculate Cumulative data
cumulative_hits = {}
cumulative_atBats = {}
cumulative_earnedRuns = {}
cumulative_inningsPitched = {}
cumulative_totalBases = {}
cumulative_baseOnBalls = {}
cumulative_hitByPitch = {}
cumulative_sacFlies = {}
cumulative_baseOnBallsPitching = {}
cumulative_hitByPitchPitching = {}
cumulative_hitsPitching = {}
cumulative_hr = {}
cumulative_rbi = {}
cumulative_win = {}
cumulative_loss = {}
cumulative_k = {}
cumulative_save = {}
cumulative_bs = {}
cumulative_h_streak = {}
# cumulative_hr_streak = {}
# cumulative_base_streak = {}
for idx, row in tqdm_notebook(cumulative_data.iterrows()):
if (idx) % 100000 == 0:
print(idx)
date = str(row["date"])
playerId = row["playerId"]
if date[:4] == "2021":
cumulative_hits[playerId] = row["cumulative_hits"]
cumulative_atBats[playerId] = row["cumulative_atBats"]
cumulative_earnedRuns[playerId] = row["cumulative_earnedRuns"]
cumulative_inningsPitched[playerId] = row["cumulative_inningsPitched"]
cumulative_totalBases[playerId] = row["cumulative_totalBases"]
cumulative_baseOnBalls[playerId] = row["cumulative_baseOnBalls"]
cumulative_hitByPitch[playerId] = row["cumulative_hitByPitch"]
cumulative_sacFlies[playerId] = row["cumulative_sacFlies"]
cumulative_baseOnBallsPitching[playerId] = row["cumulative_baseOnBallsPitching"]
cumulative_hitByPitchPitching[playerId] = row["cumulative_hitByPitchPitching"]
cumulative_hitsPitching[playerId] = row["cumulative_hitsPitching"]
cumulative_hr[playerId] = row["cumulative_hr"]
cumulative_rbi[playerId] = row["cumulative_rbi"]
cumulative_win[playerId] = row["cumulative_win"]
cumulative_loss[playerId] = row["cumulative_loss"]
cumulative_k[playerId] = row["cumulative_k"]
cumulative_save[playerId] = row["cumulative_save"]
cumulative_bs[playerId] = row["cumulative_bs"]
cumulative_h_streak[playerId] = row["cumulative_h_streak"]
# cumulative_hr_streak[playerId] = row['cumulative_hr_streak']
# cumulative_base_streak[playerId] = row['cumulative_base_streak']
total_hits = total_cumulative_data["total_cumulative_hits"]
total_atBats = total_cumulative_data["total_cumulative_atBats"]
total_earnedRuns = total_cumulative_data["total_cumulative_earnedRuns"]
total_inningsPitched = total_cumulative_data["total_cumulative_inningsPitched"]
total_totalBases = total_cumulative_data["total_cumulative_totalBases"]
total_baseOnBalls = total_cumulative_data["total_cumulative_baseOnBalls"]
total_hitByPitch = total_cumulative_data["total_cumulative_hitByPitch"]
total_sacFlies = total_cumulative_data["total_cumulative_sacFlies"]
total_baseOnBallsPitching = total_cumulative_data[
"total_cumulative_baseOnBallsPitching"
]
total_hitByPitchPitching = total_cumulative_data["total_cumulative_hitByPitchPitching"]
total_hitsPitching = total_cumulative_data["total_cumulative_hitsPitching"]
total_hr = total_cumulative_data["total_cumulative_hr"]
total_win = total_cumulative_data["total_cumulative_win"]
total_losses = total_cumulative_data["total_cumulative_loss"]
total_holds = total_cumulative_data["total_cumulative_hold"]
total_saves = total_cumulative_data["total_cumulative_save"]
total_bs = total_cumulative_data["total_cumulative_bs"]
total_k = total_cumulative_data["total_cumulative_k"]
total_rbi = total_cumulative_data["total_cumulative_rbi"]
total_intentionalWalksPitching = total_cumulative_data[
"total_cumulative_intentionalWalksPitching"
]
total_homeRunsPitching = total_cumulative_data["total_cumulative_homeRunsPitching"]
total_atBatsPitching = total_cumulative_data["total_cumulative_atBatsPitching"]
players_cols = ["playerId", "primaryPositionName"]
rosters_cols = ["playerId", "teamId", "status"]
scores_cols = [
"playerId",
"flyOuts",
"gamePk",
"groundOuts",
"runsScored",
"doubles",
"triples",
"homeRuns",
"strikeOuts",
"baseOnBalls",
"intentionalWalks",
"hits",
"hitByPitch",
"atBats",
"caughtStealing",
"stolenBases",
"groundIntoDoublePlay",
"groundIntoTriplePlay",
"plateAppearances",
"totalBases",
"rbi",
"leftOnBase",
"sacBunts",
"sacFlies",
"catchersInterference",
"pickoffs",
"gamesPlayedPitching",
"gamesStartedPitching",
"completeGamesPitching",
"shutoutsPitching",
"winsPitching",
"lossesPitching",
"runsPitching",
"doublesPitching",
"triplesPitching",
"homeRunsPitching",
"strikeOutsPitching",
"baseOnBallsPitching",
"intentionalWalksPitching",
"hitsPitching",
"hitByPitchPitching",
"atBatsPitching",
"caughtStealingPitching",
"stolenBasesPitching",
"inningsPitched",
"saveOpportunities",
"earnedRuns",
"battersFaced",
"outsPitching",
"pitchesThrown",
"balls",
"strikes",
"hitBatsmen",
"balks",
"wildPitches",
"pickoffsPitching",
"rbiPitching",
"gamesFinishedPitching",
"inheritedRunners",
"inheritedRunnersScored",
"catchersInterferencePitching",
"sacBuntsPitching",
"sacFliesPitching",
"saves",
"holds",
"blownSaves",
"assists",
"putOuts",
"errors",
"chances",
]
trans_cols = ["playerId", "typeDesc"]
awards_cols = ["playerId", "awardId"]
games_cols = ["gamePk", "homeId", "awayId", "dayNight", "gameType"]
standings_cols = ["streakCode", "pct", "leagueRank", "teamId"]
team_scores_cols = ["teamId", "gamePk", "team_runsScored", "team_runsPitching"]
null = np.nan
true = True
false = False
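# null/true/false are defined so that eval() on the JSON-like strings nested in test_df
# (rosters, playerBoxScores, transactions, awards, games, standings) parses without a NameError.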
# player_target_stats = pd.read_csv("../input/player-target-stats/player_target_stats.csv")
env = mlb.make_env() # initialize the environment
iter_test = env.iter_test() # iterator which loops over each date in test set
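# The time-series API serves one test date per iteration; the loop below builds that day's
# features and calls env.predict() with the filled sample_prediction_df before moving on.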
import torch
player_cumulative_stats_dict = torch.load(
"../input/recently-player-stats/player_stats_last7.pkl"
)
for i, (test_df, sample_prediction_df) in enumerate(iter_test): # make predictions here
sample_prediction_df = sample_prediction_df.reset_index(drop=True)
    # create dataset
sample_prediction_df["playerId"] = sample_prediction_df["date_playerId"].map(
lambda x: int(x.split("_")[1])
)
sample_prediction_df["date"] = pd.to_datetime(
sample_prediction_df["date_playerId"].map(lambda x: int(x.split("_")[0])),
format="%Y%m%d",
) - pd.DateOffset(1)
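    # Targets are next-day engagement, so the 1-day offset above aligns each prediction row
    # with the feature date whose data is actually available in test_df.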
# Dealing with missing values
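    # NaN != NaN, so this "x == x" pattern is True only when the nested JSON payload exists for this date.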
if test_df["rosters"].iloc[0] == test_df["rosters"].iloc[0]:
test_rosters = pd.DataFrame(eval(test_df["rosters"].iloc[0]))
else:
test_rosters = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in rosters.columns:
if col == "playerId":
continue
test_rosters[col] = np.nan
if test_df["playerBoxScores"].iloc[0] == test_df["playerBoxScores"].iloc[0]:
test_scores = pd.DataFrame(eval(test_df["playerBoxScores"].iloc[0]))
else:
test_scores = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in scores.columns:
if col == "playerId":
continue
test_scores[col] = np.nan
# if test_df['teamBoxScores'].iloc[0] == test_df['teamBoxScores'].iloc[0]:
# test_team_scores = pd.DataFrame(eval(test_df['teamBoxScores'].iloc[0]))
# else:
# test_team_scores = pd.DataFrame({'playerId': sample_prediction_df['playerId']})
# for col in team_scores.columns:
# if col == 'playerId': continue
# test_team_scores[col] = np.nan
if test_df["transactions"].iloc[0] == test_df["transactions"].iloc[0]:
test_trans = pd.DataFrame(eval(test_df["transactions"].iloc[0]))
test_trans["transaction_time"] = 1
else:
test_trans = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in transactions.columns:
if col == "playerId":
continue
test_trans[col] = np.nan
test_trans = test_trans.drop_duplicates(subset=["playerId"])
# pre_trans_old = test_trans.copy()
# if i != 0:
# test_trans = pd.concat([test_trans, pre_trans], axis=0)
# test_trans = test_trans.drop_duplicates(subset=['playerId'])
# if pre_trans_old.loc[0, 'transaction_time'] == 1:
# pre_trans_old['transaction_time'] = 0
# pre_trans = pre_trans_old.copy()
if test_df["awards"].iloc[0] == test_df["awards"].iloc[0]:
test_awards = pd.DataFrame(eval(test_df["awards"].iloc[0]))
test_awards["awards_time"] = 1
else:
test_awards = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in awards.columns:
if col == "playerId":
continue
test_awards[col] = np.nan
test_awards = test_awards.drop_duplicates(subset=["playerId"])
if test_df["games"].iloc[0] == test_df["games"].iloc[0]:
test_games = pd.DataFrame(eval(test_df["games"].iloc[0]))
else:
test_games = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in games.columns:
if col == "playerId":
continue
test_games[col] = np.nan
if test_df["standings"].iloc[0] == test_df["standings"].iloc[0]:
test_standings = pd.DataFrame(eval(test_df["standings"].iloc[0]))
else:
test_standings = pd.DataFrame({"playerId": sample_prediction_df["playerId"]})
for col in standings.columns:
if col == "playerId":
continue
test_standings[col] = np.nan
test_scores = test_scores.groupby("playerId").sum().reset_index()
test = sample_prediction_df[["playerId", "date"]].copy()
test = test.merge(players[players_cols], on="playerId", how="left")
test = test.merge(test_rosters[rosters_cols], on="playerId", how="left")
test = test.merge(test_scores[scores_cols], on="playerId", how="left")
# test = test.merge(test_team_scores[team_scores_cols], on=['teamId', 'gamePk'], how='left')
test = test.merge(test_games[games_cols], on="gamePk", how="left")
test = test.merge(test_standings[standings_cols], on="teamId", how="left")
test["year"] = test["date"].dt.year
test["month"] = test["date"].dt.month
test["days"] = test["date"].dt.day
test = test.merge(
lastmonth_player_target_stats[last_month_stats_cols],
how="inner",
left_on=["playerId", "year", "month"],
right_on=["playerId", "year", "month"],
)
test = test.merge(
inseason_player_target_stats[stats_cols],
how="inner",
left_on=["playerId"],
right_on=["playerId"],
)
test = test.merge(last_year_award, on=["playerId", "year"], how="left")
test = test.merge(test_trans[trans_cols], on="playerId", how="left")
test = test.merge(test_awards[awards_cols], on="playerId", how="left")
test = test.merge(seasons, on=["year"], how="left")
test.loc[test["gamePk"] > 700000, "gameType"] = "R"
# test.loc[test['gamePk'] > 700000, 'dayNight'] = 'day_night'
test["DaysAfterRegularSeason"] = (
test["date"] - test["regularSeasonStartDate"]
).dt.days
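    # Innings pitched are recorded in thirds: "x.1" means x + 1/3 innings and "x.2" means x + 2/3,
    # which the conversion below (and the identical one inside the player loop) turns into a proper fraction.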
inningsPitched_list = []
for i in range(test.shape[0]):
inningsPitched = test["inningsPitched"][i]
if not np.isnan(inningsPitched):
if (str(inningsPitched)[-1]) == "1":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 1 / 3
elif (str(inningsPitched)[-1]) == "2":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 2 / 3
inningsPitched_list.append(inningsPitched)
total_hits += test["hits"].sum()
total_atBats += test["atBats"].sum()
total_earnedRuns += test["earnedRuns"].sum()
total_inningsPitched += np.sum(inningsPitched_list)
total_totalBases += test["totalBases"].sum()
total_baseOnBalls += test["baseOnBalls"].sum()
total_hitByPitch += test["hitByPitch"].sum()
total_sacFlies += test["sacFlies"].sum()
total_baseOnBallsPitching += test["baseOnBallsPitching"].sum()
total_hitByPitchPitching += test["hitByPitchPitching"].sum()
total_hitsPitching += test["hitsPitching"].sum()
total_hr += test["homeRuns"].sum()
total_win += test["winsPitching"].sum()
total_losses += test["lossesPitching"].sum()
total_holds += test["holds"].sum()
total_saves += test["saves"].sum()
total_bs += test["blownSaves"].sum()
total_k += test["strikeOutsPitching"].sum()
total_rbi += test["rbi"].sum()
total_intentionalWalksPitching += test["intentionalWalksPitching"].sum()
total_homeRunsPitching += test["homeRunsPitching"].sum()
total_atBatsPitching += test["atBatsPitching"].sum()
avg_list = []
era_list = []
whip_list = []
ops_list = []
obp_list = []
slg_list = []
hit_list = []
hr_list = []
rbi_list = []
atBats_list = []
inningsPitched_list = []
win_list = []
loss_list = []
save_list = []
bs_list = []
h_streak_list = []
last7_avg_list = []
last5_era_list = []
last5_whip_list = []
last7_ops_list = []
# last7_obp_list = []
# last7_slg_list = []
last7_hit_list = []
last7_hr_list = []
last7_rbi_list = []
last7_atBats_list = []
# inningsPitched_list = []
last5_win_list = []
last5_loss_list = []
# save_list = []
# bs_list = []
avgplus_list = []
whipplus_list = []
opsplus_list = []
eraplus_list = []
for idx, row in test.iterrows():
playerId = row["playerId"]
if not np.isnan(row["hits"]) and row["gameType"] != "A":
cumulative_hits[playerId] += row["hits"]
cumulative_atBats[playerId] += row["atBats"]
cumulative_totalBases[playerId] += row["totalBases"]
cumulative_baseOnBalls[playerId] += row["baseOnBalls"]
cumulative_hitByPitch[playerId] += row["hitByPitch"]
cumulative_sacFlies[playerId] += row["sacFlies"]
cumulative_hr[playerId] += row["homeRuns"]
cumulative_rbi[playerId] += row["rbi"]
player_cumulative_stats_dict[playerId]["cumulative_hits"].append(
row["hits"]
)
player_cumulative_stats_dict[playerId]["cumulative_atBats"].append(
row["atBats"]
)
player_cumulative_stats_dict[playerId]["cumulative_totalBases"].append(
row["totalBases"]
)
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"].append(
row["hitByPitch"]
)
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"].append(
row["baseOnBalls"]
)
player_cumulative_stats_dict[playerId]["cumulative_sacFlies"].append(
row["sacFlies"]
)
player_cumulative_stats_dict[playerId]["cumulative_hr"].append(
row["homeRuns"]
)
player_cumulative_stats_dict[playerId]["cumulative_rbi"].append(row["rbi"])
if len(player_cumulative_stats_dict[playerId]["cumulative_hits"]) > 7:
player_cumulative_stats_dict[playerId]["cumulative_hits"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_atBats"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_totalBases"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_sacFlies"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_hr"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_rbi"].pop(0)
if row["hits"] != 0:
cumulative_h_streak[playerId] += 1
else:
if not np.isnan(cumulative_h_streak[playerId]):
cumulative_h_streak[playerId] = 0
# if row['homeRuns'] != 0:
# cumulative_hr_streak[playerId] += 1
# else:
# cumulative_hr_streak[playerId] = 0
# if row['hits'] != 0 or row['baseOnBalls'] != 0 or row['hitByPitch'] != 0:
# cumulative_base_streak[playerId] += 1
# else:
# cumulative_base_streak[playerId] = 0
if not np.isnan(row["earnedRuns"]):
inningsPitched = row["inningsPitched"]
if (str(inningsPitched)[-1]) == "1":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 1 / 3
elif (str(inningsPitched)[-1]) == "2":
inningsPitched = int(str(inningsPitched).split(".")[0]) + 2 / 3
cumulative_earnedRuns[playerId] += row["earnedRuns"]
cumulative_inningsPitched[playerId] += inningsPitched
cumulative_hitByPitchPitching[playerId] += row["hitByPitchPitching"]
cumulative_hitsPitching[playerId] += row["hitsPitching"]
cumulative_baseOnBallsPitching[playerId] += row["baseOnBallsPitching"]
cumulative_win[playerId] += row["winsPitching"]
cumulative_loss[playerId] += row["lossesPitching"]
cumulative_k[playerId] += row["strikeOutsPitching"]
cumulative_bs[playerId] += row["blownSaves"]
cumulative_save[playerId] += row["saves"]
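            # playerId 660271 (presumably Shohei Ohtani, a two-way player) is special-cased below:
            # his recent pitching lists are only updated for games in which he actually pitched.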
if playerId != 660271 or inningsPitched > 0:
player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"].append(
row["earnedRuns"]
)
player_cumulative_stats_dict[playerId][
"cumulative_inningsPitched"
].append(inningsPitched)
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
].append(row["baseOnBallsPitching"])
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
].append(row["hitByPitchPitching"])
player_cumulative_stats_dict[playerId][
"cumulative_hitsPitching"
].append(row["hitsPitching"])
player_cumulative_stats_dict[playerId]["cumulative_saves"].append(
row["saves"]
)
player_cumulative_stats_dict[playerId]["cumulative_blownSaves"].append(
row["blownSaves"]
)
player_cumulative_stats_dict[playerId]["cumulative_win"].append(
row["winsPitching"]
)
player_cumulative_stats_dict[playerId]["cumulative_losses"].append(
row["lossesPitching"]
)
if len(player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"]) > 5:
player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"].pop(
0
)
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
].pop(0)
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_hitsPitching"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_saves"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_blownSaves"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_win"].pop(0)
player_cumulative_stats_dict[playerId]["cumulative_losses"].pop(0)
avg_list.append(cumulative_hits[playerId] / cumulative_atBats[playerId])
era_list.append(
cumulative_earnedRuns[playerId] / cumulative_inningsPitched[playerId] * 9
)
slg = cumulative_totalBases[playerId] / cumulative_atBats[playerId]
obp = (
cumulative_hits[playerId]
+ cumulative_baseOnBalls[playerId]
+ cumulative_hitByPitch[playerId]
) / (
cumulative_atBats[playerId]
+ cumulative_baseOnBalls[playerId]
+ cumulative_hitByPitch[playerId]
+ cumulative_sacFlies[playerId]
)
ops_list.append(slg + obp)
whip = (
cumulative_baseOnBallsPitching[playerId]
+ cumulative_hitByPitchPitching[playerId]
+ cumulative_hitsPitching[playerId]
) / cumulative_inningsPitched[playerId]
whip_list.append(whip)
hr_list.append(cumulative_hr[playerId])
hit_list.append(cumulative_hits[playerId])
rbi_list.append(cumulative_rbi[playerId])
inningsPitched_list.append(cumulative_inningsPitched[playerId])
atBats_list.append(cumulative_atBats[playerId])
win_list.append(cumulative_win[playerId])
loss_list.append(cumulative_loss[playerId])
save_list.append(cumulative_save[playerId])
bs_list.append(cumulative_bs[playerId])
h_streak_list.append(cumulative_h_streak[playerId])
total_avg = total_hits / total_atBats
total_era = total_earnedRuns / total_inningsPitched * 9
total_whip = (
total_baseOnBallsPitching + total_hitByPitchPitching + total_hitsPitching
) / total_inningsPitched
total_obp = (total_hits + total_baseOnBalls + total_hitByPitch) / (
total_atBats + total_baseOnBalls + total_hitByPitch + total_sacFlies
)
total_slg = total_totalBases / total_atBats
avgplus_list.append(avg_list[-1] / total_avg * 100)
eraplus_list.append(total_era / era_list[-1] * 100)
whipplus_list.append(total_whip / whip_list[-1] * 100)
opsplus_list.append((slg / total_slg + obp / total_obp - 1) * 100)
if playerId == 660271:
last7_avg_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
/ sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
# era_list.append(cumulative_earnedRuns[playerId] / cumulative_inningsPitched[playerId] * 9)
slg = sum(
player_cumulative_stats_dict[playerId]["cumulative_totalBases"]
) / sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
obp = (
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"])
) / (
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"])
+ sum(player_cumulative_stats_dict[playerId]["cumulative_sacFlies"])
)
last7_ops_list.append(slg + obp)
# whip = (cumulative_baseOnBallsPitching[playerId] + cumulative_hitByPitchPitching[playerId] + cumulative_hitsPitching[playerId]) / cumulative_inningsPitched[playerId]
# whip_list.append(whip)
last7_hr_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hr"])
)
last7_hit_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
)
last7_rbi_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_rbi"])
)
# inningsPitched_list.append(cumulative_inningsPitched[playerId])
last7_atBats_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
last5_era_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"])
/ sum(
player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"]
)
* 9
)
last5_whip_list.append(
(
sum(
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
]
)
)
/ sum(
player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"]
)
)
last5_win_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_win"])
)
last5_loss_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_losses"])
)
elif row["primaryPositionName"] == "Pitcher":
last7_avg_list.append(float("nan"))
last7_ops_list.append(float("nan"))
last7_hr_list.append(float("nan"))
last7_hit_list.append(float("nan"))
last7_rbi_list.append(float("nan"))
last7_atBats_list.append(float("nan"))
if (
sum(player_cumulative_stats_dict[playerId]["cumulative_inningsPitched"])
== 0
):
last5_era_list.append(float("nan"))
last5_whip_list.append(float("nan"))
last5_win_list.append(float("nan"))
last5_loss_list.append(float("nan"))
else:
last5_era_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_earnedRuns"])
/ sum(
player_cumulative_stats_dict[playerId][
"cumulative_inningsPitched"
]
)
* 9
)
last5_whip_list.append(
(
sum(
player_cumulative_stats_dict[playerId][
"cumulative_baseOnBallsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitsPitching"
]
)
+ sum(
player_cumulative_stats_dict[playerId][
"cumulative_hitByPitchPitching"
]
)
)
/ sum(
player_cumulative_stats_dict[playerId][
"cumulative_inningsPitched"
]
)
)
last5_win_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_win"])
)
last5_loss_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_losses"])
)
else:
last5_era_list.append(float("nan"))
last5_whip_list.append(float("nan"))
last5_win_list.append(float("nan"))
last5_loss_list.append(float("nan"))
if sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"]) == 0:
last7_avg_list.append(float("nan"))
last7_ops_list.append(float("nan"))
last7_hr_list.append(float("nan"))
last7_hit_list.append(float("nan"))
last7_rbi_list.append(float("nan"))
last7_atBats_list.append(float("nan"))
else:
last7_avg_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
/ sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
# era_list.append(cumulative_earnedRuns[playerId] / cumulative_inningsPitched[playerId] * 9)
slg = sum(
player_cumulative_stats_dict[playerId]["cumulative_totalBases"]
) / sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
obp = (
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"]
)
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"]
)
) / (
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_baseOnBalls"]
)
+ sum(
player_cumulative_stats_dict[playerId]["cumulative_hitByPitch"]
)
+ sum(player_cumulative_stats_dict[playerId]["cumulative_sacFlies"])
)
last7_ops_list.append(slg + obp)
# whip = (cumulative_baseOnBallsPitching[playerId] + cumulative_hitByPitchPitching[playerId] + cumulative_hitsPitching[playerId]) / cumulative_inningsPitched[playerId]
# whip_list.append(whip)
last7_hr_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hr"])
)
last7_hit_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_hits"])
)
last7_rbi_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_rbi"])
)
# inningsPitched_list.append(cumulative_inningsPitched[playerId])
last7_atBats_list.append(
sum(player_cumulative_stats_dict[playerId]["cumulative_atBats"])
)
test["avg"] = avg_list
test["era"] = era_list
test["whip"] = whip_list
test["ops"] = ops_list
test["cumulative_hits"] = hit_list
test["cumulative_hr"] = hr_list
test["cumulative_rbi"] = rbi_list
test["cumulative_win"] = win_list
test["cumulative_loss"] = loss_list
test["pitch_win_pct"] = test["cumulative_win"] / (
test["cumulative_win"] + test["cumulative_loss"]
)
test["cumulative_save"] = save_list
test["cumulative_bs"] = bs_list
test["cumulative_h_streak"] = h_streak_list
test["last7_avg"] = last7_avg_list
test["last7_ops"] = last7_ops_list
test["last7_cumulative_hits"] = last7_hit_list
test["last7_cumulative_hr"] = last7_hr_list
test["last7_cumulative_rbi"] = last7_rbi_list
test["last5_era"] = last5_era_list
test["last5_whip"] = last5_whip_list
test["last5_cumulative_win"] = last5_win_list
test["last5_cumulative_loss"] = last5_loss_list
test["era+"] = eraplus_list
test["ops+"] = opsplus_list
test["whip+"] = whipplus_list
test["avg+"] = avgplus_list
# test['cumulative_hr_streak'] = hr_streak_list
# test['cumulative_base_streak'] = base_streak_list
test["label_playerId"] = test["playerId"].map(player2num)
test["label_primaryPositionName"] = test["primaryPositionName"].map(position2num)
test["label_teamId"] = test["teamId"].map(teamid2num)
test["label_homeId"] = test["homeId"].map(teamid2num)
test["label_awayId"] = test["awayId"].map(teamid2num)
test["label_status"] = test["status"].map(status2num)
test["label_typeDesc"] = test["typeDesc"].map(transdesc2num)
test["label_awardId"] = test["awardId"].map(awardid2num)
test["label_gameType"] = test["gameType"].map(awardid2num)
test["label_dayNight"] = test["dayNight"].map({"day": 0, "night": 1})
test = test.drop_duplicates("playerId")
test["streakCode"] = test["streakCode"].fillna("W0")
test["WinLose"] = test["streakCode"].str[0]
test["WinLose"] = test["WinLose"].map({"L": 0, "W": 1})
test["Streak"] = test["streakCode"].str[1].astype(int)
test["pct"] = test["pct"].astype(float)
test["Split"] = 0
test["label_last_year_award"] = test["award"].map(last_year_award2num)
# test['playoff_avg'] = np.nan
# test['playoff_ops'] = np.nan
# test['cumulative_playoff_hits'] = np.nan
# test['cumulative_playoff_hr'] = np.nan
# test['cumulative_playoff_rbi'] = np.nan
test_X_1 = test[feature_cols].drop(columns=["Split"]).values
# predict
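    # Average the NFOLDS fold models per target: each model contributes 1/NFOLDS of the final prediction.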
for i in range(NFOLDS):
if i == 0:
pred1 = model1_list[i].predict(test_X_1) / NFOLDS
pred2 = model2_list[i].predict(test_X_1) / NFOLDS
pred3 = model3_list[i].predict(test_X_1) / NFOLDS
pred4 = model4_list[i].predict(test_X_1) / NFOLDS
else:
pred1 += model1_list[i].predict(test_X_1) / NFOLDS
pred2 += model2_list[i].predict(test_X_1) / NFOLDS
pred3 += model3_list[i].predict(test_X_1) / NFOLDS
pred4 += model4_list[i].predict(test_X_1) / NFOLDS
# merge submission
sample_prediction_df["target1"] = np.clip(pred1, 0, 100)
sample_prediction_df["target2"] = np.clip(pred2, 0, 100)
sample_prediction_df["target3"] = np.clip(pred3, 0, 100)
sample_prediction_df["target4"] = np.clip(pred4, 0, 100)
sample_prediction_df = sample_prediction_df.fillna(0.0)
del sample_prediction_df["playerId"]
del sample_prediction_df["date"]
env.predict(sample_prediction_df)
sample_prediction_df
| false | 7 | 17,408 | 0 | 17,431 | 17,408 |
||
69179633
|
<jupyter_start><jupyter_text>HackerEarth Machine Learning - Exhibit A(rt)
Predict the cost to ship the sculptures
=======================================
It can be difficult to navigate the logistics when it comes to buying art. These include, but are not limited to, the following:
Effective collection management
Shipping the paintings, antiques, sculptures, and other collectibles to their respective destinations after purchase
Though many companies have made shipping consumer goods a relatively quick and painless procedure, the same rules do not always apply while shipping paintings or transporting antiques and collectibles.
### Task
You work for a company that sells sculptures that are acquired from various artists around the world. Your task is to predict the cost required to ship these sculptures to customers based on the information provided in the dataset.
### Data description
The dataset folder contains the following files:
train.csv: 6500 x 20
test.csv: 3500 x 19
sample_submission.csv: 5 x 2
### The columns provided in the dataset are as follows:
Customer Id
Artist Name
Artist Reputation
Height
Width
Weight
Material
Price Of Sculpture
Base Shipping Price
International
Express Shipment
Installation Included
Transport
Fragile
Customer Information
Remote Location
Scheduled Date
Delivery Date
Customer Location
Cost
### Evaluation metric
score = 100 * max(0, 1 - metrics.mean_squared_log_error(actual, predicted))
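A minimal sketch of this metric with scikit-learn (assuming `actual` and `predicted` are non-negative arrays; `mean_squared_log_error` is the standard sklearn function the formula refers to):
from sklearn import metrics
def competition_score(actual, predicted):
    # 100 * max(0, 1 - MSLE), as defined above
    return 100 * max(0, 1 - metrics.mean_squared_log_error(actual, predicted))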
Contest Link - [Exhibit A(rt)](https://www.hackerearth.com/challenges/competitive/hackerearth-machine-learning-challenge-predict-shipping-cost/machine-learning/predict-the-cost-to-ship-the-sculptures-12-e7728f5d/)
Kaggle dataset identifier: hackerearth-machine-learning-exhibit-art
<jupyter_code>import pandas as pd
df = pd.read_csv('hackerearth-machine-learning-exhibit-art/test.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 3500 entries, 0 to 3499
Data columns (total 19 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Customer Id 3500 non-null object
1 Artist Name 3500 non-null object
2 Artist Reputation 3278 non-null float64
3 Height 3381 non-null float64
4 Width 3359 non-null float64
5 Weight 3351 non-null float64
6 Material 3500 non-null object
7 Price Of Sculpture 3500 non-null float64
8 Base Shipping Price 3500 non-null float64
9 International 3500 non-null object
10 Express Shipment 3500 non-null object
11 Installation Included 3500 non-null object
12 Transport 3268 non-null object
13 Fragile 3500 non-null object
14 Customer Information 3500 non-null object
15 Remote Location 3500 non-null object
16 Scheduled Date 3500 non-null object
17 Delivery Date 3500 non-null object
18 Customer Location 3500 non-null object
dtypes: float64(6), object(13)
memory usage: 519.7+ KB
<jupyter_text>Examples:
{
"Customer Id": "fffe3400310033003300",
"Artist Name": "James Miller",
"Artist Reputation": 0.35000000000000003,
"Height": 53,
"Width": 18,
"Weight": 871,
"Material": "Wood",
"Price Of Sculpture": 5.98,
"Base Shipping Price": 19.11,
"International": "Yes",
"Express Shipment": "Yes",
"Installation Included": "No",
"Transport": "Airways",
"Fragile": "No",
"Customer Information": "Working Class",
"Remote Location": "No",
"Scheduled Date": "07/03/17",
"Delivery Date": "07/06/17",
"Customer Location": "Santoshaven, IA 63481"
}
{
"Customer Id": "fffe3600350035003400",
"Artist Name": "Karen Vetrano",
"Artist Reputation": 0.67,
"Height": 7,
"Width": 4,
"Weight": 108,
"Material": "Clay",
"Price Of Sculpture": 6.92,
"Base Shipping Price": 13.96,
"International": "No",
"Express Shipment": "No",
"Installation Included": "No",
"Transport": "Roadways",
"Fragile": "Yes",
"Customer Information": "Working Class",
"Remote Location": "No",
"Scheduled Date": "05/02/16",
"Delivery Date": "05/02/16",
"Customer Location": "Ericksonton, OH 98253"
}
{
"Customer Id": "fffe3700360030003500",
"Artist Name": "Roseanne Gaona",
"Artist Reputation": 0.61,
"Height": 6,
"Width": 5,
"Weight": 97,
"Material": "Aluminium",
"Price Of Sculpture": 4.23,
"Base Shipping Price": 13.62,
"International": "Yes",
"Express Shipment": "No",
"Installation Included": "No",
"Transport": "Airways",
"Fragile": "No",
"Customer Information": "Working Class",
"Remote Location": "No",
"Scheduled Date": "01/04/18",
"Delivery Date": "01/06/18",
"Customer Location": "APO AP 83453"
}
{
"Customer Id": "fffe350038003600",
"Artist Name": "Todd Almanza",
"Artist Reputation": 0.14,
"Height": 15,
"Width": 8,
"Weight": 757,
"Material": "Clay",
"Price Of Sculpture": 6.28,
"Base Shipping Price": 23.79,
"International": "No",
"Express Shipment": "Yes",
"Installation Included": "No",
"Transport": "Roadways",
"Fragile": "Yes",
"Customer Information": "Wealthy",
"Remote Location": "No",
"Scheduled Date": "09/14/17",
"Delivery Date": "09/17/17",
"Customer Location": "Antonioborough, AL 54778"
}
<jupyter_code>import pandas as pd
df = pd.read_csv('hackerearth-machine-learning-exhibit-art/train.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 6500 entries, 0 to 6499
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Customer Id 6500 non-null object
1 Artist Name 6500 non-null object
2 Artist Reputation 5750 non-null float64
3 Height 6125 non-null float64
4 Width 5916 non-null float64
5 Weight 5913 non-null float64
6 Material 5736 non-null object
7 Price Of Sculpture 6500 non-null float64
8 Base Shipping Price 6500 non-null float64
9 International 6500 non-null object
10 Express Shipment 6500 non-null object
11 Installation Included 6500 non-null object
12 Transport 5108 non-null object
13 Fragile 6500 non-null object
14 Customer Information 6500 non-null object
15 Remote Location 5729 non-null object
16 Scheduled Date 6500 non-null object
17 Delivery Date 6500 non-null object
18 Customer Location 6500 non-null object
19 Cost 6500 non-null float64
dtypes: float64(7), object(13)
memory usage: 1015.8+ KB
<jupyter_text>Examples:
{
"Customer Id": "fffe3900350033003300",
"Artist Name": "Billy Jenkins",
"Artist Reputation": 0.26,
"Height": 17,
"Width": 6.0,
"Weight": 4128.0,
"Material": "Brass",
"Price Of Sculpture": 13.91,
"Base Shipping Price": 16.27,
"International": "Yes",
"Express Shipment": "Yes",
"Installation Included": "No",
"Transport": "Airways",
"Fragile": "No",
"Customer Information": "Working Class",
"Remote Location": "No",
"Scheduled Date": "06/07/15",
"Delivery Date": "06/03/15",
"Customer Location": "New Michelle, OH 50777",
"Cost": -283.29
}
{
"Customer Id": "fffe3800330031003900",
"Artist Name": "Jean Bryant",
"Artist Reputation": 0.28,
"Height": 3,
"Width": 3.0,
"Weight": 61.0,
"Material": "Brass",
"Price Of Sculpture": 6.83,
"Base Shipping Price": 15.0,
"International": "No",
"Express Shipment": "No",
"Installation Included": "No",
"Transport": "Roadways",
"Fragile": "No",
"Customer Information": "Working Class",
"Remote Location": "No",
"Scheduled Date": "03/06/17",
"Delivery Date": "03/05/17",
"Customer Location": "New Michaelport, WY 12072",
"Cost": -159.96
}
{
"Customer Id": "fffe3600370035003100",
"Artist Name": "Laura Miller",
"Artist Reputation": 0.07,
"Height": 8,
"Width": 5.0,
"Weight": 237.0,
"Material": "Clay",
"Price Of Sculpture": 4.96,
"Base Shipping Price": 21.18,
"International": "No",
"Express Shipment": "No",
"Installation Included": "No",
"Transport": "Roadways",
"Fragile": "Yes",
"Customer Information": "Working Class",
"Remote Location": "Yes",
"Scheduled Date": "03/09/15",
"Delivery Date": "03/08/15",
"Customer Location": "Bowmanshire, WA 19241",
"Cost": -154.29
}
{
"Customer Id": "fffe350031003300",
"Artist Name": "Robert Chaires",
"Artist Reputation": 0.12,
"Height": 9,
"Width": NaN,
"Weight": NaN,
"Material": "Aluminium",
"Price Of Sculpture": 5.8100000000000005,
"Base Shipping Price": 16.31,
"International": "No",
"Express Shipment": "No",
"Installation Included": "No",
"Transport": null,
"Fragile": "No",
"Customer Information": "Wealthy",
"Remote Location": "Yes",
"Scheduled Date": "05/24/15",
"Delivery Date": "05/20/15",
"Customer Location": "East Robyn, KY 86375",
"Cost": -161.16
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Analyzing the Cost to Ship Sculptures
# In this project, I wanted to work on the sale of sculptures from all over the world. The aim is to predict the cost of delivering these sculptures to customers, taking shipping modes and similar features into account, since artworks are unlike other logistics products. In addition to the prediction task, I performed an exploratory data analysis.
# I chose this dataset because, unlike most cost-estimation data, it concerns artworks. There are 6500 observations in the training set and 3500 in the test set, with 20 features in total. Link: https://www.kaggle.com/oossiiris/hackerearth-machine-learning-exhibit-art
# **The columns provided in the dataset are as follows:**
# Customer Id : Id of the customer
# Artist Name : Creator of the sculpture (name, surname)
# Artist Reputation : Reputation of the artist (between 0 and 1)
# Height : Height of the sculpture
# Width : Width of the sculpture
# Weight : Weight of the sculpture
# Material : Material of the sculpture
# Price Of Sculpture
# Base Shipping Price
# International : International or not (yes, no)
# Express Shipment : Express shipment or not (yes, no)
# Installation Included : Installation included or not (yes, no)
# Transport : Transportation type
# Fragile : Fragile or not (yes, no)
# Customer Information : Type of customer (wealthy or working class)
# Remote Location : Remote location or not (yes, no)
# Scheduled Date
# Delivery Date
# Customer Location : Address of the customer
# Cost
# visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("../input/hackerearth-machine-learning-exhibit-art/train.csv")
test = pd.read_csv("../input/hackerearth-machine-learning-exhibit-art/test.csv")
display(train.head())
display(test.head())
# concated two dataframes with an indicator column to analyze with all data
df = pd.concat([train.assign(ind="train"), test.assign(ind="test")])
df.head()
# # Descriptive Statistics
# there are 7 numeric and 14 categorical variables
df.info()
# In the describe() output, the count row shows whether a feature has a significant number of missing values.
# The mean, std and quartile values give a first feel for the data.
# As we can see in the Cost column, some values are negative. We need to examine this.
df.describe()
# # Missing Values
train.columns[train.isnull().any()]
test.columns[test.isnull().any()]
df.columns[df.isnull().any()]
# The missing values in the Cost column correspond to the test rows whose cost has to be predicted, so we ignore that column here.
data_missings = df.filter(
[
"Artist Reputation",
"Height",
"Width",
"Weight",
"Material",
"Transport",
"Remote Location",
],
axis=1,
)
msno.bar(data_missings)
# helper function that summarizes the missing values per column
def values_table(data_missings):
mis_val = data_missings.isnull().sum()
mis_val_percent = 100 * data_missings.isnull().sum() / len(data_missings)
table = pd.concat([mis_val, mis_val_percent], axis=1)
table = table.rename(columns={0: "Missing Values", 1: "% Missing Value"})
table["Data Type"] = data_missings.dtypes
table = (
table[table.iloc[:, 1] != 0]
.sort_values("% Missing Value", ascending=False)
.round(1)
)
print(
"There are "
+ str(df.shape[1])
+ " columns and "
+ str(df.shape[0])
+ " rows in the dataset.\n"
+ str(table.shape[0])
+ " of these columns have missing variables."
)
return table
values_table(data_missings)
# Many features have a substantial number of missing values, so simply removing those rows would hurt prediction performance. I therefore think they should be filled with appropriate methods, although the model should also be evaluated after dropping the missing rows, for comparison. A small imputation sketch follows the commented-out experiment below.
# First experiment to remove missing values from data
# df_new=df.dropna()
# df_new.isnull().sum()
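# A minimal imputation sketch (an assumption, not the method validated in this notebook):
# numeric gaps filled with the median, categorical gaps with the mode, on a copy so the
# original df is left untouched. Cost stays NaN for the test rows by design.
df_imputed = df.copy()
for col in ["Artist Reputation", "Height", "Width", "Weight"]:
    df_imputed[col] = df_imputed[col].fillna(df_imputed[col].median())
for col in ["Material", "Transport", "Remote Location"]:
    df_imputed[col] = df_imputed[col].fillna(df_imputed[col].mode()[0])
df_imputed.isnull().sum()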
# checking duplicated values
df.duplicated().sum()
# checking unique values
df.nunique()
# # Visualization
# this chart shows the distribution of cost values (target variable)
plt.figure(figsize=(10, 6))
sns.distplot(df["Cost"])
plt.show()
# boxplots for outlier detection
df_numeric = df.filter(
[
"Artist Reputation",
"Height",
"Width",
"Weight",
"Price Of Sculpture",
"Base Shipping Price",
"Cost",
],
axis=1,
)
fig = plt.figure(figsize=(16, 16))
for i in range(len(df_numeric.columns)):
fig.add_subplot(3, 4, i + 1)
sns.boxplot(y=df_numeric.iloc[:, i])
plt.tight_layout()
plt.show()
# As we can see, some variables contain extreme values. Since these are physical quantities such as weight, height, width and price, they may well be genuine. After checking their effect on the model, outliers can be removed or adjusted, but this needs to be examined in detail.
df.loc[:, ["Height", "Width", "Weight", "Price Of Sculpture", "Cost"]].describe()
# Visualizing the categorical features with histograms to see how the values are spread across their classes
df_cat = df.filter(
[
"Material",
"International",
"Express Shipment",
"Installation Included",
"Transport",
"Fragile",
"Customer Information",
"Remote Location",
],
axis=1,
)
fig = plt.figure(figsize=(15, 30))
for i in range(len(df_cat.columns)):
fig.add_subplot(9, 2, i + 1)
df_cat.iloc[:, i].hist(color="orange")
plt.xlabel(df_cat.columns[i])
plt.show()
# # Visualization of transport-related features
# pie chart of transport-related features
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10, 9))
ax1.pie(
df_cat["Transport"].value_counts(),
explode=(0.1, 0.1, 0.1),
labels=df_cat["Transport"].value_counts().index,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax1.axis("equal")
ax2.pie(
df_cat["International"].value_counts(),
explode=(0.1, 0.1),
labels=df_cat["International"].value_counts().index,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax2.axis("equal")
ax3.pie(
df_cat["Remote Location"].value_counts(),
explode=(0.1, 0.1),
labels=df_cat["Remote Location"].value_counts().index,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax3.axis("equal")
ax4.pie(
df_cat["Express Shipment"].value_counts(),
explode=(0.1, 0.1),
labels=df_cat["Express Shipment"].value_counts().index,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax4.axis("equal")
ax1.title.set_text("Transportation")
ax3.title.set_text("Remote Location")
ax2.title.set_text("International")
ax4.title.set_text("Express Shipment")
plt.show()
# As we can see, most transport takes place over short distances within the nation, by roadways. From here:
# - We can look at total shipping prices by grouping different features (by location, route, country, etc.)
# - Is the shipping price affected by long-distance and international transport, as expected?
#
df_2 = df.groupby(["International", "Remote Location"], as_index=False)[
"Base Shipping Price"
].sum()
df_2
df_1 = df.groupby(["Transport", "International"], as_index=False)[
"Base Shipping Price"
].sum()
a = df_1.pivot(
index="Transport", values="Base Shipping Price", columns=["International"]
).plot(kind="barh", figsize=(12, 6), cmap="icefire")
plt.title(
"Total Shipping Price based on International Ways and Transportation Type ",
fontsize=14,
)
for p in a.patches:
sum_ = "{:.1f}".format(p.get_width())
x, y = p.get_height() + p.get_width() - 0.5, p.get_y() + 0.1
a.annotate(sum_, (x, y), ha="center")
plt.show()
# As the graph suggests, roadway transportation generates more cost for national than for international shipments, while waterway transportation generates more cost for international shipments. This may be due to customer locations, but also to sales policy.
df.columns
# create new feature (state) from existing column (customer location)
df.insert(
loc=14, column="State", value=df["Customer Location"].map(lambda x: x.split()[-2])
)
df.drop("Customer Location", inplace=True, axis=1)
df.head()
# With the State column, I want to see which locations receive the most deliveries.
plt.figure(figsize=(10, 20))
a = sns.countplot(
y="State", data=df, palette="Set3", order=df["State"].value_counts().index
)
plt.title("States to Be Delivered", fontsize=14)
for p in a.patches:
count = p.get_width()
x, y = p.get_x() + 10 + p.get_width(), p.get_y() + 0.5
a.annotate(count, (x, y), ha="center")
plt.show()
# Most deliveries have gone to these states: AP, AA and AE. Given addresses such as "APO AP", these are most likely the US military codes (Armed Forces Pacific, Armed Forces Americas, Armed Forces Europe).
# From here we can examine how costs vary by state, see which artist sells most in which state, and build a customer portfolio per state. A short sketch of these ideas follows below.
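# A short sketch of the ideas above ("train_rows" and "artist_state_sales" are helper names
# introduced here). Cost only exists for the training rows, so they are selected via "ind".
train_rows = df[df["ind"] == "train"]
train_rows.groupby("State")["Cost"].mean().sort_values(ascending=False).head(10)
artist_state_sales = df.groupby(["State", "Artist Name"])["Customer Id"].count()
artist_state_sales.sort_values(ascending=False).head(10)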
plt.figure(figsize=(10, 20))
a = sns.countplot(
y="State",
data=df,
hue="International",
palette="Set3",
order=df["State"].value_counts().index,
)
plt.title("States by number of deliveries, whether international or not", fontsize=14)
for p in a.patches:
count = p.get_width()
x, y = p.get_x() + 2 + p.get_width(), p.get_y() + 0.5
a.annotate(count, (x, y), ha="center")
plt.show()
plt.figure(figsize=(10, 20))
a = sns.countplot(
y="State",
data=df,
hue="Remote Location",
palette="Set2",
order=df["State"].value_counts().index,
)
plt.title("States by number of deliveries, whether remote or not", fontsize=14)
for p in a.patches:
count = p.get_width()
x, y = p.get_x() + 5 + p.get_width(), p.get_y() + 0.5
a.annotate(count, (x, y), ha="center")
plt.show()
# As these charts show, some destination states appear both as long-distance/international and as short-distance/domestic deliveries. The likely explanation is that the shipments originate from different places, which is a factor that will affect the cost, so I think the origin locations of the shipments should also be included in the data.
# # Visualization of product-related features
plt.figure(figsize=(10, 6))
a = sns.countplot(x="Material", data=df, palette="PiYG_r")
plt.title("Number of materials of sculpture", fontsize=14)
for p in a.patches:
count = p.get_height()
x, y = p.get_x() + p.get_width() - 0.2, p.get_y() + 1200
a.annotate(count, (x, y), ha="right")
plt.show()
plt.figure(figsize=(10, 6))
a = sns.countplot(x="Material", data=df[df["Fragile"] == "Yes"], palette="Set3")
plt.title("Number of materials of sculpture", fontsize=14)
for p in a.patches:
count = p.get_height()
x, y = p.get_x() + p.get_width() - 0.3, p.get_y() + 50
a.annotate(count, (x, y), ha="right")
plt.show()
plt.figure(figsize=(16, 6))
sns.scatterplot(
data=df,
x="Price Of Sculpture",
y="Weight",
hue="Material",
size="Material",
sizes=(20, 200),
legend="full",
)
df.insert(loc=3, column="Sculpture Size", value=df["Height"] * df["Width"])
df.head()
plt.figure(figsize=(16, 6))
sns.scatterplot(
data=df,
x="Price Of Sculpture",
y="Sculpture Size",
hue="Material",
size="Material",
sizes=(20, 200),
legend="full",
)
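# A quick numeric complement to the scatter plots above (a sketch, not a firm conclusion):
# pairwise correlations between the engineered size feature, weight, price and cost.
df[["Sculpture Size", "Weight", "Price Of Sculpture", "Cost"]].corr()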
# Dividing the Artist Reputation column into four groups
bins = [0, 0.25, 0.5, 0.75, 1]
labels = [
"Not enough well-known",
"Enough well-known",
"Well-known",
"Extremely well-known",
]
df["Artist Reputation Type"] = pd.cut(
df["Artist Reputation"], bins, labels=labels, include_lowest=True
)
df.head()
plt.figure(figsize=(10, 6))
df_2 = df.groupby(["Artist Reputation Type"], as_index=False)["Artist Name"].count()
a = sns.barplot(
y="Artist Reputation Type",
x="Artist Name",
data=df_2,
palette="PiYG_r",
order=df["Artist Reputation Type"].value_counts().index,
)
plt.title("Number of Artists by their reputation", fontsize=14)
for p in a.patches:
count = p.get_width()
x, y = p.get_x() + 100 + p.get_width() + 25, p.get_y() + 0.45
a.annotate(count, (x, y), ha="right")
plt.show()
# The well-known reputation level contains the largest number of artists, followed by the slightly less well-known level. So let's see how the artists' reputation and the price of their sculptures affect the cost.
plt.figure(figsize=(20, 8))
sns.scatterplot(
data=df,
x="Price Of Sculpture",
y="Cost",
hue="Artist Reputation Type",
size="Artist Reputation Type",
sizes=(20, 200),
legend="full",
)
# # Visualization of customer-related features
# pie chart of customer-related feature
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
fig, ax1 = plt.subplots(figsize=(8, 5))
ax1.pie(
df["Customer Information"].value_counts(),
explode=(0.1, 0.1),
labels=df["Customer Information"].value_counts().index,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax1.axis("equal")
ax1.title.set_text("Customer Type")
plt.show()
# Most sales are made to customers belonging to the working class.
# Here we can look at which customer class prefers which artist-recognition level.
#
df.pivot_table(
index=["Artist Reputation Type"],
columns=["Customer Information"],
aggfunc={"Customer Id": "count"},
fill_value=0,
)
df.columns
# converting dates to datetime format
df["Scheduled Date"] = pd.to_datetime(df["Scheduled Date"])
df["Delivery Date"] = pd.to_datetime(df["Delivery Date"])
# checking if products are delivered on time
df.insert(
loc=20,
column="ScheduleDiff",
value=(df["Scheduled Date"] - df["Delivery Date"]).map(lambda x: str(x).split()[0]),
)
df["ScheduleDiff"] = pd.to_numeric(df["ScheduleDiff"])
df.head()
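# A hedged follow-up on the "delivered on time" question; "on_time" is a helper name introduced
# here and marks deliveries that arrived on or before the scheduled date (ScheduleDiff >= 0).
on_time = df["ScheduleDiff"] >= 0
on_time.value_counts(normalize=True)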
df_3 = df.groupby("Delivery Date")["Cost"].sum().reset_index()
df_4 = df.groupby("Scheduled Date")["Cost"].sum().reset_index()
df_4
import plotly.express as px
fig = px.line(df_3, x="Delivery Date", y="Cost")
fig.show()
df.insert(loc=20, column="Year", value=df["Delivery Date"].dt.year)
df.insert(loc=21, column="Month", value=df["Delivery Date"].dt.month)
df.head()
df_5 = df.groupby(["Month", "Year"], as_index=False)["Customer Id"].count()
df_5
plt.figure(figsize=(15, 8))
sns.lineplot(
data=df_5,
x="Year",
y="Customer Id",
hue="Month",
style="Month",
palette="gist_stern_r",
)
plt.title("Number of Deliveries between 2014-2019")
df.ScheduleDiff.value_counts()
<start_data_description><data_path>hackerearth-machine-learning-exhibit-art/test.csv:
<column_names>
['Customer Id', 'Artist Name', 'Artist Reputation', 'Height', 'Width', 'Weight', 'Material', 'Price Of Sculpture', 'Base Shipping Price', 'International', 'Express Shipment', 'Installation Included', 'Transport', 'Fragile', 'Customer Information', 'Remote Location', 'Scheduled Date', 'Delivery Date', 'Customer Location']
<column_types>
{'Customer Id': 'object', 'Artist Name': 'object', 'Artist Reputation': 'float64', 'Height': 'float64', 'Width': 'float64', 'Weight': 'float64', 'Material': 'object', 'Price Of Sculpture': 'float64', 'Base Shipping Price': 'float64', 'International': 'object', 'Express Shipment': 'object', 'Installation Included': 'object', 'Transport': 'object', 'Fragile': 'object', 'Customer Information': 'object', 'Remote Location': 'object', 'Scheduled Date': 'object', 'Delivery Date': 'object', 'Customer Location': 'object'}
<dataframe_Summary>
{'Artist Reputation': {'count': 3278.0, 'mean': 0.4632794386821233, 'std': 0.27228676277452385, 'min': 0.0, '25%': 0.23, '50%': 0.45, '75%': 0.68, 'max': 1.0}, 'Height': {'count': 3381.0, 'mean': 21.275066548358474, 'std': 11.689804743612909, 'min': 3.0, '25%': 12.0, '50%': 20.0, '75%': 29.0, 'max': 65.0}, 'Width': {'count': 3359.0, 'mean': 9.371836856207205, 'std': 5.231694760541732, 'min': 2.0, '25%': 6.0, '50%': 8.0, '75%': 12.0, 'max': 48.0}, 'Weight': {'count': 3351.0, 'mean': 374966.48761563713, 'std': 2517255.532619047, 'min': 4.0, '25%': 489.5, '50%': 2929.0, '75%': 33406.5, 'max': 64595001.0}, 'Price Of Sculpture': {'count': 3500.0, 'mean': 1059.6086457142858, 'std': 7409.348267058369, 'min': 3.0, '25%': 5.16, '50%': 7.12, '75%': 81.19500000000001, 'max': 227254.24}, 'Base Shipping Price': {'count': 3500.0, 'mean': 36.35290857142857, 'std': 26.299318229784124, 'min': 10.0, '25%': 16.87, '50%': 23.055, '75%': 55.7425, 'max': 99.98}}
<dataframe_info>
RangeIndex: 3500 entries, 0 to 3499
Data columns (total 19 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Customer Id 3500 non-null object
1 Artist Name 3500 non-null object
2 Artist Reputation 3278 non-null float64
3 Height 3381 non-null float64
4 Width 3359 non-null float64
5 Weight 3351 non-null float64
6 Material 3500 non-null object
7 Price Of Sculpture 3500 non-null float64
8 Base Shipping Price 3500 non-null float64
9 International 3500 non-null object
10 Express Shipment 3500 non-null object
11 Installation Included 3500 non-null object
12 Transport 3268 non-null object
13 Fragile 3500 non-null object
14 Customer Information 3500 non-null object
15 Remote Location 3500 non-null object
16 Scheduled Date 3500 non-null object
17 Delivery Date 3500 non-null object
18 Customer Location 3500 non-null object
dtypes: float64(6), object(13)
memory usage: 519.7+ KB
<some_examples>
{'Customer Id': {'0': 'fffe3400310033003300', '1': 'fffe3600350035003400', '2': 'fffe3700360030003500', '3': 'fffe350038003600'}, 'Artist Name': {'0': 'James Miller', '1': 'Karen Vetrano', '2': 'Roseanne Gaona', '3': 'Todd Almanza'}, 'Artist Reputation': {'0': 0.35, '1': 0.67, '2': 0.61, '3': 0.14}, 'Height': {'0': 53.0, '1': 7.0, '2': 6.0, '3': 15.0}, 'Width': {'0': 18.0, '1': 4.0, '2': 5.0, '3': 8.0}, 'Weight': {'0': 871.0, '1': 108.0, '2': 97.0, '3': 757.0}, 'Material': {'0': 'Wood', '1': 'Clay', '2': 'Aluminium', '3': 'Clay'}, 'Price Of Sculpture': {'0': 5.98, '1': 6.92, '2': 4.23, '3': 6.28}, 'Base Shipping Price': {'0': 19.11, '1': 13.96, '2': 13.62, '3': 23.79}, 'International': {'0': 'Yes', '1': 'No', '2': 'Yes', '3': 'No'}, 'Express Shipment': {'0': 'Yes', '1': 'No', '2': 'No', '3': 'Yes'}, 'Installation Included': {'0': 'No', '1': 'No', '2': 'No', '3': 'No'}, 'Transport': {'0': 'Airways', '1': 'Roadways', '2': 'Airways', '3': 'Roadways'}, 'Fragile': {'0': 'No', '1': 'Yes', '2': 'No', '3': 'Yes'}, 'Customer Information': {'0': 'Working Class', '1': 'Working Class', '2': 'Working Class', '3': 'Wealthy'}, 'Remote Location': {'0': 'No', '1': 'No', '2': 'No', '3': 'No'}, 'Scheduled Date': {'0': '07/03/17', '1': '05/02/16', '2': '01/04/18', '3': '09/14/17'}, 'Delivery Date': {'0': '07/06/17', '1': '05/02/16', '2': '01/06/18', '3': '09/17/17'}, 'Customer Location': {'0': 'Santoshaven, IA 63481', '1': 'Ericksonton, OH 98253', '2': 'APO AP 83453', '3': 'Antonioborough, AL 54778'}}
<end_description>
<start_data_description><data_path>hackerearth-machine-learning-exhibit-art/train.csv:
<column_names>
['Customer Id', 'Artist Name', 'Artist Reputation', 'Height', 'Width', 'Weight', 'Material', 'Price Of Sculpture', 'Base Shipping Price', 'International', 'Express Shipment', 'Installation Included', 'Transport', 'Fragile', 'Customer Information', 'Remote Location', 'Scheduled Date', 'Delivery Date', 'Customer Location', 'Cost']
<column_types>
{'Customer Id': 'object', 'Artist Name': 'object', 'Artist Reputation': 'float64', 'Height': 'float64', 'Width': 'float64', 'Weight': 'float64', 'Material': 'object', 'Price Of Sculpture': 'float64', 'Base Shipping Price': 'float64', 'International': 'object', 'Express Shipment': 'object', 'Installation Included': 'object', 'Transport': 'object', 'Fragile': 'object', 'Customer Information': 'object', 'Remote Location': 'object', 'Scheduled Date': 'object', 'Delivery Date': 'object', 'Customer Location': 'object', 'Cost': 'float64'}
<dataframe_Summary>
{'Artist Reputation': {'count': 5750.0, 'mean': 0.4618504347826087, 'std': 0.26578112962844136, 'min': 0.0, '25%': 0.24, '50%': 0.45, '75%': 0.68, 'max': 1.0}, 'Height': {'count': 6125.0, 'mean': 21.766204081632655, 'std': 11.96819214259749, 'min': 3.0, '25%': 12.0, '50%': 20.0, '75%': 30.0, 'max': 73.0}, 'Width': {'count': 5916.0, 'mean': 9.617647058823529, 'std': 5.417000221775418, 'min': 2.0, '25%': 6.0, '50%': 8.0, '75%': 12.0, 'max': 50.0}, 'Weight': {'count': 5913.0, 'mean': 400694.8219178082, 'std': 2678081.227562183, 'min': 3.0, '25%': 503.0, '50%': 3102.0, '75%': 36456.0, 'max': 117927869.0}, 'Price Of Sculpture': {'count': 6500.0, 'mean': 1192.420090076313, 'std': 8819.6167502839, 'min': 3.0, '25%': 5.23, '50%': 8.024999999999999, '75%': 89.47, 'max': 382385.67}, 'Base Shipping Price': {'count': 6500.0, 'mean': 37.407173846153846, 'std': 26.873518631336637, 'min': 10.0, '25%': 16.7, '50%': 23.505000000000003, '75%': 57.905, 'max': 99.98}, 'Cost': {'count': 6500.0, 'mean': 17139.195667692307, 'std': 240657.86847316817, 'min': -880172.65, '25%': 188.44, '50%': 382.065, '75%': 1156.115, 'max': 11143428.25}}
<dataframe_info>
RangeIndex: 6500 entries, 0 to 6499
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Customer Id 6500 non-null object
1 Artist Name 6500 non-null object
2 Artist Reputation 5750 non-null float64
3 Height 6125 non-null float64
4 Width 5916 non-null float64
5 Weight 5913 non-null float64
6 Material 5736 non-null object
7 Price Of Sculpture 6500 non-null float64
8 Base Shipping Price 6500 non-null float64
9 International 6500 non-null object
10 Express Shipment 6500 non-null object
11 Installation Included 6500 non-null object
12 Transport 5108 non-null object
13 Fragile 6500 non-null object
14 Customer Information 6500 non-null object
15 Remote Location 5729 non-null object
16 Scheduled Date 6500 non-null object
17 Delivery Date 6500 non-null object
18 Customer Location 6500 non-null object
19 Cost 6500 non-null float64
dtypes: float64(7), object(13)
memory usage: 1015.8+ KB
<some_examples>
{'Customer Id': {'0': 'fffe3900350033003300', '1': 'fffe3800330031003900', '2': 'fffe3600370035003100', '3': 'fffe350031003300'}, 'Artist Name': {'0': 'Billy Jenkins', '1': 'Jean Bryant', '2': 'Laura Miller', '3': 'Robert Chaires'}, 'Artist Reputation': {'0': 0.26, '1': 0.28, '2': 0.07, '3': 0.12}, 'Height': {'0': 17.0, '1': 3.0, '2': 8.0, '3': 9.0}, 'Width': {'0': 6.0, '1': 3.0, '2': 5.0, '3': None}, 'Weight': {'0': 4128.0, '1': 61.0, '2': 237.0, '3': None}, 'Material': {'0': 'Brass', '1': 'Brass', '2': 'Clay', '3': 'Aluminium'}, 'Price Of Sculpture': {'0': 13.91, '1': 6.83, '2': 4.96, '3': 5.81}, 'Base Shipping Price': {'0': 16.27, '1': 15.0, '2': 21.18, '3': 16.31}, 'International': {'0': 'Yes', '1': 'No', '2': 'No', '3': 'No'}, 'Express Shipment': {'0': 'Yes', '1': 'No', '2': 'No', '3': 'No'}, 'Installation Included': {'0': 'No', '1': 'No', '2': 'No', '3': 'No'}, 'Transport': {'0': 'Airways', '1': 'Roadways', '2': 'Roadways', '3': None}, 'Fragile': {'0': 'No', '1': 'No', '2': 'Yes', '3': 'No'}, 'Customer Information': {'0': 'Working Class', '1': 'Working Class', '2': 'Working Class', '3': 'Wealthy'}, 'Remote Location': {'0': 'No', '1': 'No', '2': 'Yes', '3': 'Yes'}, 'Scheduled Date': {'0': '06/07/15', '1': '03/06/17', '2': '03/09/15', '3': '05/24/15'}, 'Delivery Date': {'0': '06/03/15', '1': '03/05/17', '2': '03/08/15', '3': '05/20/15'}, 'Customer Location': {'0': 'New Michelle, OH 50777', '1': 'New Michaelport, WY 12072', '2': 'Bowmanshire, WA 19241', '3': 'East Robyn, KY 86375'}, 'Cost': {'0': -283.29, '1': -159.96, '2': -154.29, '3': -161.16}}
<end_description>
69179955
import numpy as np
import pandas as pd
import re, html, unicodedata, warnings
from tqdm import tqdm

warnings.filterwarnings("ignore")
from tensorflow.keras import Sequential
from tensorflow.keras.layers import LeakyReLU, Dropout, LSTM, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from sklearn.preprocessing import MinMaxScaler
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

# NLTK is needed by the tokenization / stop-word code further below
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize

import tensorflow

tensorflow.keras.backend.set_epsilon(1)
np.random.seed(0)
from lightgbm import LGBMRegressor
main_train = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
main_test = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv")
# main_train = pd.read_csv('train.csv')
# main_test = pd.read_csv('test.csv')
import re
from string import digits

# remove newlines and lowercase the raw excerpt text
def clean(text):
    text = re.sub("\n", "", text)
    text = text.lower()
    return text
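# Quick illustrative call on a made-up string: newlines are removed and the text is lowercased.
# Note that removing "\n" without inserting a space glues together words split across lines.
clean("First line\nSecond LINE")  # -> 'first linesecond line'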
def get_text_data_parameters(data, stop_words):
"""
Calculates some numeric parameters of paragraphs of the given series object.
"""
text_shortage = []
quotes = []
sentences = []
sent_length = []
word_length = []
lemma_length = []
# new_data = []
for row in data:
        # Amount of quotes divided by 2 to determine if there is any dialogue
        quotes.append(row.count('"') / 2)
        # The original, raw text paragraph length
initial_length = len(row)
# Using nltk tokenizer to split a text into sentences to determine their amount
num_sent = len(sent_tokenize(row))
sentences.append(num_sent)
# Getting rid of all noncharacter symbols and splitting a text into
# words using nltk tokenizer and getting amount of words
row = re.sub("[^a-zA-Z]", " ", row)
row = row.lower()
row = word_tokenize(row)
num_words = len(row)
# Calculating mean amount of words per sentence and mean word length
sent_length.append(num_words / num_sent)
word_length.append(initial_length / num_words)
# Splitting text data into words and dropping stop words
row = [word for word in row if not word in stop_words]
# Words lemmatisation
lemma = nltk.WordNetLemmatizer()
row = [lemma.lemmatize(word) for word in row]
num_lemmas = len(row)
row = " ".join(row)
# Text length after cleaning and lemmatisation
processed_length = len(row)
# Calculating mean lemma length and amount of text shrinkage after the processing
lemma_length.append(processed_length / num_lemmas)
text_shortage.append(processed_length / initial_length)
# Creating a dataframe containing all calculated parameters
result_df = pd.concat(
[
pd.Series(text_shortage),
pd.Series(quotes),
pd.Series(sentences),
pd.Series(sent_length),
pd.Series(word_length),
pd.Series(lemma_length),
],
axis=1,
)
result_df.columns = [
"text_shortage",
"num_quotes",
"num_sentences",
"sent_length",
"mean_word_length",
"mean_lemma_length",
]
return result_df
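# A hedged usage sketch of the helper above, mirroring the call that is commented out further
# below. It assumes the NLTK corpora (punkt, stopwords, wordnet) are available, as on Kaggle
# images, and runs on a small sample of the raw excerpts; the names here are introduced for the demo.
sample_stop_words = set(stopwords.words("english"))
sample_text_params = get_text_data_parameters(main_train["excerpt"].head(50), sample_stop_words)
sample_text_params.head()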
train_df = main_train
train_df["clean_text"] = train_df["excerpt"].apply(lambda x: clean(x))
train_df = train_df.drop(
["url_legal", "license", "excerpt", "standard_error", "id"], axis=True
)
# find the maximum number of words in any cleaned excerpt
maxWordsCount = 0
for i in range(0, len(train_df)):
words = train_df.iloc[i, 1].split()
if len(words) > maxWordsCount:
maxWordsCount = len(words)
maxWordsCount
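# Equivalent vectorized computation of the same quantity (a sketch; should match the loop above)
train_df["clean_text"].str.split().str.len().max()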
# expand every excerpt into one column per word position, padding missing positions with 0
for i in range(0, len(train_df)):
words = train_df.iloc[i, 1].split()
for t in range(0, maxWordsCount):
try:
train_df.loc[i, "word" + str(t)] = words[t]
except IndexError:
train_df.loc[i, "word" + str(t)] = 0
# build a word -> integer encoder over all the word columns; stop words are mapped to 0
stop_words = set(stopwords.words("english"))
allWordsEncoder = {}
words_to_delete = stop_words
allWordsCounter = 1
for i, item in train_df.items():
if i not in ["target", "clean_text"]:
for t in item.values:
if t in words_to_delete:
allWordsEncoder[t] = 0
else:
try:
allWordsEncoder[t]
except KeyError:
allWordsEncoder[t] = allWordsCounter
allWordsCounter = allWordsCounter + 1
# replace the word columns (which start at column index 2, after target and clean_text) with their integer codes
for l in range(0, len(train_df)):
for m in range(0 + 2, maxWordsCount + 2):
needWord = train_df.iloc[l, m]
try:
searchWordIndex = allWordsEncoder[needWord]
train_df.iloc[l, m] = searchWordIndex
except KeyError:
train_df.iloc[l, m] = 0
stop_words = set(stopwords.words("english"))
# text_params = get_text_data_parameters(train_df["clean_text"].copy(), stop_words)
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true)))
# hand-crafted length features per excerpt: text length, average word length, word count
# and counts of words with at least 7 / 8 / 10 / 12 characters
for i in range(0, len(train_df)):
text = train_df.iloc[i, 1]
avgWordLen = 0
wordsWithLenMin7 = 0
wordsWithLenMin8 = 0
wordsWithLenMin10 = 0
wordsWithLenMin12 = 0
words = text.split()
for w in range(0, len(words)):
avgWordLen = avgWordLen + len(words[w])
if len(words[w]) >= 7:
wordsWithLenMin7 = wordsWithLenMin7 + 1
if len(words[w]) >= 10:
wordsWithLenMin10 = wordsWithLenMin10 + 1
if len(words[w]) >= 8:
wordsWithLenMin8 = wordsWithLenMin8 + 1
if len(words[w]) >= 12:
wordsWithLenMin12 = wordsWithLenMin12 + 1
train_df.loc[i, "text_len"] = len(text)
train_df.loc[i, "avg_word_len"] = avgWordLen / len(words)
train_df.loc[i, "words_count"] = len(words)
train_df.loc[i, "wordsWithLenMin7"] = wordsWithLenMin7
train_df.loc[i, "wordsWithLenMin10"] = wordsWithLenMin10
train_df.loc[i, "wordsWithLenMin8"] = wordsWithLenMin8
train_df.loc[i, "wordsWithLenMin12"] = wordsWithLenMin12
# train_df = pd.merge(train_df, text_params, right_index=True, left_index=True)
df = train_df
df = df.fillna(0)
df = df.drop(["clean_text"], axis=True)
Y = df[["target"]]
df = df.drop(["target"], axis=True)
X = df
from tensorflow.keras import backend as K

# RMSE metric for Keras (the competition is scored on RMSE).
def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

# Separate scalers for features and target: re-fitting the feature scaler on Y would
# break the transform of the test features later on.
min_max_scaler = preprocessing.MinMaxScaler()
min_max_scaler_Y = preprocessing.MinMaxScaler()
X_scaler = min_max_scaler.fit_transform(X.values)
Y_scaler = min_max_scaler_Y.fit_transform(Y.values)
X_train, X_val_and_test, Y_train, Y_val_and_test = train_test_split(
X_scaler, Y, test_size=0.2
)
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
num_units = 50
activation_function = "sigmoid"
optimizer = "adam"
loss_function = "mse"
batch_size = 8
num_epochs = 150
# Initialize the RNN
regressor = Sequential()
# Adding the input layer and the LSTM layer
regressor.add(LSTM(units=num_units, activation=activation_function))
regressor.add(Dense(units=num_units, activation=activation_function))
regressor.add(Dropout(0.1))
# Adding the output layer
regressor.add(Dense(units=1))
# Compiling the RNN
regressor.compile(
    optimizer=optimizer, loss=loss_function, metrics=[root_mean_squared_error]
)
# Using the training set to train the model
regressor.fit(X_train, Y_train, batch_size=batch_size, epochs=num_epochs)
ddd = main_test
ddd["clean_text"] = ddd["excerpt"].apply(clean)
ddd = ddd.drop(["url_legal", "license", "excerpt", "id"], axis=1)
# ddd = ddd.drop(['clean_text'], axis = True)
for i in range(0, len(ddd)):
words = ddd.iloc[i, 0].split()
for t in range(0, maxWordsCount):
try:
ddd.loc[i, "word" + str(t)] = words[t]
except IndexError:
ddd.loc[i, "word" + str(t)] = 0
ddd = ddd.astype(str)
# Encode the test word columns with the same encoder. In ddd the word columns sit at
# positions 1..maxWordsCount (clean_text is at position 0), so the last column must be
# included in the range as well.
for l in range(0, len(ddd)):
    for m in range(1, maxWordsCount + 1):
        needWord = ddd.iloc[l, m]
        try:
            searchWordIndex = allWordsEncoder[needWord]
            ddd.iloc[l, m] = searchWordIndex
        except KeyError:
            ddd.iloc[l, m] = 0
for i in range(0, len(ddd)):
text = ddd.iloc[i, 0]
avgWordLen = 0
wordsWithLenMin7 = 0
wordsWithLenMin8 = 0
wordsWithLenMin10 = 0
wordsWithLenMin12 = 0
words = text.split()
for w in range(0, len(words)):
avgWordLen = avgWordLen + len(words[w])
if len(words[w]) >= 7:
wordsWithLenMin7 = wordsWithLenMin7 + 1
if len(words[w]) >= 10:
wordsWithLenMin10 = wordsWithLenMin10 + 1
if len(words[w]) >= 8:
wordsWithLenMin8 = wordsWithLenMin8 + 1
if len(words[w]) >= 12:
wordsWithLenMin12 = wordsWithLenMin12 + 1
ddd.loc[i, "text_len"] = len(text)
ddd.loc[i, "avg_word_len"] = avgWordLen / len(words)
ddd.loc[i, "words_count"] = len(words)
ddd.loc[i, "wordsWithLenMin7"] = wordsWithLenMin7
ddd.loc[i, "wordsWithLenMin10"] = wordsWithLenMin10
ddd.loc[i, "wordsWithLenMin8"] = wordsWithLenMin8
ddd.loc[i, "wordsWithLenMin12"] = wordsWithLenMin12
# text_params_ddd = get_text_data_parameters(ddd["clean_text"].copy(), stop_words)
# ddd = pd.merge(ddd, text_params_ddd, right_index=True, left_index=True)
ddd
ddd = ddd.drop(["clean_text"], axis=1)
X_ddd_scaler = min_max_scaler.transform(ddd.values)
X_ddd_scaler_reshaped = np.reshape(
X_ddd_scaler, (X_ddd_scaler.shape[0], 1, X_ddd_scaler.shape[1])
)
predicted_targets = regressor.predict(X_ddd_scaler_reshaped)
predicted_targets
finish = main_test
finish = finish.drop(["url_legal", "license", "excerpt", "clean_text"], axis=1)
finish["target"] = predicted_targets
finish.to_csv("submission.csv", index=False)
# # Titanic - 1st Attempt
# ## Read data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
train_data = pd.read_csv("../input/titanic/train.csv")
test_data = pd.read_csv("../input/titanic/test.csv")
# ## Data Visualization
train_data.head()
test_data.head()
# ## Handle missing values
train_data.isna().sum()
test_data.isna().sum()
train_data = train_data.fillna({"Embarked": "S", "Age": train_data["Age"].median()})
test_data = test_data.fillna(
{"Fare": test_data["Fare"].mean(), "Age": test_data["Age"].median()}
)
# ## Feature Engineering
print("Total records - ", len(train_data.PassengerId))
print("Unique values in PassengerId col - ", len(train_data.PassengerId.unique()))
print("Unique values in Name col - ", len(train_data.Name.unique()))
# - Dropping the following values since all of these are unique.
possible_drops = ["PassengerId", "Name"]
dfs = [train_data, test_data]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3)
fig.set_size_inches(15, 5)
sns.barplot(x="Sex", y="Survived", data=train_data, ax=ax1)
ax1.set(title="Survived - Male, Female")
sns.barplot(x="Pclass", y="Survived", data=train_data, ax=ax2)
ax2.set(title="Survived - Pclass")
sns.barplot(x="Sex", y="Survived", hue="Pclass", data=train_data, ax=ax3)
ax3.set(title="Survived - Male, Female per each Pclass")
plt.show()
# - It is abundantly clear that gender affects survivability the most.
# - Within Pclass, class 1 had higher survivability than the other two. This might be because passengers in that class were royalty or simply wealthy and important.
# - When combined, females in Pclass 1 and 2 have almost perfect survivability. Even among the males, Pclass 1 shows a significant survivability difference compared to the other two (the survival-rate table below backs this up).
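# As a quick numeric check of the bullets above, the grouped survival rates by Sex and
# Pclass (a small sketch; the model below does not use this table):
print(train_data.groupby(["Sex", "Pclass"])["Survived"].mean().unstack())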
# Add new feature 'FamilyMembers' using SibSp and Parch. Adding 1 to count the person itself.
for df in dfs:
df["FamilyMembers"] = df.SibSp + df.Parch + 1
possible_drops.extend(["SibSp"])
# - It'll be interesting to see how having a family affects the survivability.
for df in dfs:
df["Alone"] = df["FamilyMembers"].apply(lambda x: 1 if x == 1 else 0)
fig, (ax1) = plt.subplots(ncols=1)
fig.set_size_inches(10, 5)
sns.barplot(x="Alone", y="Survived", data=train_data, ax=ax1)
ax1.set(title="Survived - Alone or Not")
ax1.set_ylim(0, 1)
plt.show()
# - It seems there is no significant difference between passengers who travelled with family and those who travelled alone.
# - It is worth seeing how different family sizes affected survivability.
fig, (ax1) = plt.subplots(ncols=1)
fig.set_size_inches(10, 5)
sns.barplot(x="FamilyMembers", y="Survived", data=train_data, ax=ax1)
ax1.set(title="Survived - FamilyMembers")
ax1.set_ylim(0, 1)
plt.show()
# - From the above graph we can see that family sizes between 2 and 4 have a higher survivability than the rest.
# - We can introduce this as a feature in the training set:
#     - Small - 1
#     - Medium - 2-4
#     - Large - > 4
# def getFamilyType(familySize):
# if familySize == 1:
# return "Small"
# if 2 <= familySize <= 4:
# return "Medium"
# return "Large"
# for df in dfs:
# df['FamilyType'] = df.FamilyMembers.apply(getFamilyType)
# Convert the Cabin feature into HasCabin, depending on whether the cabin data is available or not.
for df in dfs:
df["HasCabin"] = df["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
possible_drops.append("Cabin")
fig, (ax1) = plt.subplots(ncols=1)
fig.set_size_inches(10, 5)
sns.barplot(x="HasCabin", y="Survived", data=train_data, ax=ax1)
ax1.set(title="Survived - Cabin")
ax1.set_ylim(0, 1)
plt.show()
# - People with a cabin have a higher survivability than others.
# Convert fare into ranges or clusters; KMeans clusters are used here, and a range-based sketch follows this block.
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=4)
train_data["FareCluster"] = kmeans.fit_predict(train_data.loc[:, ["Fare"]])
train_data["FareCluster"] = train_data["FareCluster"].astype("category")
test_data["FareCluster"] = kmeans.predict(test_data.loc[:, ["Fare"]])
test_data["FareCluster"] = test_data["FareCluster"].astype("category")
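# A sketch of the range-based alternative mentioned above, using quantile bins instead of
# KMeans clusters; `fare_bands` is only an illustrative name and is not fed to the model.
fare_bands = pd.qcut(train_data["Fare"], q=4, labels=False)
print(pd.crosstab(fare_bands, train_data["Survived"], normalize="index"))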
# Converting fares into their log (log1p)
for df in dfs:
df["FareLog"] = df.Fare.apply(np.log1p)
possible_drops.append("Fare")
# Getting the prefixes of the tickets
for df in dfs:
df["TicketPrefix"] = df["Ticket"].apply(lambda x: x[:3])
df["TicketPrefix"] = df["TicketPrefix"].astype("category")
df["TicketPrefix"] = df["TicketPrefix"].cat.codes
possible_drops.append("Ticket")
## Get Titles from names
import re
def get_Title(name):
title_search = re.search(" ([A-Za-z]+)\.", name)
if title_search:
return title_search.group(1)
return ""
for df in dfs:
df["Title"] = df["Name"].apply(get_Title)
    # There are several titles with only 1 or 2 occurrences. Replace those with a single, more common "Rare" category.
df["Title"] = df["Title"].replace(
[
"Dr",
"Rev",
"Major",
"Col",
"Mlle",
"Jonkheer",
"Don",
"Ms",
"Countess",
"Capt",
"Sir",
"Lady",
"Mme",
],
"Rare",
)
Title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
df["Title"] = df["Title"].map(Title_mapping)
df["Title"] = df["Title"].fillna(0)
temp_data = train_data.copy()
# The bin count was chosen by trying values from 5 to 10; 5 seems to be a good value since it captures the survivability of children and adults clearly (a small search sketch follows below).
temp_data["AgeQ"] = pd.cut(train_data["Age"], 5)
temp_data[["AgeQ", "Survived"]].groupby(["AgeQ"], as_index=False).mean().sort_values(
by="AgeQ", ascending=True
)
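# A small sketch of the search described above: try bin counts from 5 to 10 and compare
# the grouped survival rates (illustration only; the notebook keeps 5 bins).
for n_bins in range(5, 11):
    print(f"--- {n_bins} bins ---")
    print(temp_data.groupby(pd.cut(temp_data["Age"], n_bins))["Survived"].mean())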
# - Let's plot a graph to visualize the above information.
sns.violinplot(x="AgeQ", y="Survived", data=temp_data)
# - Based on above data we can divide age into ranges.
def getAgeRange(age):
if age <= 16:
return 0
if age > 16 and age <= 32:
return 1
if age > 32 and age <= 48:
return 2
if age > 48 and age <= 64:
return 3
if age > 64:
return 5
for df in dfs:
df["AgeRange"] = df.Age.apply(getAgeRange)
# possible_drops.append('Age')
# ### Drop unnecessary columns
# Print the dropping columns
print(possible_drops)
# Extracting the target column from training data.
y = train_data.pop("Survived")
X = train_data.drop(possible_drops, axis=1)
X_test = test_data.drop(possible_drops, axis=1)
# ### Encode categorical data
features = X.columns
X = pd.get_dummies(X)
X_test = pd.get_dummies(X_test)
X.head()
X_test.head()
# ## Define the Random Forest Model
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
model = RandomForestClassifier(n_estimators=2000, max_depth=6, random_state=0)
# , min_samples_split=4, criterion='gini',min_samples_leaf=5, max_features='auto',)
# ## Training
model.fit(X, y)
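# cross_val_score is imported above but not used; a minimal sketch of how it could give a
# less optimistic estimate than training accuracy (5-fold CV on the same features):
cv_scores = cross_val_score(model, X, y, cv=5)
print("CV accuracy: %.3f +/- %.3f" % (cv_scores.mean(), cv_scores.std()))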
# ## Predictions
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output["Survived"].value_counts()
# Save predictions
output.to_csv("predictions.csv", index=False)
print("Your submission was successfully saved!")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
female = train_data[train_data["Sex"] == "female"]
fplot = sns.displot(female, x="Age", bins=40, hue="Survived")
male = train_data[train_data["Sex"] == "male"]
mplot = sns.displot(male, x="Age", bins=40, hue="Survived")
sns.barplot(data=train_data, x="Pclass", y="Survived", hue="Sex")
sns.barplot(data=train_data, x="Embarked", y="Survived", hue="Sex")
sns.barplot(data=train_data, x="Embarked", y="Survived", hue="Pclass")
sns.barplot(x="SibSp", y="Survived", data=train_data)
sns.barplot(x="Parch", y="Survived", data=train_data)
def countRelatives(df):
df["Relatives"] = df["Parch"] + df["SibSp"]
countRelatives(train_data)
countRelatives(test_data)
sns.displot(train_data, x="Relatives", kind="hist")
sns.barplot(data=train_data, x="Relatives", y="Survived")
# ## Handle Missing Values
train_data.isnull().sum()
test_data.isnull().sum()
def getDeck(df):
deck = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "T": 8, "Z": 0}
df["Cabin"] = df["Cabin"].fillna("Z00")
df["Deck"] = df["Cabin"].astype(str).str[0]
df["Deck"] = df["Deck"].map(deck).astype(int)
return df.drop(["Cabin"], axis=1)
train_data = getDeck(train_data)
test_data = getDeck(test_data)
sns.barplot(data=train_data, x="Deck", y="Survived")
def fillAge(df):
    # Fill missing ages with a random integer drawn from within one standard deviation of the mean.
    mean = df["Age"].mean()
    std = df["Age"].std()
    df["Age"] = df["Age"].apply(
        lambda x: np.random.randint(mean - std, mean + std) if np.isnan(x) else x
    )
fillAge(train_data)
fillAge(test_data)
train_data["Embarked"].value_counts()
train_data["Embarked"] = train_data["Embarked"].fillna("S")
test_data["Fare"] = test_data["Fare"].fillna(0)
train_data.head()
# ## Encode Categorical Data
sex = {"male": 0, "female": 1}
train_data["Sex"] = train_data["Sex"].map(sex)
test_data["Sex"] = test_data["Sex"].map(sex)
ports = {"S": 0, "C": 1, "Q": 2}
train_data["Embarked"] = train_data["Embarked"].map(ports)
test_data["Embarked"] = test_data["Embarked"].map(ports)
def convertAge(df):
bins = [0, 12, 17, 22, 27, 32, 37, 42, 55, 100]
labels = [0, 1, 2, 3, 4, 5, 6, 7, 8]
df["AgeGroup"] = pd.cut(x=df["Age"], bins=bins, labels=labels)
df["AgeGroup"] = df["AgeGroup"].astype(int)
return df.drop(["Age"], axis=1)
train_data = convertAge(train_data)
test_data = convertAge(test_data)
sns.displot(data=train_data, x="AgeGroup")
def convertFare(df):
df["FareGroup"] = pd.qcut(df["Fare"], q=6, labels=[0, 1, 2, 3, 4, 5])
df["FareGroup"] = df["FareGroup"].astype(int)
return df.drop(["Fare"], axis=1)
train_data = convertFare(train_data)
test_data = convertFare(test_data)
sns.barplot(data=train_data, x="FareGroup", y="Survived")
# ## Feature Crossing
train_data["AgeClass"] = train_data["AgeGroup"] * train_data["Pclass"]
test_data["AgeClass"] = test_data["AgeGroup"] * test_data["Pclass"]
train_data = train_data.drop(["PassengerId", "Ticket", "Name"], axis=1)
test_data = test_data.drop(["Ticket", "Name"], axis=1)
train_data.head()
# ## Train Model
X = train_data.drop(["Survived"], axis=1)
y = train_data["Survived"]
X_test = test_data.drop(["PassengerId"], axis=1).copy()
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
score = model.score(X, y)
print(score)
feature_importance = pd.DataFrame(
{"feature": X.columns, "importance": np.round(model.feature_importances_, 3)}
)
feature_importance = feature_importance.sort_values(
"importance", ascending=False
).set_index("feature")
feature_importance.plot.bar()
feature_importance.head(10)
# X = X.drop(["Embarked", "Parch"], axis=1)
# X_test = X_test.drop(["Embarked", "Parch"], axis=1)
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
score = model.score(X, y)
print(score)
output = pd.DataFrame(
{"PassengerId": test_data["PassengerId"], "Survived": predictions}
)
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
training = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
training["train_test"] = 1
test["train_test"] = 0
test["Survived"] = np.NaN
all_data = pd.concat([training, test])
all_data.columns
training.info()
# #Looking at the data and null counts
# #Looking at the datatypes and the count of nulls
training.describe()
# #Get an understanding of the central tendencies of the data
training.describe().columns
# #Separating numeric columns
df_num = training[["Age", "SibSp", "Parch", "Fare"]]
df_cat = training[["Survived", "Pclass", "Sex", "Ticket", "Embarked"]]
# #Separating categorical and numeric data types
# numeric variable distributions
for i in df_num.columns:
plt.hist(df_num[i])
plt.title(i)
plt.show()
# print(df_num.corr())
# sns.heatmap(df_num.corr())
pd.pivot_table(training, index="Survived", values=["Age", "SibSp", "Parch", "Fare"])
# #Survival rate comparison across Age, SibSp, Parch and Fare
for i in df_cat.columns:
sns.barplot(df_cat[i].value_counts().index, df_cat[i].value_counts()).set_title(i)
plt.show()
print(
pd.pivot_table(
training, index="Survived", columns="Pclass", values="Ticket", aggfunc="count"
)
)
print()
print(
pd.pivot_table(
training, index="Survived", columns="Sex", values="Ticket", aggfunc="count"
)
)
print()
print(
pd.pivot_table(
training, index="Survived", columns="Embarked", values="Ticket", aggfunc="count"
)
)
# #Comparing survival across each categorical variable
training["numeric_ticket"] = training.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
training["ticket_letters"] = training.Ticket.apply(
lambda x: "".join(x.split(" ")[:-1]).replace(".", "").replace("/", "").lower()
if len(x.split(" ")[:-1]) > 0
else 0
)
training["numeric_ticket"].value_counts()
pd.set_option("max_rows", None)
training["ticket_letters"].value_counts()
pd.pivot_table(
training,
index="Survived",
columns="numeric_ticket",
values="Ticket",
aggfunc="count",
)
pd.pivot_table(
training,
index="Survived",
columns="ticket_letters",
values="Ticket",
aggfunc="count",
)
training.Name.head(50)
training["name_title"] = training.Name.apply(
lambda x: x.split(",")[1].split(".")[0].strip()
)
training["name_title"].value_counts()
# all_data['cabin_multiple'] = all_data.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
# all_data['cabin_adv'] = all_data.Cabin.apply(lambda x: str(x)[0])
all_data["numeric_ticket"] = all_data.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
all_data["ticket_letters"] = all_data.Ticket.apply(
lambda x: "".join(x.split(" ")[:-1]).replace(".", "").replace("/", "").lower()
if len(x.split(" ")[:-1]) > 0
else 0
)
all_data["name_title"] = all_data.Name.apply(
lambda x: x.split(",")[1].split(".")[0].strip()
)
# impute nulls for continuous data
# all_data.Age = all_data.Age.fillna(training.Age.mean())
all_data.Age = all_data.Age.fillna(training.Age.median())
# all_data.Fare = all_data.Fare.fillna(training.Fare.mean())
all_data.Fare = all_data.Fare.fillna(training.Fare.median())
# drop null 'embarked' rows. Only 2 instances of this in training and 0 in test
all_data.dropna(subset=["Embarked"], inplace=True)
# tried log norm of sibsp (not used)
all_data["norm_sibsp"] = np.log(all_data.SibSp + 1)
all_data["norm_sibsp"].hist()
# log norm of fare (used)
all_data["norm_fare"] = np.log(all_data.Fare + 1)
all_data["norm_fare"].hist()
# convert Pclass to a string so pd.get_dummies() treats it as a category
all_data.Pclass = all_data.Pclass.astype(str)
# created dummy variables from categories (also can use OneHotEncoder)
all_dummies = pd.get_dummies(
all_data[
[
"Pclass",
"Sex",
"Age",
"SibSp",
"Parch",
"norm_fare",
"Embarked",
"numeric_ticket",
"name_title",
"train_test",
]
]
)
# Split to train test again
X_train = all_dummies[all_dummies.train_test == 1].drop(["train_test"], axis=1)
X_test = all_dummies[all_dummies.train_test == 0].drop(["train_test"], axis=1)
y_train = all_data[all_data.train_test == 1].Survived
y_train.shape
from sklearn.preprocessing import StandardScaler
scale = StandardScaler()
all_dummies_scaled = all_dummies.copy()
all_dummies_scaled[["Age", "SibSp", "Parch", "norm_fare"]] = scale.fit_transform(
all_dummies_scaled[["Age", "SibSp", "Parch", "norm_fare"]]
)
all_dummies_scaled
X_train_scaled = all_dummies_scaled[all_dummies_scaled.train_test == 1].drop(
["train_test"], axis=1
)
X_test_scaled = all_dummies_scaled[all_dummies_scaled.train_test == 0].drop(
["train_test"], axis=1
)
y_train = all_data[all_data.train_test == 1].Survived
# from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
training = training.drop(["PassengerId", "Cabin"], axis=1)
test = test.drop(["Cabin"], axis=1)
training.head()
rf = RandomForestClassifier(
bootstrap=True,
criterion="gini",
max_depth=15,
max_features=10,
min_samples_leaf=3,
min_samples_split=2,
n_estimators=50,
)
rf.fit(X_train_scaled, y_train)
Y_pred = rf.predict(X_test_scaled).astype(int)
accuracy = round(rf.score(X_train_scaled, y_train) * 100, 2)
print(accuracy)
output = pd.DataFrame({"PassengerId": test.PassengerId, "Survived": Y_pred})
output.to_csv("submission.csv", index=False)
print("successfull")
from sklearn.model_selection import GridSearchCV
# rf = RandomForestClassifier(random_state = 1)
# param_grid = {'n_estimators': [400,450,500,550],
# 'criterion':['gini','entropy'],
# 'bootstrap': [True],
# 'max_depth': [15, 20, 25],
# 'max_features': ['auto','sqrt', 10],
# 'min_samples_leaf': [2,3],
# 'min_samples_split': [2,3]}
# clf_rf = GridSearchCV(rf, param_grid = param_grid, cv = 5, verbose = True, n_jobs = -1)
# best_clf_rf = clf_rf.fit(X_train_scaled,y_train)
# clf_performance(best_clf_rf,'Random Forest')
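# A reduced, runnable sketch of the grid search left commented out above (the original
# references a clf_performance helper that is not defined in this notebook). The smaller
# grid below is an assumption chosen only to keep the run cheap.
small_param_grid = {
    "n_estimators": [100, 300],
    "max_depth": [10, 15],
    "min_samples_leaf": [2, 3],
}
clf_rf = GridSearchCV(
    RandomForestClassifier(random_state=1),
    param_grid=small_param_grid,
    cv=5,
    n_jobs=-1,
)
clf_rf.fit(X_train_scaled, y_train)
print(clf_rf.best_params_, clf_rf.best_score_)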
<jupyter_start><jupyter_text>Video Game Sales
This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].
Fields include
* Rank - Ranking of overall sales
* Name - The games name
* Platform - Platform of the games release (i.e. PC,PS4, etc.)
* Year - Year of the game's release
* Genre - Genre of the game
* Publisher - Publisher of the game
* NA_Sales - Sales in North America (in millions)
* EU_Sales - Sales in Europe (in millions)
* JP_Sales - Sales in Japan (in millions)
* Other_Sales - Sales in the rest of the world (in millions)
* Global_Sales - Total worldwide sales.
The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.
It is based on BeautifulSoup using Python.
There are 16,598 records. 2 records were dropped due to incomplete information.
[1]: http://www.vgchartz.com/
Kaggle dataset identifier: videogamesales
<jupyter_code>import pandas as pd
df = pd.read_csv('videogamesales/vgsales.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<jupyter_text>Examples:
{
"Rank": 1,
"Name": "Wii Sports",
"Platform": "Wii",
"Year": 2006,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 41.49,
"EU_Sales": 29.02,
"JP_Sales": 3.77,
"Other_Sales": 8.46,
"Global_Sales": 82.74
}
{
"Rank": 2,
"Name": "Super Mario Bros.",
"Platform": "NES",
"Year": 1985,
"Genre": "Platform",
"Publisher": "Nintendo",
"NA_Sales": 29.08,
"EU_Sales": 3.58,
"JP_Sales": 6.8100000000000005,
"Other_Sales": 0.77,
"Global_Sales": 40.24
}
{
"Rank": 3,
"Name": "Mario Kart Wii",
"Platform": "Wii",
"Year": 2008,
"Genre": "Racing",
"Publisher": "Nintendo",
"NA_Sales": 15.85,
"EU_Sales": 12.88,
"JP_Sales": 3.79,
"Other_Sales": 3.31,
"Global_Sales": 35.82
}
{
"Rank": 4,
"Name": "Wii Sports Resort",
"Platform": "Wii",
"Year": 2009,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 15.75,
"EU_Sales": 11.01,
"JP_Sales": 3.2800000000000002,
"Other_Sales": 2.96,
"Global_Sales": 33.0
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns # visualization tool
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
data.head()
# The dataset contains each game's name, release platform, year, publisher, and sales levels (in millions of copies) broken down by region.
data.info()
# The regional sales levels and the year are floats; the game's name, platform, genre and publisher are objects; and the game's rank is stored as an integer.
data.corr()
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt=".2g", ax=ax)
plt.show()
# Features such as Rank and Year look meaningless for explaining a relationship here. There are positive correlations between the regional sales figures and global sales, which is an obvious relationship, since higher sales in a region also raise global sales. The correlation between the JP market and the other markets can be described as weak, while sales in the EU and NA markets have a fairly strong positive relationship with global sales (the exact coefficients are printed below).
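# The exact coefficients behind the statements above (a quick check using only the sales columns):
print(data[["NA_Sales", "EU_Sales", "JP_Sales", "Other_Sales", "Global_Sales"]].corr())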
data.columns
# Line Plot
# color = color, label = label, linewidth = width of line, alpha = opacity, grid = grid, linestyle = style of line
data.EU_Sales.plot(
kind="line",
color="g",
label="EU Sales",
linewidth=2,
alpha=0.5,
grid=True,
linestyle=":",
figsize=(15, 15),
)
data.NA_Sales.plot(
color="r",
label="NA Sales",
linewidth=2,
alpha=0.5,
grid=True,
linestyle="-.",
figsize=(15, 15),
)
plt.legend(loc="upper right") # legend = puts label into plot
plt.xlabel("x axis") # label = name of label
plt.ylabel("y axis")
plt.title("Line Plot") # title = title of plot
plt.show()
# Since the games are ranked by sales level, and under the assumption that the records at the top are outliers, drawing a line plot over this data is not very meaningful.
# Scatter Plot
# x = EU Sales, y = NA Sales
data.plot(
kind="scatter", x="EU_Sales", y="NA_Sales", alpha=0.5, color="red", figsize=(10, 10)
)
plt.xlabel("EU Sales") # label = name of label
plt.ylabel("NA Sales")
plt.title("EU - NA Sales Scatter Plot") # title = title of plot
# The scatter plot compares the sales levels of these games in the two regions. However, because a few games have very high sales, the games with low sales end up clustered together.
# Since such plots are not very informative on the raw data, more meaningful charts can be produced by applying some filtering.
# 1 - Filtering a pandas DataFrame
x = (
data["EU_Sales"] > 2.47
) # There are 100 games which have higher sales level than 2.47 million sales in Europe
data[x]
x = data[
    (data["EU_Sales"] > 2.47) & (data["NA_Sales"] > 2.5)
]  # Games with more than 2.47 million sales in Europe AND more than 2.5 million in North America
x
type(x)
x["EU_Sales"]
# Line Plot
# color = color, label = label, linewidth = width of line, alpha = opacity, grid = grid, linestyle = style of line
x.EU_Sales.plot(
kind="line",
color="g",
label="EU Sales",
linewidth=2,
alpha=0.5,
grid=True,
linestyle=":",
figsize=(15, 15),
)
x.NA_Sales.plot(
color="r",
label="NA Sales",
linewidth=2,
alpha=0.5,
grid=True,
linestyle="-.",
figsize=(15, 15),
)
plt.legend(loc="upper right") # legend = puts label into plot
plt.xlabel("x axis") # label = name of label
plt.ylabel("y axis")
plt.title("Line Plot") # title = title of plot
plt.show()
# Scatter Plot
# x = EU Sales, y = NA Sales
x.plot(
kind="scatter", x="EU_Sales", y="NA_Sales", alpha=0.5, color="red", figsize=(10, 10)
)
plt.xlabel("EU Sales") # label = name of label
plt.ylabel("NA Sales")
plt.title("EU - NA Sales Scatter Plot") # title = title of plot
# I created a new dataframe called 'x' holding the games in the intersection of the filtered EU and NA sales, which makes the resulting line and scatter plots much easier to interpret.
for index, value in x[["EU_Sales"]][1:10].iterrows():
print(index, " : ", value)
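# Side note (illustrative addition): for larger frames, itertuples is usually much faster than
# iterrows; the loop above could equivalently be written as:
for row in x[["EU_Sales"]][1:10].itertuples():
    print(row.Index, " : ", row.EU_Sales)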
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/179/69179175.ipynb
|
videogamesales
|
gregorut
|
[{"Id": 69179175, "ScriptId": 18850336, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4936074, "CreationDate": "07/27/2021 18:18:20", "VersionNumber": 2.0, "Title": "Correlation Between Features and Filtering Feature", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 99.0, "LinesInsertedFromPrevious": 3.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 96.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 92033498, "KernelVersionId": 69179175, "SourceDatasetVersionId": 618}]
|
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
|
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
|
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
|
|
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
| true | 1 |
<start_data_description><data_path>videogamesales/vgsales.csv:
<column_names>
['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
<column_types>
{'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'}
<dataframe_Summary>
{'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}}
<dataframe_info>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<some_examples>
{'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}}
<end_description>
| 1,807 | 1 | 2,921 | 1,807 |
69179568
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Importing Libraries & Data Files
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("seaborn-whitegrid")
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.cluster import KMeans
from sklearn.impute import SimpleImputer
# Load train dataset
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
# Load test dataset
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
# ## Data Pre-processing and Feature Engineering
#
train_data.isnull().sum()
test_data.isnull().sum()
# #### Extracting Title
# Title can be extracted from the name field.
# Titles are later reduced to Mr., Mrs., Master and Miss.
def checkForSubstrings(big_string, substrings):
if type(big_string) == pd._libs.missing.NAType:
return "Unknown"
for substring in substrings:
if big_string.find(substring) != -1:
return substring
return np.nan
title_list = [
"Mrs",
"Mr",
"Master",
"Miss",
"Major",
"Rev",
"Dr",
"Ms",
"Mlle",
"Col",
"Capt",
"Mme",
"Countess",
"Don",
"Jonkheer",
]
# Extract Title from Name
train_data["Title"] = train_data["Name"].map(
lambda x: checkForSubstrings(x, title_list)
)
test_data["Title"] = test_data["Name"].map(lambda x: checkForSubstrings(x, title_list))
def replace_titles(x):
title = x["Title"]
if title in ["Don", "Major", "Capt", "Jonkheer", "Rev", "Col"]:
return "Mr"
elif title in ["Countess", "Mme"]:
return "Mrs"
elif title in ["Mlle", "Ms"]:
return "Miss"
elif title == "Dr":
if x["Sex"] == "Male":
return "Mr"
else:
return "Mrs"
else:
return title
# Reduce Titles to Mr., Mrs., Master and Miss.
train_data["Title"] = train_data.apply(replace_titles, axis=1)
test_data["Title"] = test_data.apply(replace_titles, axis=1)
# #### Extracting Deck
deck_list = ["A", "B", "C", "D", "E", "F", "T", "G", "Unknown"]
train_data["Deck"] = (
train_data["Cabin"].astype("string").map(lambda x: checkForSubstrings(x, deck_list))
)
test_data["Deck"] = (
test_data["Cabin"].astype("string").map(lambda x: checkForSubstrings(x, deck_list))
)
# #### Handling Missing Values
# Age, Cabin and Embarked have missing values in the train set.
# Age, Fare and Cabin have missing values in the test set.
# - Age:- Replace with the mean of the respective Title category
# - Embarked:- Replace with the mode
# - Fare:- Replace with the mean
# - Cabin:- Ignore the feature, since more than 77% of its values are null (a quick check of that share follows below)
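# Quick check of the figure quoted above (illustrative addition): the share of missing Cabin values.
print("Missing Cabin share:", train_data["Cabin"].isnull().mean())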
# Handling Age
train_data["AvgAge"] = train_data.groupby("Title")["Age"].transform("mean")
train_data["Age"] = train_data["Age"].fillna(train_data["AvgAge"])
test_data = test_data.merge(
train_data[["Title", "AvgAge"]].drop_duplicates(),
on="Title",
how="left",
)
test_data["Age"] = test_data["Age"].fillna(test_data["AvgAge"])
# Handling Fare
test_data["Fare"] = test_data["Fare"].fillna(train_data["Fare"].mean())
# Handling Embarked
# train_data["Embarked"] = train_data["Embarked"].fillna(train_data["Embarked"].mode())
imputer_embarked = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
imputer_embarked = imputer_embarked.fit(train_data[["Embarked"]])
train_data["Embarked"] = imputer_embarked.transform(train_data[["Embarked"]])
test_data["Embarked"] = imputer_embarked.transform(test_data[["Embarked"]])
# #### Label Encoding Categorical Features
title_dict = {"Mr": 0, "Mrs": 1, "Miss": 2, "Master": 4, "Unknown": 5}
train_data["Title"] = train_data["Title"].apply(
lambda x: title_dict[x] if (x in title_dict.keys()) else 5
)
test_data["Title"] = test_data["Title"].apply(
lambda x: title_dict[x] if (x in title_dict.keys()) else 5
)
deck_dict = {
"A": 0,
"B": 1,
"C": 2,
"D": 3,
"E": 4,
"F": 5,
"T": 6,
"G": 7,
"Unknown": 8,
}
train_data["Deck"] = train_data["Deck"].apply(
lambda x: deck_dict[x] if (x in deck_dict.keys()) else 8
)
test_data["Deck"] = test_data["Deck"].apply(
lambda x: deck_dict[x] if (x in deck_dict.keys()) else 8
)
sex_dict = {"male": 1, "female": 0}
train_data["Sex"] = train_data["Sex"].apply(
lambda x: sex_dict[x] if (x in sex_dict.keys()) else 3
)
test_data["Sex"] = test_data["Sex"].apply(
lambda x: sex_dict[x] if (x in sex_dict.keys()) else 3
)
# embarked_dict={'C':0,'Q':1,'S':2}
# train_data["Embarked"] = train_data["Embarked"].apply(lambda x: embarked_dict[x] if (x in embarked_dict.keys()) else 3)
# test_data["Embarked"] = test_data["Embarked"].apply(lambda x: embarked_dict[x] if (x in embarked_dict.keys()) else 3)
encoder_embarked = LabelEncoder()
train_data["Embarked"] = encoder_embarked.fit_transform(train_data["Embarked"].values)
test_data["Embarked"] = encoder_embarked.transform(test_data["Embarked"].values)
# #### Scaling Data (Log Transform)
#
train_data["Fare"] = train_data["Fare"].apply(np.log1p)
test_data["Fare"] = test_data["Fare"].apply(np.log1p)
# #### Creating New Features
train_data["Relationships"] = train_data["SibSp"] + train_data["Parch"]
test_data["Relationships"] = test_data["SibSp"] + test_data["Parch"]
train_data["Age*Class"] = train_data["Age"] * train_data["Pclass"]
train_data["Age*Class"] = train_data["Age*Class"].fillna(0)
test_data["Age*Class"] = test_data["Age"] * test_data["Pclass"]
test_data["Age*Class"] = test_data["Age*Class"].fillna(0)
train_data["Sex*Class"] = train_data["Sex"] * train_data["Pclass"]
test_data["Sex*Class"] = test_data["Sex"] * test_data["Pclass"]
train_data["Fare_Per_Person"] = train_data["Fare"] / (train_data["Relationships"] + 1)
test_data["Fare_Per_Person"] = test_data["Fare"] / (test_data["Relationships"] + 1)
####
train_data["Age*Age"] = train_data["Age"] * train_data["Age"]
test_data["Age*Age"] = test_data["Age"] * test_data["Age"]
train_data["Age_sqrt"] = train_data["Age"].apply(np.sqrt)
test_data["Age_sqrt"] = test_data["Age"].apply(np.sqrt)
# #### Feature Importance
from sklearn.feature_selection import mutual_info_classif
def make_mi_scores(X, y, discrete_features):
mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
features_all = [
"Pclass",
"Sex",
"Age",
"SibSp",
"Parch",
"Ticket",
"Fare",
"Cabin",
"Embarked",
"Relationships",
"Title",
"Deck",
"Age*Class",
"Sex*Class",
"Fare_Per_Person",
]
X = train_data.copy()
y = X.pop("Survived")
# X = X[features]
# Label encoding for categoricals
for colname in X.select_dtypes("object"):
X[colname], _ = X[colname].factorize()
# All discrete features should now have integer dtypes (double-check this before using MI!)
discrete_features = X.dtypes == int
mi_scores = make_mi_scores(X, y, discrete_features)
mi_scores # show a few features with their MI scores
import matplotlib.pyplot as plt
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
plt.figure(dpi=100, figsize=(8, 5))
plot_mi_scores(mi_scores)
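# Illustrative follow-up (not part of the original notebook): the highest-scoring features can be
# read straight off the MI ranking computed above.
print("Top features by mutual information:", mi_scores.head(8).index.tolist())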
# ### Creating Model
X_tmp = train_data.copy()
y_tmp = X_tmp.pop("Survived")
################# Experimenting with different combinations #################
# features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Embarked", "Age"]
# features = ["Pclass", "Sex", "Fare", "Embarked", "Age", "Relationships"]
# features = ["Pclass", "Sex", "Embarked", "Age", "Relationships", "Title"] #v4
# features = ["Pclass", "Sex", "Embarked", "Age", "Relationships", "Title","Deck", "Age*Class", "Fare_Per_Person"]
# features = ["Pclass", "Sex", "Age", "SibSp","Parch", "Fare", "Embarked","Relationships", "Title","Deck", "Age*Class", "Sex*Class", "Fare_Per_Person"]
# features = ["Pclass", "Sex", "Age", "Fare", "Embarked","Relationships", "Title","Deck", "Age*Class", "Sex*Class", "Fare_Per_Person"]
# features = ["Pclass", "Sex", "Age", "Relationships", "Title", "Age*Class", "Sex*Class", "Fare_Per_Person"]
# features = ["Pclass", "Sex", "Embarked", "Age", "Relationships", "Title","Fare","Deck"]
# features = ["Pclass", "Sex", "Age", "SibSp","Parch", "Fare", "Embarked","Relationships", "Title","Deck", "Age*Class", "Sex*Class", "Fare_Per_Person"]
# features = ["Pclass", "Sex", "Age", "Fare", "Fare_Per_Person","Title", "Age*Class","Sex*Class", "Relationships","Age*Age","Age_sqrt","AvgAge"]
# features = ["Title", "Sex*Class","Sex", "Deck","Fare","Fare_Per_Person","Age*Class"] # on mi score
# features = ["Title", "Sex*Class","Sex", "Deck","Fare","Fare_Per_Person","Age*Class","Embarked","Relationships"]
features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Embarked", "Age"] # .78
# features = ["Title", "Sex*Class","Sex", "Deck","Fare","Fare_Per_Person","Age*Class","Embarked","Relationships"]
# features = ["Pclass", "Sex", "Age", "Fare", "Fare_Per_Person","Title", "Age*Class","Sex*Class", "Relationships","Embarked","Deck"]
# features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Embarked", "Age","Relationships"]
X_tmp = X_tmp[features]
X_train, X_test, y_train, y_test = train_test_split(
X_tmp, y_tmp, test_size=0.2, random_state=1, stratify=y_tmp
)
is_submission = True
# is_submission=False
# For generating final submission. Otherwise keep commented and proceed with splits from the train_data
if is_submission:
X_train = X_tmp
y_train = y_tmp
X_test = test_data[features]
# # Create cluster feature
# cluster_features=["Fare","Deck"]
# kmeans = KMeans(n_clusters=5)
# X_train["Cluster"] = kmeans.fit_predict(X_train[cluster_features])
# X_train["Cluster"] = X_train["Cluster"].astype("category")
# X_test["Cluster"] = kmeans.predict(X_test[cluster_features])
# X_test["Cluster"] = X_test["Cluster"].astype("category")
# for item in cluster_features:
# X_train.pop(item)
# X_test.pop(item)
# one_hot_feature_list = ['Sex','Embarked']
# one_hot_feature_list = ['Sex','Pclass','Title','Embarked']
one_hot_feature_list = ["Sex"]
# creating instance of one-hot-encoder
enc = OneHotEncoder(handle_unknown="ignore")
enc.fit(X_train[one_hot_feature_list])
enc_df_train = pd.DataFrame(enc.transform(X_train[one_hot_feature_list]).toarray())
enc_df_train.index = X_train.index
X_train = X_train.join(enc_df_train)
enc_df_test = pd.DataFrame(enc.transform(X_test[one_hot_feature_list]).toarray())
enc_df_test.index = X_test.index
X_test = X_test.join(enc_df_test)
for item in one_hot_feature_list:
X_train.pop(item)
X_test.pop(item)
X_train
model = RandomForestClassifier(n_estimators=500, max_depth=5, random_state=1)
model.fit(X_train, y_train)
print("Train Accuracy: ", accuracy_score(model.predict(X_train), y_train))
if not is_submission:
print("Test Accuracy: ", accuracy_score(model.predict(X_test), y_test))
# #### Generating Output File
if is_submission:
predictions = model.predict(X_test)
output = pd.DataFrame(
{"PassengerId": test_data.PassengerId, "Survived": predictions}
)
output.to_csv("my_submission.csv", index=False)
print("Filed dumped")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/179/69179568.ipynb
| null | null |
[{"Id": 69179568, "ScriptId": 18802922, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890865, "CreationDate": "07/27/2021 18:24:56", "VersionNumber": 16.0, "Title": "feature_eng1", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 313.0, "LinesInsertedFromPrevious": 17.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 296.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 4,143 | 0 | 4,143 | 4,143 |
||
69179573
|
# # **Initialization**
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from sklearn import preprocessing
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
train_df.info()
test_df.info()
# # **Visualizing the Dataset**
train_df.describe()
test_df.describe()
# # **Filling Null Values**
train_df["Embarked"].fillna("U", inplace=True)
age_mean = train_df["Age"].mean()
train_df["Age"].fillna(age_mean, inplace=True)
test_df["Age"].fillna(age_mean, inplace=True)
fare_mean = train_df["Fare"].mean()
train_df["Fare"].fillna(fare_mean, inplace=True)
test_df["Fare"].fillna(fare_mean, inplace=True)
train_df["Title"] = train_df["Name"].str.extract(" ([A-Za-z]+)\.")
test_df["Title"] = test_df["Name"].str.extract(" ([A-Za-z]+)\.")
train_df["Title"] = np.where(
train_df["Title"].isin(
["Don", "Rev", "Dr", "Major", "Mlle", "Col", "Capt", "Jonkheer"]
),
"Other",
train_df["Title"],
)
test_df["Title"] = np.where(
test_df["Title"].isin(
["Don", "Rev", "Dr", "Major", "Mlle", "Col", "Capt", "Jonkheer"]
),
"Other",
test_df["Title"],
)
train_df["Title"] = np.where(
train_df["Title"].isin(["Mrs", "Miss", "Ms", "Dona", "Lady", "Countess", "Mme"]),
"Ms",
train_df["Title"],
)
test_df["Title"] = np.where(
test_df["Title"].isin(["Mrs", "Miss", "Ms", "Dona", "Lady", "Countess", "Mme"]),
"Ms",
test_df["Title"],
)
train_df["Title"] = np.where(
train_df["Title"].isin(["Sir", "Mr"]), "Mr", train_df["Title"]
)
test_df["Title"] = np.where(
test_df["Title"].isin(["Sir", "Mr"]), "Mr", test_df["Title"]
)
# train_df["Title"] = np.where(train_df["Title"].isin(['M', 'Rev', 'Dr', 'Major', 'Mlle', 'Col', 'Capt']), "Other", train_df["Title"])
print(test_df["Title"].unique())
print(train_df["Title"].unique())
train_df["IsaMinor"] = train_df["Age"] < 16
test_df["IsaMinor"] = test_df["Age"] < 16
# df['C'] = np.where(
# df['A'] == df['B'], 0, np.where(
# df['A'] > df['B'], 1, -1))
# # **Preparing the Model**
y = train_df["Survived"]
features = [
"Pclass",
"Sex",
"SibSp",
"Parch",
"Embarked",
"Fare",
"Age",
"IsaMinor",
"Title",
]
X = pd.get_dummies(train_df[features])
X = X.drop(["Embarked_U"], axis=1)
X_test = pd.get_dummies(test_df[features])
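# Defensive step (illustrative addition, not in the original notebook): align the test dummies
# with the training columns, filling any category absent from the test set with zeros.
X_test = X_test.reindex(columns=X.columns, fill_value=0)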
X.info()
from sklearn.feature_selection import mutual_info_classif
def make_mi_scores(X, y, discrete_features):
mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
discrete_features = X.dtypes == int
mi_scores = make_mi_scores(X, y, discrete_features)
mi_scores
# # **Feature Creation**
# I tried creating a few additional features, including:
# * *LogFare:* the logarithm of the fare
# * *Family:* the total number of family members (SibSp + Parch)
# However, including them reduced the final score, so they were removed; a sketch of how they could be built is kept below for reference.
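# Minimal sketch of the two discarded features described above, assuming log1p for LogFare
# (kept for reference only; they are deliberately left out of the 'features' list defined earlier):
train_df["LogFare"] = np.log1p(train_df["Fare"])
train_df["Family"] = train_df["SibSp"] + train_df["Parch"]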
# # **Training the Model**
from sklearn.ensemble import RandomForestClassifier  # required for the model below; not imported earlier in this notebook

model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_df.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/179/69179573.ipynb
| null | null |
[{"Id": 69179573, "ScriptId": 18823967, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890872, "CreationDate": "07/27/2021 18:25:04", "VersionNumber": 14.0, "Title": "Getting Started with Titanic", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 103.0, "LinesInsertedFromPrevious": 58.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 45.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,290 | 0 | 1,290 | 1,290 |
||
69179292
|
<jupyter_start><jupyter_text>COVID-19 Indonesia Dataset
### Context
The COVID-19 dataset in Indonesia was created to find out various factors that could be taken into consideration in decision making related to the level of stringency in each province in Indonesia.
### Content
Data compiled based on time series, both on a country level (Indonesia), and on a province level. If needed in certain provinces, it might also be provided at the city / regency level.
Demographic data is also available, as well as calculations between demographic data and COVID-19 pandemic data.
Kaggle dataset identifier: covid19-indonesia
<jupyter_code>import pandas as pd
df = pd.read_csv('covid19-indonesia/covid_19_indonesia_time_series_all.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 31822 entries, 0 to 31821
Data columns (total 38 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 31822 non-null object
1 Location ISO Code 31822 non-null object
2 Location 31822 non-null object
3 New Cases 31822 non-null int64
4 New Deaths 31822 non-null int64
5 New Recovered 31822 non-null int64
6 New Active Cases 31822 non-null int64
7 Total Cases 31822 non-null int64
8 Total Deaths 31822 non-null int64
9 Total Recovered 31822 non-null int64
10 Total Active Cases 31822 non-null int64
11 Location Level 31822 non-null object
12 City or Regency 0 non-null float64
13 Province 30893 non-null object
14 Country 31822 non-null object
15 Continent 31822 non-null object
16 Island 30893 non-null object
17 Time Zone 30893 non-null object
18 Special Status 4558 non-null object
19 Total Regencies 31822 non-null int64
20 Total Cities 30921 non-null float64
21 Total Districts 31822 non-null int64
22 Total Urban Villages 30918 non-null float64
23 Total Rural Villages 30893 non-null float64
24 Area (km2) 31822 non-null int64
25 Population 31822 non-null int64
26 Population Density 31822 non-null float64
27 Longitude 31822 non-null float64
28 Latitude 31822 non-null float64
29 New Cases per Million 31822 non-null float64
30 Total Cases per Million 31822 non-null float64
31 New Deaths per Million 31822 non-null float64
32 Total Deaths per Million 31822 non-null float64
33 Total Deaths per 100rb 31822 non-null float64
34 Case Fatality Rate 31822 non-null object
35 Case Recovered Rate 31822 non-null object
36 Growth Factor of New Cases 29883 non-null float64
37 Growth Factor of New Deaths 28375 non-null float64
dtypes: float64(14), int64(12), object(12)
memory usage: 9.2+ MB
<jupyter_text>Examples:
{
"Date": "2020-03-01 00:00:00",
"Location ISO Code": "ID-JK",
"Location": "DKI Jakarta",
"New Cases": 2,
"New Deaths": 0,
"New Recovered": 0,
"New Active Cases": 2,
"Total Cases": 39,
"Total Deaths": 20,
"Total Recovered": 75,
"Total Active Cases": -56,
"Location Level": "Province",
"City or Regency": NaN,
"Province": "DKI Jakarta",
"Country": "Indonesia",
"Continent": "Asia",
"Island": "Jawa",
"Time Zone": "UTC+07:00",
"Special Status": "Daerah Khusus Ibu Kota",
"Total Regencies": 1,
"...": "and 18 more columns"
}
{
"Date": "2020-03-02 00:00:00",
"Location ISO Code": "ID-JK",
"Location": "DKI Jakarta",
"New Cases": 2,
"New Deaths": 0,
"New Recovered": 0,
"New Active Cases": 2,
"Total Cases": 41,
"Total Deaths": 20,
"Total Recovered": 75,
"Total Active Cases": -54,
"Location Level": "Province",
"City or Regency": NaN,
"Province": "DKI Jakarta",
"Country": "Indonesia",
"Continent": "Asia",
"Island": "Jawa",
"Time Zone": "UTC+07:00",
"Special Status": "Daerah Khusus Ibu Kota",
"Total Regencies": 1,
"...": "and 18 more columns"
}
{
"Date": "2020-03-02 00:00:00",
"Location ISO Code": "IDN",
"Location": "Indonesia",
"New Cases": 2,
"New Deaths": 0,
"New Recovered": 0,
"New Active Cases": 2,
"Total Cases": 2,
"Total Deaths": 0,
"Total Recovered": 0,
"Total Active Cases": 2,
"Location Level": "Country",
"City or Regency": NaN,
"Province": null,
"Country": "Indonesia",
"Continent": "Asia",
"Island": null,
"Time Zone": null,
"Special Status": null,
"Total Regencies": 416,
"...": "and 18 more columns"
}
{
"Date": "2020-03-02 00:00:00",
"Location ISO Code": "ID-RI",
"Location": "Riau",
"New Cases": 1,
"New Deaths": 0,
"New Recovered": 0,
"New Active Cases": 1,
"Total Cases": 1,
"Total Deaths": 0,
"Total Recovered": 1,
"Total Active Cases": 0,
"Location Level": "Province",
"City or Regency": NaN,
"Province": "Riau",
"Country": "Indonesia",
"Continent": "Asia",
"Island": "Sumatera",
"Time Zone": "UTC+07:00",
"Special Status": null,
"Total Regencies": 10,
"...": "and 18 more columns"
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import os
import numpy as np
import pandas as pd
import random
import seaborn as sns
import datetime as datetime
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from contextlib import contextmanager
from time import time
from tqdm import tqdm
import lightgbm as lgbm
from sklearn.metrics import classification_report, log_loss, accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
import datetime
from datetime import date
df = pd.read_csv(
"/kaggle/input/covid19-indonesia/covid_19_indonesia_time_series_all.csv"
)
df.head(20)
df.columns
print("---Location---")
df.Location.unique()
date2 = []
for item in df["Date"]:
item2 = item.split("/")
month = int(item2[0])
day = int(item2[1])
year = int(item2[2])
date2 += [datetime.date(year, month, day)]
df["Date"] = date2
df["Date"] = pd.to_datetime(df["Date"])
df.head()
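# Note (illustrative addition): assuming the raw file stores dates as month/day/year, as the
# manual split above implies, the same conversion can be done in a single call on the raw column:
raw_dates = pd.read_csv(
    "/kaggle/input/covid19-indonesia/covid_19_indonesia_time_series_all.csv",
    usecols=["Date"],
)
print(pd.to_datetime(raw_dates["Date"], format="%m/%d/%Y").head())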
# Drop columns whose values are all null, plus columns that are not needed
data = df.drop(
[
"City or Regency",
"Name",
"Item",
"Kind",
"Hidden",
"Location ISO Code",
"Province",
"Country",
"Continent",
"Island",
"Time Zone",
"Special Status",
"Total Regencies",
"Total Cities",
"Total Districts",
"Total Urban Villages",
"Total Rural Villages",
"New Cases per Million",
"Total Cases per Million",
"New Deaths per Million",
"Total Deaths per Million",
"Case Fatality Rate",
"Case Recovered Rate",
"Growth Factor of New Cases",
"Growth Factor of New Deaths",
],
    axis=1,
    errors="ignore",  # tolerate any listed columns that are absent in this dataset version
)
data = data.fillna(0)
data.head(5)
data.info()
newest = data.drop_duplicates(subset="Location", keep="last")
newest.head()
# # EDA
# The pandemic has been going on for more than 17 months in Indonesia, and there is still no clear sign of when it will end. Looking at the data available now, will it be over any time soon? This data gives a picture of whether the outbreak we have been living through is under control or, on the contrary, keeps getting worse.
# ## Latest COVID-19 Data up to 9 July 2021
newest[newest.Location != "Indonesia"].sort_values(by=["Total Cases"], ascending=False)
# ## Provinces with the Most Total Cases
plt.figure(figsize=(12, 9))
plt.bar(
newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Cases"], ascending=False)["Location"]
.values[:5],
newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Cases"], ascending=False)["Total Cases"]
.values[:5],
)
plt.title("5 Provinsi Teratas Dengan Total Kasus Paling Banyak", fontsize=14)
plt.xlabel("Provinsi")
plt.show()
(
newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Cases"], ascending=False)["Total Cases"]
.values[:2][0]
- newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Cases"], ascending=False)["Total Cases"]
.values[:2][1]
) / newest[newest.Location != "Indonesia"].sort_values(
by=["Total Cases"], ascending=False
)[
"Total Cases"
].values[
:2
][
0
] * 100
# The chart above shows that DKI Jakarta ranks first in total COVID-19 cases. Compared with second place, DKI leads by roughly 30% more cases than the province one rank below it, Jawa Barat.
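# The same gap can be computed a bit more readably (illustrative rewrite of the expression above):
top2 = (
    newest[newest.Location != "Indonesia"]
    .sort_values(by=["Total Cases"], ascending=False)["Total Cases"]
    .values[:2]
)
print("Gap between 1st and 2nd province: %.1f%%" % ((top2[0] - top2[1]) / top2[0] * 100))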
# ## Provinces with the Most Deaths
plt.figure(figsize=(12, 9))
# plt.bar(newest[newest.Location != 'Indonesia'].sort_values(by=['Total Deaths'], ascending=False)['Location'].values[:5], newest[newest.Location != 'Indonesia'].sort_values(by=['Total Deaths'], ascending=False)['Total Deaths'].values[:5])
# plt.title('5 Provinsi Teratas Dengan Total Kematian Paling Banyak', fontsize=14)
# plt.xlabel('Provinsi')
# plt.show()
sns.barplot(
newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Deaths"], ascending=False)["Location"]
.values[:5],
newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Deaths"], ascending=False)["Total Deaths"]
.values[:5],
)
plt.title("5 Provinsi Teratas Dengan Total Kematian Paling Banyak", fontsize=25)
plt.xlabel("Provinsi", fontsize=15)
plt.show()
# ## Provinces with the Most Recoveries
plt.figure(figsize=(12, 9))
sns.barplot(
newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Recovered"], ascending=False)["Location"]
.values[:5],
newest[newest.Location != "Indonesia"]
.sort_values(by=["Total Recovered"], ascending=False)["Total Recovered"]
.values[:5],
)
plt.title("5 Provinsi Teratas Dengan Total Kesembuhan Paling Banyak", fontsize=25)
plt.xlabel("Provinsi", fontsize=15)
plt.show()
# ## Provinces with the Most Active Cases
plt.figure(figsize=(12, 9))
top_active = (
    newest[newest.Location != "Indonesia"]
    .sort_values(by=["Total Active Cases"], ascending=False)
    .head(5)
)
sns.barplot(x=top_active["Location"], y=top_active["Total Active Cases"])
plt.title("5 Provinsi Teratas Dengan Total Kasus Aktif Paling Banyak", fontsize=25)
plt.xlabel("Provinsi", fontsize=15)
plt.show()
# ## COVID-19 in DKI Jakarta
jakarta = data[data.Location == "DKI Jakarta"].tail(7).copy()
jakarta
plt.figure(figsize=(10, 7))
plt.plot(jakarta["Date"].values, jakarta["New Cases"].values)
plt.xlabel("Waktu")
plt.ylabel("Jumlah Kasus Baru")
plt.show()
new_cases = jakarta["New Cases"].values
# Day-over-day increase from the 7th to the 8th (positions 4 and 5 in the 7-day window)
print(
    "Persentase Kenaikan dari Tanggal 7-8 sebanyak",
    (new_cases[5] - new_cases[4]) / new_cases[4] * 100,
)
# The chart shows fairly fluctuating numbers, but from the 7th to the 8th there is a very significant jump of about 38%. After that, the curve keeps rising through the 9th. How do the death and recovery figures for DKI Jakarta look over the same window?
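# As a side note (not part of the original analysis), the day-over-day change computed by hand above generalises to the whole window with pandas' pct_change:
daily_change = jakarta.set_index("Date")["New Cases"].pct_change() * 100
print(daily_change.round(1))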
plt.figure(figsize=(10, 9))
plt.plot(jakarta["Date"].values, jakarta["New Cases"].values, label="Cases")
plt.plot(jakarta["Date"].values, jakarta["New Recovered"].values, label="Recover")
plt.plot(jakarta["Date"].values, jakarta["New Deaths"].values, label="Death")
plt.title("Perbandingan Peningkatan Kasus Baru, Kematian, dan Kesembuhan. ")
plt.legend()
plt.show()
# fig, (ax1, ax2) = plt.subplots(1,2, figsize=(12,9))
# ax1.plot(jakarta['Date'].values, jakarta['New Deaths'].values, label = 'Death')
# ax2.plot(jakarta['Date'].values, jakarta['New Recovered'].values, label = 'Recover')
plt.figure(figsize=(10, 9))
plt.plot(jakarta["Date"].values, jakarta["New Deaths"].values, label="Death")
plt.title("Angka Kematian Covid 19 DKI Jakarta 3 Juli - 9 Juli 2021")
plt.legend()
plt.show()
# The charts above show that recoveries in DKI Jakarta keep increasing day by day, and on the 9th the number of recoveries is higher than the number of new cases. Deaths, however, also keep climbing, rising by roughly 20-40 per day.
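# A direct read-out of the last reported day in this window, as a small side check of the comparison above (a sketch, not in the original notebook):
last_day = jakarta.iloc[-1]
print(
    last_day["Date"],
    "| new cases:", last_day["New Cases"],
    "| recovered:", last_day["New Recovered"],
    "| deaths:", last_day["New Deaths"],
)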
# ## COVID-19 in Jawa Timur
jatim = data[data.Location == "Jawa Timur"].tail(7).copy()
jatim
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 9))
ax1.plot(range(7), jatim["Total Cases"].values)
ax1.set_title("Angka Kasus COVID 19")
ax2.plot(range(7), jatim["Total Deaths"].values)
ax2.set_title("Angka Kematian COVID 19")
plt.suptitle("Total Cases and Deaths", fontsize=20)
plt.show()
fig, ax = plt.subplots(1, 3, figsize=(15, 8))
fig.suptitle("Perbandingan Kasus, Kematian, Kesembuhan Pada Awal PPKM", fontsize=20)
ax[0].plot(range(7), jatim["New Cases"].values, label="Jawa Timur")
ax[0].plot(range(7), jakarta["New Cases"].values, label="DKI Jakarta")
ax[0].set_title("Angka Kasus")
ax[0].legend()
ax[1].plot(range(7), jatim["New Deaths"].values, label="Jawa Timur")
ax[1].plot(range(7), jakarta["New Deaths"].values, label="DKI Jakarta")
ax[1].set_title("Angka Kematian")
ax[1].legend()
ax[2].plot(range(7), jatim["New Recovered"].values, label="Jawa Timur")
ax[2].plot(range(7), jakarta["New Recovered"].values, label="DKI Jakarta")
ax[2].set_title("Angka Kesembuhan")
ax[2].legend()
plt.show()
# ## Prediction
data = data[
[
"New Cases",
"New Deaths",
"New Recovered",
"New Active Cases",
"Total Cases",
"Total Deaths",
"Total Recovered",
"Total Active Cases",
]
]
data.tail()
# ### Feature Selection
plt.figure(figsize=(10, 9))
sns.heatmap(data.corr(), annot=True)
plt.show()
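# The subset kept below appears to be chosen by inspecting the heatmap. As a rough sketch (an aside, not the notebook's method), the same kind of choice can be made programmatically, assuming "New Deaths" is the target used in the Split Data step and taking 0.5 as a purely illustrative threshold:
corr_with_target = data.corr()["New Deaths"].abs().drop("New Deaths")
print(corr_with_target.sort_values(ascending=False))
print(corr_with_target[corr_with_target > 0.5].index.tolist())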
data = data[
[
"New Cases",
"New Deaths",
"New Recovered",
"Total Cases",
"Total Deaths",
"Total Recovered",
"Total Active Cases",
]
]
data.tail()
# ### Split Data
X = data[
[
"New Cases",
"New Recovered",
"Total Cases",
"Total Deaths",
"Total Recovered",
"Total Active Cases",
]
]
y = data[["New Deaths"]]
# #### With scaling
from sklearn.preprocessing import StandardScaler

# Use separate scalers for the features and the target, so neither fit overwrites the other
x_scaler = StandardScaler()
X_scaled = x_scaler.fit_transform(X)
y_scaler = StandardScaler()
y_scaled = y_scaler.fit_transform(y)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X_scaled, y_scaled, random_state=42, test_size=0.25
)
from sklearn.linear_model import LinearRegression
ln = LinearRegression()
ln.fit(X_train, y_train)
print(ln.score(X_train, y_train))
print(ln.score(X_test, y_test))
from sklearn.metrics import r2_score

ln_pred = ln.predict(X_test)
ln_pred
print(r2_score(y_test, ln_pred))  # r2_score expects (y_true, y_pred)
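# Note that the scaler above is fitted on the full dataset before the split, so the test rows leak into the scaling statistics. A minimal sketch of one way to avoid that, using an sklearn Pipeline (an aside, not the approach taken in this notebook):
from sklearn.pipeline import make_pipeline

pipe = make_pipeline(StandardScaler(), LinearRegression())
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=42, test_size=0.25)
pipe.fit(X_tr, y_tr)  # the scaler is fitted on the training fold only
print(pipe.score(X_tr, y_tr), pipe.score(X_te, y_te))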
# #### Without Scaling
X = data[
[
"New Cases",
"New Recovered",
"Total Cases",
"Total Deaths",
"Total Recovered",
"Total Active Cases",
]
]
y = data[["New Deaths"]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=0.25
)
from sklearn.linear_model import LinearRegression
ln = LinearRegression()
ln.fit(X_train, y_train)
print(ln.score(X_train, y_train))
print(ln.score(X_test, y_test))
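# The R^2 of an ordinary linear regression is unchanged by linearly rescaling the inputs or the target, so the scaled and unscaled runs above should give essentially the same scores. A single split is also a fairly noisy estimate; as a rough cross-validated check (a sketch, not part of the original notebook):
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(LinearRegression(), X, y.values.ravel(), cv=5, scoring="r2")
print(cv_scores, cv_scores.mean())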
|
[{"covid19-indonesia/covid_19_indonesia_time_series_all.csv": {"column_names": "[\"Date\", \"Location ISO Code\", \"Location\", \"New Cases\", \"New Deaths\", \"New Recovered\", \"New Active Cases\", \"Total Cases\", \"Total Deaths\", \"Total Recovered\", \"Total Active Cases\", \"Location Level\", \"City or Regency\", \"Province\", \"Country\", \"Continent\", \"Island\", \"Time Zone\", \"Special Status\", \"Total Regencies\", \"Total Cities\", \"Total Districts\", \"Total Urban Villages\", \"Total Rural Villages\", \"Area (km2)\", \"Population\", \"Population Density\", \"Longitude\", \"Latitude\", \"New Cases per Million\", \"Total Cases per Million\", \"New Deaths per Million\", \"Total Deaths per Million\", \"Total Deaths per 100rb\", \"Case Fatality Rate\", \"Case Recovered Rate\", \"Growth Factor of New Cases\", \"Growth Factor of New Deaths\"]", "column_data_types": "{\"Date\": \"object\", \"Location ISO Code\": \"object\", \"Location\": \"object\", \"New Cases\": \"int64\", \"New Deaths\": \"int64\", \"New Recovered\": \"int64\", \"New Active Cases\": \"int64\", \"Total Cases\": \"int64\", \"Total Deaths\": \"int64\", \"Total Recovered\": \"int64\", \"Total Active Cases\": \"int64\", \"Location Level\": \"object\", \"City or Regency\": \"float64\", \"Province\": \"object\", \"Country\": \"object\", \"Continent\": \"object\", \"Island\": \"object\", \"Time Zone\": \"object\", \"Special Status\": \"object\", \"Total Regencies\": \"int64\", \"Total Cities\": \"float64\", \"Total Districts\": \"int64\", \"Total Urban Villages\": \"float64\", \"Total Rural Villages\": \"float64\", \"Area (km2)\": \"int64\", \"Population\": \"int64\", \"Population Density\": \"float64\", \"Longitude\": \"float64\", \"Latitude\": \"float64\", \"New Cases per Million\": \"float64\", \"Total Cases per Million\": \"float64\", \"New Deaths per Million\": \"float64\", \"Total Deaths per Million\": \"float64\", \"Total Deaths per 100rb\": \"float64\", \"Case Fatality Rate\": \"object\", \"Case Recovered Rate\": \"object\", \"Growth Factor of New Cases\": \"float64\", \"Growth Factor of New Deaths\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 31822 entries, 0 to 31821\nData columns (total 38 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 31822 non-null object \n 1 Location ISO Code 31822 non-null object \n 2 Location 31822 non-null object \n 3 New Cases 31822 non-null int64 \n 4 New Deaths 31822 non-null int64 \n 5 New Recovered 31822 non-null int64 \n 6 New Active Cases 31822 non-null int64 \n 7 Total Cases 31822 non-null int64 \n 8 Total Deaths 31822 non-null int64 \n 9 Total Recovered 31822 non-null int64 \n 10 Total Active Cases 31822 non-null int64 \n 11 Location Level 31822 non-null object \n 12 City or Regency 0 non-null float64\n 13 Province 30893 non-null object \n 14 Country 31822 non-null object \n 15 Continent 31822 non-null object \n 16 Island 30893 non-null object \n 17 Time Zone 30893 non-null object \n 18 Special Status 4558 non-null object \n 19 Total Regencies 31822 non-null int64 \n 20 Total Cities 30921 non-null float64\n 21 Total Districts 31822 non-null int64 \n 22 Total Urban Villages 30918 non-null float64\n 23 Total Rural Villages 30893 non-null float64\n 24 Area (km2) 31822 non-null int64 \n 25 Population 31822 non-null int64 \n 26 Population Density 31822 non-null float64\n 27 Longitude 31822 non-null float64\n 28 Latitude 31822 non-null float64\n 29 New Cases per Million 31822 non-null float64\n 30 Total Cases per 
Million 31822 non-null float64\n 31 New Deaths per Million 31822 non-null float64\n 32 Total Deaths per Million 31822 non-null float64\n 33 Total Deaths per 100rb 31822 non-null float64\n 34 Case Fatality Rate 31822 non-null object \n 35 Case Recovered Rate 31822 non-null object \n 36 Growth Factor of New Cases 29883 non-null float64\n 37 Growth Factor of New Deaths 28375 non-null float64\ndtypes: float64(14), int64(12), object(12)\nmemory usage: 9.2+ MB\n", "summary": "{\"New Cases\": {\"count\": 31822.0, \"mean\": 402.311388347684, \"std\": 2320.629838150327, \"min\": 0.0, \"25%\": 3.0, \"50%\": 27.0, \"75%\": 130.0, \"max\": 64718.0}, \"New Deaths\": {\"count\": 31822.0, \"mean\": 9.920652378857394, \"std\": 64.13907952149532, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 3.0, \"max\": 2069.0}, \"New Recovered\": {\"count\": 31822.0, \"mean\": 390.39849789453837, \"std\": 2199.8788022077515, \"min\": 0.0, \"25%\": 2.0, \"50%\": 20.0, \"75%\": 123.0, \"max\": 61361.0}, \"New Active Cases\": {\"count\": 31822.0, \"mean\": 1.9922380742882282, \"std\": 1219.5133545832693, \"min\": -29938.0, \"25%\": -12.0, \"50%\": 0.0, \"75%\": 19.0, \"max\": 39165.0}, \"Total Cases\": {\"count\": 31822.0, \"mean\": 159449.99770598958, \"std\": 626443.4507597397, \"min\": 1.0, \"25%\": 5223.25, \"50%\": 23596.5, \"75%\": 69927.75, \"max\": 6405044.0}, \"Total Deaths\": {\"count\": 31822.0, \"mean\": 4564.753221042047, \"std\": 17693.731369395253, \"min\": 0.0, \"25%\": 128.0, \"50%\": 565.5, \"75%\": 2189.0, \"max\": 157876.0}, \"Total Recovered\": {\"count\": 31822.0, \"mean\": 149261.46207026584, \"std\": 595853.6212042584, \"min\": 0.0, \"25%\": 3913.5, \"50%\": 21027.5, \"75%\": 64142.0, \"max\": 6218708.0}, \"Total Active Cases\": {\"count\": 31822.0, \"mean\": 5623.7824146816665, \"std\": 28537.412304831563, \"min\": -2343.0, \"25%\": 80.0, \"50%\": 557.0, \"75%\": 2279.0, \"max\": 586113.0}, \"City or Regency\": {\"count\": 0.0, \"mean\": NaN, \"std\": NaN, \"min\": NaN, \"25%\": NaN, \"50%\": NaN, \"75%\": NaN, \"max\": NaN}, \"Total Regencies\": {\"count\": 31822.0, \"mean\": 24.027276726792785, \"std\": 68.35973432428165, \"min\": 1.0, \"25%\": 7.0, \"50%\": 11.0, \"75%\": 18.0, \"max\": 416.0}, \"Total Cities\": {\"count\": 30921.0, \"mean\": 5.835839720578248, \"std\": 16.39012252876151, \"min\": 1.0, \"25%\": 1.0, \"50%\": 2.0, \"75%\": 4.0, \"max\": 98.0}, \"Total Districts\": {\"count\": 31822.0, \"mean\": 417.9522971529131, \"std\": 1192.9951492312462, \"min\": 44.0, \"25%\": 103.0, \"50%\": 169.0, \"75%\": 289.0, \"max\": 7230.0}, \"Total Urban Villages\": {\"count\": 30918.0, \"mean\": 505.51394009961837, \"std\": 1422.070929301676, \"min\": 35.0, \"25%\": 99.0, \"50%\": 175.0, \"75%\": 332.0, \"max\": 8488.0}, \"Total Rural Villages\": {\"count\": 30893.0, \"mean\": 4462.492797721166, \"std\": 12582.736429280361, \"min\": 275.0, \"25%\": 928.0, \"50%\": 1591.0, \"75%\": 2853.0, \"max\": 74953.0}, \"Area (km2)\": {\"count\": 31822.0, \"mean\": 110653.17051096726, \"std\": 318786.48801953037, \"min\": 664.0, \"25%\": 16787.0, \"50%\": 42013.0, \"75%\": 75468.0, \"max\": 1916907.0}, \"Population\": {\"count\": 31822.0, \"mean\": 15367655.677518697, \"std\": 44617141.30926732, \"min\": 648407.0, \"25%\": 1999539.0, \"50%\": 4216171.0, \"75%\": 9095591.0, \"max\": 265185520.0}, \"Population Density\": {\"count\": 31822.0, \"mean\": 738.894927722959, \"std\": 2729.4316257444484, \"min\": 8.59, \"25%\": 47.79, \"50%\": 103.84, \"75%\": 262.7, \"max\": 16334.31}, \"Longitude\": {\"count\": 
31822.0, \"mean\": 113.7004783239966, \"std\": 9.86206813441533, \"min\": 96.91052174, \"25%\": 106.1090043, \"50%\": 113.4176536, \"75%\": 121.2010927, \"max\": 138.69603}, \"Latitude\": {\"count\": 31822.0, \"mean\": -2.725680566046509, \"std\": 3.608064792481357, \"min\": -8.682205, \"25%\": -6.204698991, \"50%\": -2.461746053, \"75%\": 0.212036949, \"max\": 4.225614628}, \"New Cases per Million\": {\"count\": 31822.0, \"mean\": 28.1332917478474, \"std\": 74.30970963525667, \"min\": 0.0, \"25%\": 0.83, \"50%\": 5.71, \"75%\": 22.29, \"max\": 1459.04}, \"Total Cases per Million\": {\"count\": 31822.0, \"mean\": 11485.038800201117, \"std\": 16477.38547945153, \"min\": 0.01, \"25%\": 1291.3674999999998, \"50%\": 6804.285, \"75%\": 14557.36, \"max\": 130231.62}, \"New Deaths per Million\": {\"count\": 31822.0, \"mean\": 0.6403082772924393, \"std\": 1.9330163192859184, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.54, \"max\": 63.8}, \"Total Deaths per Million\": {\"count\": 31822.0, \"mean\": 289.6336399346364, \"std\": 363.42872368746885, \"min\": 0.0, \"25%\": 38.8625, \"50%\": 158.415, \"75%\": 389.91, \"max\": 1632.6}, \"Total Deaths per 100rb\": {\"count\": 31822.0, \"mean\": 28.96332851486393, \"std\": 36.34288085488888, \"min\": 0.0, \"25%\": 3.89, \"50%\": 15.84, \"75%\": 38.99, \"max\": 163.26}, \"Growth Factor of New Cases\": {\"count\": 29883.0, \"mean\": 1.3267951678211694, \"std\": 2.6793794014191588, \"min\": 0.0, \"25%\": 0.65, \"50%\": 1.0, \"75%\": 1.31, \"max\": 175.0}, \"Growth Factor of New Deaths\": {\"count\": 28375.0, \"mean\": 1.0338343612334802, \"std\": 1.351755266328095, \"min\": 0.0, \"25%\": 0.75, \"50%\": 1.0, \"75%\": 1.0, \"max\": 134.5}}", "examples": "{\"Date\":{\"0\":\"3\\/1\\/2020\",\"1\":\"3\\/2\\/2020\",\"2\":\"3\\/2\\/2020\",\"3\":\"3\\/2\\/2020\"},\"Location ISO Code\":{\"0\":\"ID-JK\",\"1\":\"ID-JK\",\"2\":\"IDN\",\"3\":\"ID-RI\"},\"Location\":{\"0\":\"DKI Jakarta\",\"1\":\"DKI Jakarta\",\"2\":\"Indonesia\",\"3\":\"Riau\"},\"New Cases\":{\"0\":2,\"1\":2,\"2\":2,\"3\":1},\"New Deaths\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"New Recovered\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"New Active Cases\":{\"0\":2,\"1\":2,\"2\":2,\"3\":1},\"Total Cases\":{\"0\":39,\"1\":41,\"2\":2,\"3\":1},\"Total Deaths\":{\"0\":20,\"1\":20,\"2\":0,\"3\":0},\"Total Recovered\":{\"0\":75,\"1\":75,\"2\":0,\"3\":1},\"Total Active Cases\":{\"0\":-56,\"1\":-54,\"2\":2,\"3\":0},\"Location Level\":{\"0\":\"Province\",\"1\":\"Province\",\"2\":\"Country\",\"3\":\"Province\"},\"City or Regency\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"Province\":{\"0\":\"DKI Jakarta\",\"1\":\"DKI Jakarta\",\"2\":null,\"3\":\"Riau\"},\"Country\":{\"0\":\"Indonesia\",\"1\":\"Indonesia\",\"2\":\"Indonesia\",\"3\":\"Indonesia\"},\"Continent\":{\"0\":\"Asia\",\"1\":\"Asia\",\"2\":\"Asia\",\"3\":\"Asia\"},\"Island\":{\"0\":\"Jawa\",\"1\":\"Jawa\",\"2\":null,\"3\":\"Sumatera\"},\"Time Zone\":{\"0\":\"UTC+07:00\",\"1\":\"UTC+07:00\",\"2\":null,\"3\":\"UTC+07:00\"},\"Special Status\":{\"0\":\"Daerah Khusus Ibu Kota\",\"1\":\"Daerah Khusus Ibu Kota\",\"2\":null,\"3\":null},\"Total Regencies\":{\"0\":1,\"1\":1,\"2\":416,\"3\":10},\"Total Cities\":{\"0\":5.0,\"1\":5.0,\"2\":98.0,\"3\":2.0},\"Total Districts\":{\"0\":44,\"1\":44,\"2\":7230,\"3\":169},\"Total Urban Villages\":{\"0\":267.0,\"1\":267.0,\"2\":8488.0,\"3\":268.0},\"Total Rural Villages\":{\"0\":null,\"1\":null,\"2\":74953.0,\"3\":1591.0},\"Area 
(km2)\":{\"0\":664,\"1\":664,\"2\":1916907,\"3\":87024},\"Population\":{\"0\":10846145,\"1\":10846145,\"2\":265185520,\"3\":6074100},\"Population Density\":{\"0\":16334.31,\"1\":16334.31,\"2\":138.34,\"3\":69.8},\"Longitude\":{\"0\":106.8361183,\"1\":106.8361183,\"2\":113.921327,\"3\":101.8051092},\"Latitude\":{\"0\":-6.204698991,\"1\":-6.204698991,\"2\":-0.789275,\"3\":0.511647851},\"New Cases per Million\":{\"0\":0.18,\"1\":0.18,\"2\":0.01,\"3\":0.16},\"Total Cases per Million\":{\"0\":3.6,\"1\":3.78,\"2\":0.01,\"3\":0.16},\"New Deaths per Million\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"Total Deaths per Million\":{\"0\":1.84,\"1\":1.84,\"2\":0.0,\"3\":0.0},\"Total Deaths per 100rb\":{\"0\":0.18,\"1\":0.18,\"2\":0.0,\"3\":0.0},\"Case Fatality Rate\":{\"0\":\"51.28%\",\"1\":\"48.78%\",\"2\":\"0.00%\",\"3\":\"0.00%\"},\"Case Recovered Rate\":{\"0\":\"192.31%\",\"1\":\"182.93%\",\"2\":\"0.00%\",\"3\":\"100.00%\"},\"Growth Factor of New Cases\":{\"0\":null,\"1\":1.0,\"2\":null,\"3\":null},\"Growth Factor of New Deaths\":{\"0\":null,\"1\":1.0,\"2\":null,\"3\":null}}"}}]
| true | 1 |
<start_data_description><data_path>covid19-indonesia/covid_19_indonesia_time_series_all.csv:
<column_names>
['Date', 'Location ISO Code', 'Location', 'New Cases', 'New Deaths', 'New Recovered', 'New Active Cases', 'Total Cases', 'Total Deaths', 'Total Recovered', 'Total Active Cases', 'Location Level', 'City or Regency', 'Province', 'Country', 'Continent', 'Island', 'Time Zone', 'Special Status', 'Total Regencies', 'Total Cities', 'Total Districts', 'Total Urban Villages', 'Total Rural Villages', 'Area (km2)', 'Population', 'Population Density', 'Longitude', 'Latitude', 'New Cases per Million', 'Total Cases per Million', 'New Deaths per Million', 'Total Deaths per Million', 'Total Deaths per 100rb', 'Case Fatality Rate', 'Case Recovered Rate', 'Growth Factor of New Cases', 'Growth Factor of New Deaths']
<column_types>
{'Date': 'object', 'Location ISO Code': 'object', 'Location': 'object', 'New Cases': 'int64', 'New Deaths': 'int64', 'New Recovered': 'int64', 'New Active Cases': 'int64', 'Total Cases': 'int64', 'Total Deaths': 'int64', 'Total Recovered': 'int64', 'Total Active Cases': 'int64', 'Location Level': 'object', 'City or Regency': 'float64', 'Province': 'object', 'Country': 'object', 'Continent': 'object', 'Island': 'object', 'Time Zone': 'object', 'Special Status': 'object', 'Total Regencies': 'int64', 'Total Cities': 'float64', 'Total Districts': 'int64', 'Total Urban Villages': 'float64', 'Total Rural Villages': 'float64', 'Area (km2)': 'int64', 'Population': 'int64', 'Population Density': 'float64', 'Longitude': 'float64', 'Latitude': 'float64', 'New Cases per Million': 'float64', 'Total Cases per Million': 'float64', 'New Deaths per Million': 'float64', 'Total Deaths per Million': 'float64', 'Total Deaths per 100rb': 'float64', 'Case Fatality Rate': 'object', 'Case Recovered Rate': 'object', 'Growth Factor of New Cases': 'float64', 'Growth Factor of New Deaths': 'float64'}
<dataframe_Summary>
{'New Cases': {'count': 31822.0, 'mean': 402.311388347684, 'std': 2320.629838150327, 'min': 0.0, '25%': 3.0, '50%': 27.0, '75%': 130.0, 'max': 64718.0}, 'New Deaths': {'count': 31822.0, 'mean': 9.920652378857394, 'std': 64.13907952149532, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 3.0, 'max': 2069.0}, 'New Recovered': {'count': 31822.0, 'mean': 390.39849789453837, 'std': 2199.8788022077515, 'min': 0.0, '25%': 2.0, '50%': 20.0, '75%': 123.0, 'max': 61361.0}, 'New Active Cases': {'count': 31822.0, 'mean': 1.9922380742882282, 'std': 1219.5133545832693, 'min': -29938.0, '25%': -12.0, '50%': 0.0, '75%': 19.0, 'max': 39165.0}, 'Total Cases': {'count': 31822.0, 'mean': 159449.99770598958, 'std': 626443.4507597397, 'min': 1.0, '25%': 5223.25, '50%': 23596.5, '75%': 69927.75, 'max': 6405044.0}, 'Total Deaths': {'count': 31822.0, 'mean': 4564.753221042047, 'std': 17693.731369395253, 'min': 0.0, '25%': 128.0, '50%': 565.5, '75%': 2189.0, 'max': 157876.0}, 'Total Recovered': {'count': 31822.0, 'mean': 149261.46207026584, 'std': 595853.6212042584, 'min': 0.0, '25%': 3913.5, '50%': 21027.5, '75%': 64142.0, 'max': 6218708.0}, 'Total Active Cases': {'count': 31822.0, 'mean': 5623.7824146816665, 'std': 28537.412304831563, 'min': -2343.0, '25%': 80.0, '50%': 557.0, '75%': 2279.0, 'max': 586113.0}, 'City or Regency': {'count': 0.0, 'mean': nan, 'std': nan, 'min': nan, '25%': nan, '50%': nan, '75%': nan, 'max': nan}, 'Total Regencies': {'count': 31822.0, 'mean': 24.027276726792785, 'std': 68.35973432428165, 'min': 1.0, '25%': 7.0, '50%': 11.0, '75%': 18.0, 'max': 416.0}, 'Total Cities': {'count': 30921.0, 'mean': 5.835839720578248, 'std': 16.39012252876151, 'min': 1.0, '25%': 1.0, '50%': 2.0, '75%': 4.0, 'max': 98.0}, 'Total Districts': {'count': 31822.0, 'mean': 417.9522971529131, 'std': 1192.9951492312462, 'min': 44.0, '25%': 103.0, '50%': 169.0, '75%': 289.0, 'max': 7230.0}, 'Total Urban Villages': {'count': 30918.0, 'mean': 505.51394009961837, 'std': 1422.070929301676, 'min': 35.0, '25%': 99.0, '50%': 175.0, '75%': 332.0, 'max': 8488.0}, 'Total Rural Villages': {'count': 30893.0, 'mean': 4462.492797721166, 'std': 12582.736429280361, 'min': 275.0, '25%': 928.0, '50%': 1591.0, '75%': 2853.0, 'max': 74953.0}, 'Area (km2)': {'count': 31822.0, 'mean': 110653.17051096726, 'std': 318786.48801953037, 'min': 664.0, '25%': 16787.0, '50%': 42013.0, '75%': 75468.0, 'max': 1916907.0}, 'Population': {'count': 31822.0, 'mean': 15367655.677518697, 'std': 44617141.30926732, 'min': 648407.0, '25%': 1999539.0, '50%': 4216171.0, '75%': 9095591.0, 'max': 265185520.0}, 'Population Density': {'count': 31822.0, 'mean': 738.894927722959, 'std': 2729.4316257444484, 'min': 8.59, '25%': 47.79, '50%': 103.84, '75%': 262.7, 'max': 16334.31}, 'Longitude': {'count': 31822.0, 'mean': 113.7004783239966, 'std': 9.86206813441533, 'min': 96.91052174, '25%': 106.1090043, '50%': 113.4176536, '75%': 121.2010927, 'max': 138.69603}, 'Latitude': {'count': 31822.0, 'mean': -2.725680566046509, 'std': 3.608064792481357, 'min': -8.682205, '25%': -6.204698991, '50%': -2.461746053, '75%': 0.212036949, 'max': 4.225614628}, 'New Cases per Million': {'count': 31822.0, 'mean': 28.1332917478474, 'std': 74.30970963525667, 'min': 0.0, '25%': 0.83, '50%': 5.71, '75%': 22.29, 'max': 1459.04}, 'Total Cases per Million': {'count': 31822.0, 'mean': 11485.038800201117, 'std': 16477.38547945153, 'min': 0.01, '25%': 1291.3674999999998, '50%': 6804.285, '75%': 14557.36, 'max': 130231.62}, 'New Deaths per Million': {'count': 31822.0, 'mean': 0.6403082772924393, 'std': 
1.9330163192859184, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.54, 'max': 63.8}, 'Total Deaths per Million': {'count': 31822.0, 'mean': 289.6336399346364, 'std': 363.42872368746885, 'min': 0.0, '25%': 38.8625, '50%': 158.415, '75%': 389.91, 'max': 1632.6}, 'Total Deaths per 100rb': {'count': 31822.0, 'mean': 28.96332851486393, 'std': 36.34288085488888, 'min': 0.0, '25%': 3.89, '50%': 15.84, '75%': 38.99, 'max': 163.26}, 'Growth Factor of New Cases': {'count': 29883.0, 'mean': 1.3267951678211694, 'std': 2.6793794014191588, 'min': 0.0, '25%': 0.65, '50%': 1.0, '75%': 1.31, 'max': 175.0}, 'Growth Factor of New Deaths': {'count': 28375.0, 'mean': 1.0338343612334802, 'std': 1.351755266328095, 'min': 0.0, '25%': 0.75, '50%': 1.0, '75%': 1.0, 'max': 134.5}}
<dataframe_info>
RangeIndex: 31822 entries, 0 to 31821
Data columns (total 38 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 31822 non-null object
1 Location ISO Code 31822 non-null object
2 Location 31822 non-null object
3 New Cases 31822 non-null int64
4 New Deaths 31822 non-null int64
5 New Recovered 31822 non-null int64
6 New Active Cases 31822 non-null int64
7 Total Cases 31822 non-null int64
8 Total Deaths 31822 non-null int64
9 Total Recovered 31822 non-null int64
10 Total Active Cases 31822 non-null int64
11 Location Level 31822 non-null object
12 City or Regency 0 non-null float64
13 Province 30893 non-null object
14 Country 31822 non-null object
15 Continent 31822 non-null object
16 Island 30893 non-null object
17 Time Zone 30893 non-null object
18 Special Status 4558 non-null object
19 Total Regencies 31822 non-null int64
20 Total Cities 30921 non-null float64
21 Total Districts 31822 non-null int64
22 Total Urban Villages 30918 non-null float64
23 Total Rural Villages 30893 non-null float64
24 Area (km2) 31822 non-null int64
25 Population 31822 non-null int64
26 Population Density 31822 non-null float64
27 Longitude 31822 non-null float64
28 Latitude 31822 non-null float64
29 New Cases per Million 31822 non-null float64
30 Total Cases per Million 31822 non-null float64
31 New Deaths per Million 31822 non-null float64
32 Total Deaths per Million 31822 non-null float64
33 Total Deaths per 100rb 31822 non-null float64
34 Case Fatality Rate 31822 non-null object
35 Case Recovered Rate 31822 non-null object
36 Growth Factor of New Cases 29883 non-null float64
37 Growth Factor of New Deaths 28375 non-null float64
dtypes: float64(14), int64(12), object(12)
memory usage: 9.2+ MB
<some_examples>
{'Date': {'0': '3/1/2020', '1': '3/2/2020', '2': '3/2/2020', '3': '3/2/2020'}, 'Location ISO Code': {'0': 'ID-JK', '1': 'ID-JK', '2': 'IDN', '3': 'ID-RI'}, 'Location': {'0': 'DKI Jakarta', '1': 'DKI Jakarta', '2': 'Indonesia', '3': 'Riau'}, 'New Cases': {'0': 2, '1': 2, '2': 2, '3': 1}, 'New Deaths': {'0': 0, '1': 0, '2': 0, '3': 0}, 'New Recovered': {'0': 0, '1': 0, '2': 0, '3': 0}, 'New Active Cases': {'0': 2, '1': 2, '2': 2, '3': 1}, 'Total Cases': {'0': 39, '1': 41, '2': 2, '3': 1}, 'Total Deaths': {'0': 20, '1': 20, '2': 0, '3': 0}, 'Total Recovered': {'0': 75, '1': 75, '2': 0, '3': 1}, 'Total Active Cases': {'0': -56, '1': -54, '2': 2, '3': 0}, 'Location Level': {'0': 'Province', '1': 'Province', '2': 'Country', '3': 'Province'}, 'City or Regency': {'0': None, '1': None, '2': None, '3': None}, 'Province': {'0': 'DKI Jakarta', '1': 'DKI Jakarta', '2': None, '3': 'Riau'}, 'Country': {'0': 'Indonesia', '1': 'Indonesia', '2': 'Indonesia', '3': 'Indonesia'}, 'Continent': {'0': 'Asia', '1': 'Asia', '2': 'Asia', '3': 'Asia'}, 'Island': {'0': 'Jawa', '1': 'Jawa', '2': None, '3': 'Sumatera'}, 'Time Zone': {'0': 'UTC+07:00', '1': 'UTC+07:00', '2': None, '3': 'UTC+07:00'}, 'Special Status': {'0': 'Daerah Khusus Ibu Kota', '1': 'Daerah Khusus Ibu Kota', '2': None, '3': None}, 'Total Regencies': {'0': 1, '1': 1, '2': 416, '3': 10}, 'Total Cities': {'0': 5.0, '1': 5.0, '2': 98.0, '3': 2.0}, 'Total Districts': {'0': 44, '1': 44, '2': 7230, '3': 169}, 'Total Urban Villages': {'0': 267.0, '1': 267.0, '2': 8488.0, '3': 268.0}, 'Total Rural Villages': {'0': None, '1': None, '2': 74953.0, '3': 1591.0}, 'Area (km2)': {'0': 664, '1': 664, '2': 1916907, '3': 87024}, 'Population': {'0': 10846145, '1': 10846145, '2': 265185520, '3': 6074100}, 'Population Density': {'0': 16334.31, '1': 16334.31, '2': 138.34, '3': 69.8}, 'Longitude': {'0': 106.8361183, '1': 106.8361183, '2': 113.921327, '3': 101.8051092}, 'Latitude': {'0': -6.204698991, '1': -6.204698991, '2': -0.789275, '3': 0.511647851}, 'New Cases per Million': {'0': 0.18, '1': 0.18, '2': 0.01, '3': 0.16}, 'Total Cases per Million': {'0': 3.6, '1': 3.78, '2': 0.01, '3': 0.16}, 'New Deaths per Million': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'Total Deaths per Million': {'0': 1.84, '1': 1.84, '2': 0.0, '3': 0.0}, 'Total Deaths per 100rb': {'0': 0.18, '1': 0.18, '2': 0.0, '3': 0.0}, 'Case Fatality Rate': {'0': '51.28%', '1': '48.78%', '2': '0.00%', '3': '0.00%'}, 'Case Recovered Rate': {'0': '192.31%', '1': '182.93%', '2': '0.00%', '3': '100.00%'}, 'Growth Factor of New Cases': {'0': None, '1': 1.0, '2': None, '3': None}, 'Growth Factor of New Deaths': {'0': None, '1': 1.0, '2': None, '3': None}}
<end_description>
| 3,745 | 0 | 5,686 | 3,745 |
# # Titanic Survival Prediction
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# In this competition, we are going to use a **Random Forest** as our model. Run the following to import that from sklearn.
from sklearn.ensemble import RandomForestClassifier
# # 1. Loading Train and Test data
# Let us first load the training and testing datasets. Then let us see what the training set looks like.
train_set = pd.read_csv("/kaggle/input/titanic/train.csv")
test_set = pd.read_csv("/kaggle/input/titanic/test.csv")
train_set.head()
train_set.describe()
test_set.describe()
# # 2. Making initial decisions about features
# Now, let us do an initial, quick analysis of the different columns of the training set. We might also make some important decisions about the relevance of columns to be considered as 'features' to train our model.
train_set.dtypes
# * PassengerId - Just an identifier for each row. Not relevant for the prediction.
# * Survived - This is the **target** (We shall call it *y*).
# * Pclass - Passenger's class (1st, 2nd, or 3rd). As the Data description says, this is a '*proxy for socio-economic status*'. This could be a deciding factor for the survival of a passenger.
# * Name - Not relevant.
# * Sex - This is highly likely to influence on the survival (for instance, giving priority to females). We might want to encode the values.
# * Age - This is also likely to have an influence: priority may have been given to the elderly and to children, and younger people may have had the strength to survive. Just a thought. Moreover, there are missing values for Age.
# * SibSp - Number of Siblings/Spouses aboard. This could have an effect.
# * Parch - Number of Parents/Children aboard. Just like the one above, this can also have an effect.
# * Ticket - Ticket Number. This does not have an effect on the survival.
# * Fare - This potentially has an impact. Might want to do aggregate calculations too. However, there are missing values in the test set.
# * Cabin - Cabin number. Although the exact cabin number will not have a great effect, splitting the values and creating a new feature for the Deck might be effective. Need to check. Also, we can see that there are NaN values (missing values) and we need to think of an approach to deal with them.
# * Embarked - Port of embarkation. We might want to encode the values.
# So according to the above analysis, we have 8 columns to be initially treated as candidate features:
# **Pclass, Sex, Age, SibSp, Parch, Fare, Cabin, Embarked**.
# Now, let us extract the features from the original dataset, and also create the target set.
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Cabin", "Embarked"]
# Features and Target in Training set
X = train_set[features]
y = train_set.Survived
# Features in Test set
test_X = test_set[features]
X.head()
# Following is an initial, basic model created for testing a successful submission. It is hidden as it was used only for testing purposes.
# # 3. Initial Model
# Let us create a basic model with only features - *Pclass, Sex, SibSp, Parch,* and *Embarked*. The model we are using in this competition is **Random Forest**. We shall encode categorical data using *get_dummies*.
init_features = ["Pclass", "Sex", "SibSp", "Parch", "Embarked"]
init_train_X = pd.get_dummies(X[init_features])
init_train_X.head()
init_test_X = pd.get_dummies(test_set[init_features])
model_1 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_1.fit(init_train_X, y)
predictions = model_1.predict(init_test_X)
sub = 1
# After submitting, the above model had an accuracy of 0.77033
# # 4. Analyzing and Processing further
# Now, it is time to refine our features. The modifications shall be applied to both the training and test sets.
# 1. First let's encode the 'Sex' feature as male:1, female:0. As this is our first modification, let us name the resulting dataframes as *X_1* and *test_X_1*.
# Mapping male:1, female:0 in 'Sex' column in train set
X_males = X.loc[X["Sex"] == "male", X.columns]
X_females = X.loc[X["Sex"] == "female", X.columns]
# Checking if Sex has no missing values
print((X_males.shape[0] + X_females.shape[0]) == X.shape[0])
# Drop the 'Sex' column
X_males = X_males.drop("Sex", axis=1)
X_females = X_females.drop("Sex", axis=1)
# Add the 'Sex' column back with encoded values
X_males["Sex"] = 1
X_females["Sex"] = 0
X_1 = pd.concat([X_males, X_females]).sort_index()
# X_1.head()
# X_1.describe()
# Mapping male:1, female:0 in 'Sex' column in test set
test_X_males = test_X.loc[test_X["Sex"] == "male", test_X.columns]
test_X_females = test_X.loc[test_X["Sex"] == "female", test_X.columns]
# Checking if Sex has no missing values
print((test_X_males.shape[0] + test_X_females.shape[0]) == test_X.shape[0])
# Drop the 'Sex' column
test_X_males = test_X_males.drop("Sex", axis=1)
test_X_females = test_X_females.drop("Sex", axis=1)
# Add the 'Sex' column back with encoded values
test_X_males["Sex"] = 1
test_X_females["Sex"] = 0
test_X_1 = pd.concat([test_X_males, test_X_females]).sort_index()
# test_X_1.head()
# test_X_1.describe()
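# The split-and-concat route above works; as an aside (a sketch with hypothetical *_alt names, not how this notebook proceeds), the same 1/0 values can be produced in one step with map:
X_1_alt = X.assign(Sex=X["Sex"].map({"male": 1, "female": 0}))
test_X_1_alt = test_X.assign(Sex=test_X["Sex"].map({"male": 1, "female": 0}))
print(X_1_alt["Sex"].equals(X_1["Sex"]))  # same values as X_1, column order aside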
# 2. The next modification we are going to do is to the 'Age' feature. We are going to replace the NaN values with the *mean* value of Age. When we look at the statistics of the Age feature (using the *describe()* method above), it can be seen that 50% of the passengers are slightly below the mean age, and the 75th Percentile value is also close to the mean. Therefore, the choice of mean value to fill the NaN values seems to be fair enough.
# mean age across the entire dataset
mean_age = (pd.concat([X_1["Age"], test_X_1["Age"]])).mean()
print(f"Mean Age = {mean_age}")
X_2 = X_1.copy()
X_2["Age"] = X_2["Age"].fillna(mean_age)
test_X_2 = test_X_1.copy()
test_X_2["Age"] = test_X_2["Age"].fillna(mean_age)
pd.concat([X_2, test_X_2]).describe()
# 3. Next, since we saw that the test set has one missing value in the 'Fare' feature, let us fill it in. To do so, first let us look at the relevant record.
nan_fare_record = test_X_2[test_X_2["Fare"].isnull()]
nan_fare_record
# We can see that the Pclass of this passenger is 3. Therefore, let us fill the Fare value by the mean of records with Pclass = 3. The index value of this record is 152.
# All records where Pclass = 3 in the entire dataset
Pclass3_series = pd.concat([X_2[X_2["Pclass"] == 3], test_X_2[test_X_2["Pclass"] == 3]])
# Mean fare of Pclass3 records
Pclass3_mean_fare = Pclass3_series["Fare"].mean()
test_X_3 = test_X_2.copy()
test_X_3.loc[152, "Fare"] = Pclass3_mean_fare
test_X_3.describe()
# 4. And then, let us do one-hot encoding for 'Embarked' feature. For that we can use *get_dummies()* method. However, let us first make sure that there are no missing values for 'Embarked'.
nan_embarked = pd.concat(
[X_2[X_2["Embarked"].isnull()], test_X_3[test_X_3["Embarked"].isnull()]]
)
nan_embarked
# Oops! Looks like there are two records where 'Embarked' is missing. And since their indices are less than 891, both of them are in the train set. Let us check whether the corresponding passengers survived or not.
print(y.loc[61], y.loc[829])
# Well, they both survived! I think what we should do here is fill the missing 'Embarked' values with the 'Embarked' value that corresponds to the largest number of survivors. Let us figure that out.
cols = ["Survived", "Embarked"]
embarked_stats = train_set[cols].groupby("Embarked")["Survived"].sum()
embarked_stats
# From the stats, the largest number of survivors embarked at 'S'. Therefore, let us fill the missing values with 'S'.
X_3 = X_2.copy()
X_3.loc[61, "Embarked"] = "S"
X_3.loc[829, "Embarked"] = "S"
# X_3[X_3["Embarked"].isnull()].head()
# Now we have filled the missing values. But, before doing the one-hot encoding, let us take a little detour to check whether the feature 'Cabin' is actually significant. Apparently there are many missing values for 'Cabin'. Let's see how many...
nan_cabin = pd.concat(
[X_3[X_3["Cabin"].isnull()], test_X_3[test_X_3["Cabin"].isnull()]]
)
nan_cabin.shape
# Oh! Out of the 1309 records, 1014 have NaN for 'Cabin'. Looks like we should definitely drop the 'Cabin' column.
X_3 = X_3.drop(["Cabin"], axis=1)
test_X_3 = test_X_3.drop(["Cabin"], axis=1)
# Now let us perform the one-hot encoding using the *get_dummies()* method.
X_4 = pd.get_dummies(X_3)
test_X_4 = pd.get_dummies(test_X_3)
# # 5. Model with Refined Feature set
# Now it's time to train a model with the refined set of features. Let us use the same configurations we used when creating the initial Random Forest model.
model_2 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_2.fit(X_4, y)
predictions = model_2.predict(test_X_4)
sub = 4
# Wow! After submitting, the accuracy came out as 0.78947, which is an improvement!
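# As an aside (not part of the submission flow), the fitted forest can report which of the refined features it relies on most via its impurity-based importances:
importances = pd.Series(model_2.feature_importances_, index=X_4.columns)
print(importances.sort_values(ascending=False))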
# # 6. Another round of analysis and refining features
# If we look again at the feature 'Fare', we can see that there are 0.0 values, like so
zero_fare = pd.concat(
[train_set[train_set["Fare"] == 0], test_set[test_set["Fare"] == 0]]
)
zero_fare
line_tickets = pd.concat(
[train_set[train_set["Ticket"] == "LINE"], test_set[test_set["Ticket"] == "LINE"]]
)
line_tickets
# As the above is taken from the original training and test sets, we can see that for some records with a Fare of 0 the Ticket is listed as "LINE", and their Pclass is 3. Out of curiosity, let us see what the unique values of the 'Ticket' column are.
# pd.concat([train_set, test_set]).Ticket.unique()
# Uncomment the above to see. Apart from 'LINE', the 'Ticket' values look arbitrary, so there is no explicit reason given for why the fares of the other zero-fare passengers (those without "LINE" tickets) were 0.
# Moreover, we can see that records with "LINE" tickets appear only in the training set, and there are only 4 of them, of which only 1 survived. Although the observed survival rate of "LINE" ticket holders is 0.25 (1 out of 4), the sample is far too small to draw conclusions from.
# From the above facts, we can decide on doing the following modifications:
# 1. **Remove** the records with "LINE" tickets
# 2. **Create a new feature called "Zero_Fare"** to indicate Fare = 0.0 records
# Since we do not have 'Ticket' in our feature set, let us look the "LINE"-ticket records up in the original train_set and drop them by their row index.
# BE SURE to **remove the corresponding records in the label set** *y* as well.
# Row indices of the "LINE"-ticket records in the training set
line_index = train_set.index[train_set["Ticket"] == "LINE"]
X_5 = X_4.drop(line_index)
X_5.describe()
y_new = y.drop(line_index)
# To add the new feature "Zero_Fare", let us use *sum(axis=1)* with *eq(0)* for each row.
# Add new feature to training set
X_6 = X_5.copy()
X_6["Zero_Fare"] = X_6[["Fare"]].eq(0).sum(axis=1)
# Add new feature to test set
test_X_6 = test_X_4.copy()
test_X_6["Zero_Fare"] = test_X_6[["Fare"]].eq(0).sum(axis=1)
# X_6[X_6["Fare"] == 0]
# test_X_6.head()
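# Sanity-check sketch: the eq/sum construction above is just a 0/1 flag for Fare == 0.
assert (X_6["Zero_Fare"] == (X_6["Fare"] == 0).astype(int)).all()
assert (test_X_6["Zero_Fare"] == (test_X_6["Fare"] == 0).astype(int)).all()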
# # 7. Model with newly added feature "Zero_Fare"
# Now let us give another shot for our model.
model_3 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_3.fit(X_6, y_new)
predictions = model_3.predict(test_X_6)
sub = 5
# The accuracy of model_3 came out as 0.77990, which is a decrease. The model may have overfit because of the new feature. Therefore, let us bring model_2 back as our best model.
model_2 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_2.fit(X_4, y)
predictions = model_2.predict(test_X_4)
sub = 6
# # Finally, Submission
output = pd.DataFrame({"PassengerId": test_set.PassengerId, "Survived": predictions})
output.to_csv(f"submission_{sub}.csv", index=False)
print("Your submission was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009511.ipynb
| null | null |
[{"Id": 69009511, "ScriptId": 18820717, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890515, "CreationDate": "07/25/2021 19:56:19", "VersionNumber": 9.0, "Title": "Titanic Survival Prediction", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 289.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 288.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# In this competition, we are going to use a **Random Forest** as our model. Run the following to import that from sklearn.
from sklearn.ensemble import RandomForestClassifier
# # 1. Loading Train and Test data
# Let us first load the training and testing datasets. Then let us see how the training set looks like.
train_set = pd.read_csv("/kaggle/input/titanic/train.csv")
test_set = pd.read_csv("/kaggle/input/titanic/test.csv")
train_set.head()
train_set.describe()
test_set.describe()
# # 2. Making initial decisions about features
# Now, let us do an initial, quick analysis of the different columns of the training set. We might also make some important decisions about the relevance of columns to be considered as 'features' to train our model.
train_set.dtypes
# * PassengerId - Just an identifier for each row. Not relevant for the prediction.
# * Survived - This is the **target** (We shall call it *y*).
# * Pclass - Passenger's class (1st, 2nd, or 3rd). As the Data description says, this is a '*proxy for socio-economic status*'. This could be a deciding factor for the survival of a passenger.
# * Name - Not relevant.
# * Sex - This is highly likely to influence on the survival (for instance, giving priority to females). We might want to encode the values.
# * Age - This also is likely to influence, such as giving priority to elderly people and children, and young people could have strength to survive. Just a thought. Moreover, there are missing values for Age.
# * SibSp - Number of Siblings/Spouses aboard. This could have an effect.
# * Parch - Number of Parents/Children abroad. Just like the above one, this also can have an effect.
# * Ticket - Ticket Number. This does not have an effect on the survival.
# * Fare - This potentially has an impact. Might want to do aggregate calculations too. However, there are missing values in the test set
# * Cabin - Cabin number. Although the exact cabin number will not have a great effect, by splitting the values and creating a new feature for the Deck would be effective. Need to check. Also, we can see that there are NaN values (missing values) and we need to think of an approach to deal with them.
# * Embarked - Port of embarkation. We might want to encode the values.
# So according to the above analysis, we have 8 columns to be initially treated as candidate features:
# **Pclass, Sex, Age, SibSp, Parch, Fare, Cabin, Embarked**.
# Now, let us extract the features from the original dataset, and also creat the target set.
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Cabin", "Embarked"]
# Features and Target in Training set
X = train_set[features]
y = train_set.Survived
# Features in Test set
test_X = test_set[features]
X.head()
# Following is an initial, basic model created for testing a successful submission. It is hidden as it was used only for testing purposes.
# # 3. Initial Model
# Let us create a basic model with only features - *Pclass, Sex, SibSp, Parch,* and *Embarked*. The model we are using in this competition is **Random Forest**. We shall encode categorical data using *get_dummies*.
init_features = ["Pclass", "Sex", "SibSp", "Parch", "Embarked"]
init_train_X = pd.get_dummies(X[init_features])
init_train_X.head()
init_test_X = pd.get_dummies(test_set[init_features])
model_1 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_1.fit(init_train_X, y)
predictions = model_1.predict(init_test_X)
sub = 1
# After submitting, the above model had an accuracy of 0.77033
# # 4. Analyzing and Processing further
# Now, it is time to refine our features. The modifications shall be applied to both the training and test sets.
# 1. First let's encode the 'Sex' feature as male:1, female:0. As this is our first modification, let us name the resulting dataframes as *X_1* and *test_X_1*.
# Mapping male:1, female:0 in 'Sex' column in train set
X_males = X.loc[X["Sex"] == "male", X.columns]
X_females = X.loc[X["Sex"] == "female", X.columns]
# Checking if Sex has no missing values
print((X_males.shape[0] + X_females.shape[0]) == X.shape[0])
# Drop the 'Sex' column
X_males = X_males.drop("Sex", axis=1)
X_females = X_females.drop("Sex", axis=1)
# Add the 'Sex' column back with encoded values
X_males["Sex"] = 1
X_females["Sex"] = 0
X_1 = pd.concat([X_males, X_females]).sort_index()
# X_1.head()
# X_1.describe()
# Mapping male:1, female:0 in 'Sex' column in test set
test_X_males = test_X.loc[test_X["Sex"] == "male", test_X.columns]
test_X_females = test_X.loc[test_X["Sex"] == "female", test_X.columns]
# Checking if Sex has no missing values
print((test_X_males.shape[0] + test_X_females.shape[0]) == test_X.shape[0])
# Drop the 'Sex' column
test_X_males = test_X_males.drop("Sex", axis=1)
test_X_females = test_X_females.drop("Sex", axis=1)
# Add the 'Sex' column back with encoded values
test_X_males["Sex"] = 1
test_X_females["Sex"] = 0
test_X_1 = pd.concat([test_X_males, test_X_females]).sort_index()
# test_X_1.head()
# test_X_1.describe()
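# Side note (sketch only): the same male:1 / female:0 encoding can be done in one line with
# pandas map(). The X_1_alt / test_X_1_alt names below are introduced just for this check and
# are not used further; the final print verifies the result matches the split-and-concat version.
X_1_alt = X.copy()
X_1_alt["Sex"] = X_1_alt["Sex"].map({"male": 1, "female": 0})
test_X_1_alt = test_X.copy()
test_X_1_alt["Sex"] = test_X_1_alt["Sex"].map({"male": 1, "female": 0})
print((X_1_alt["Sex"] == X_1["Sex"]).all())  # sanity check: both encodings agree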
# 2. The next modification we are going to do is to the 'Age' feature. We are going to replace the NaN values with the *mean* value of Age. When we look at the statistics of the Age feature (using the *describe()* method above), it can be seen that 50% of the passengers are slightly below the mean age, and the 75th Percentile value is also close to the mean. Therefore, the choice of mean value to fill the NaN values seems to be fair enough.
# mean age across the entire dataset
mean_age = (pd.concat([X_1["Age"], test_X_1["Age"]])).mean()
print(f"Mean Age = {mean_age}")
X_2 = X_1.copy()
X_2["Age"] = X_2["Age"].fillna(mean_age)
test_X_2 = test_X_1.copy()
test_X_2["Age"] = test_X_2["Age"].fillna(mean_age)
pd.concat([X_2, test_X_2]).describe()
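# A possible refinement (sketch only, not used below): impute Age with the median within each
# (Pclass, Sex) group instead of a single global mean. X_2_grp is a new frame created just for
# this sketch; X_2 / test_X_2 above stay untouched.
X_2_grp = X_1.copy()
age_medians = X_2_grp.groupby(["Pclass", "Sex"])["Age"].transform("median")
X_2_grp["Age"] = X_2_grp["Age"].fillna(age_medians)
print(X_2_grp["Age"].isnull().sum())  # should be 0 if every (Pclass, Sex) group has a median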
# 3. Next, as we could see that in the test set, there is one missing value in the 'Fare' feature, let us go and fill that. To do so, first let us see the relevant record.
nan_fare_record = test_X_2[test_X_2["Fare"].isnull()]
nan_fare_record
# We can see that the Pclass of this passenger is 3. Therefore, let us fill the Fare value by the mean of records with Pclass = 3. The index value of this record is 152.
# All records where Pclass = 3 in the entire dataset
Pclass3_series = pd.concat([X_2[X_2["Pclass"] == 3], test_X_2[test_X_2["Pclass"] == 3]])
# Mean fare of Pclass3 records
Pclass3_mean_fare = Pclass3_series["Fare"].mean()
test_X_3 = test_X_2.copy()
test_X_3.loc[152, "Fare"] = Pclass3_mean_fare
test_X_3.describe()
# 4. And then, let us do one-hot encoding for 'Embarked' feature. For that we can use *get_dummies()* method. However, let us first make sure that there are no missing values for 'Embarked'.
nan_embarked = pd.concat(
[X_2[X_2["Embarked"].isnull()], test_X_3[test_X_3["Embarked"].isnull()]]
)
nan_embarked
# Oops! Looks like there are two records where 'Embarked' is missing. And since their indexes are less than 891, both of them are in the train set. Let us check if the corresponding passengers survived or not.
print(y.loc[61], y.loc[829])
# Well, they have survived! I think what we should do here is to fill the missing 'Embarked' values with the 'Embarked' value that corresponds to the largest number of survivors. Let us figure that out.
cols = ["Survived", "Embarked"]
embarked_stats = train_set[cols].groupby("Embarked")["Survived"].sum()
embarked_stats
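# Complementary check (sketch): raw survival counts are dominated by how many passengers
# boarded at each port, so the survival *rate* per port is also worth a look before filling.
embarked_rate = train_set.groupby("Embarked")["Survived"].agg(["count", "mean"])
print(embarked_rate)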
# From the stats, the largest number of survivors are passengers with 'Embarked' = 'S'. Therefore, let us fill the missing values with 'S'.
X_3 = X_2.copy()
X_3.loc[61, "Embarked"] = "S"
X_3.loc[829, "Embarked"] = "S"
# X_3[X_3["Embarked"].isnull()].head()
# Now we have filled the missing values. But, before doing the one-hot encoding, let us take a little detour to check whether the feature 'Cabin' is actually significant. Apparently there are many missing values for 'Cabin'. Let's see how many...
nan_cabin = pd.concat(
[X_3[X_3["Cabin"].isnull()], test_X_3[test_X_3["Cabin"].isnull()]]
)
nan_cabin.shape
# Oh! Out of the 1309 records, 1014 have NaN for 'Cabin'. Looks like we should definitely drop the 'Cabin' column.
X_3 = X_3.drop(["Cabin"], axis=1)
test_X_3 = test_X_3.drop(["Cabin"], axis=1)
# Now let us perform the one-hot encoding using the *get_dummies()* method.
X_4 = pd.get_dummies(X_3)
test_X_4 = pd.get_dummies(test_X_3)
# # 5. Model with Refined Feature set
# Now it's time to train a model with the refined set of features. Let us use the same configurations we used when creating the initial Random Forest model.
model_2 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_2.fit(X_4, y)
predictions = model_2.predict(test_X_4)
sub = 4
# Wow! After submitting, the accuracy came out as 0.78947, which is an improvement!
# # 6. Another round of analysis and refining features
# If we look again at the feature 'Fare', we can see that there are 0.0 values, like so
zero_fare = pd.concat(
[train_set[train_set["Fare"] == 0], test_set[test_set["Fare"] == 0]]
)
zero_fare
line_tickets = pd.concat(
[train_set[train_set["Ticket"] == "LINE"], test_set[test_set["Ticket"] == "LINE"]]
)
line_tickets
# As the above is taken from the original training and test sets, we can see that for some records of 0 Fare, the Ticket is mentioned as "LINE", and their Pclass is 3. Out of curiosity, let us see what the unique values of the 'Ticket' column are.
# pd.concat([train_set, test_set]).Ticket.unique()
# Uncomment the above to see. From what is seen, other than 'LINE', the values for 'Ticket' look arbitrary. So there is no explicit reason given why the fares of passengers other than the LINE ticket holders were 0.
# Moreover, we can see that records with "LINE" tickets are only in the training set, and there are only 4 of them. Only 1 has survived. Although the observed survival rate of "LINE" ticket holders is 0.25 (1 out of 4), that is far too little data to draw any conclusion from.
# From the above facts, we can decide on doing the following modifications:
# 1. **Remove** the records with "LINE" tickets
# 1. **Create a new feature called "Zero_Fare"** to indicate Fare = 0.0 records
# Since we do not have 'Ticket' in our feature set, to remove records with "LINE" tickets, let us consider the PassengerId.
# BE SURE to **remove the corresponding records in the label set** *y* as well.
X_5 = X_4.drop([180, 272, 303, 598])
X_5.describe()
y_new = y.drop([180, 272, 303, 598])
# To add the new feature "Zero_Fare", let us use *sum(axis=1)* with *eq(0)* for each row.
# Add new feature to training set
X_6 = X_5.copy()
X_6["Zero_Fare"] = X_6[["Fare"]].eq(0).sum(axis=1)
# Add new feature to test set
test_X_6 = test_X_4.copy()
test_X_6["Zero_Fare"] = test_X_6[["Fare"]].eq(0).sum(axis=1)
# X_6[X_6["Fare"] == 0]
# test_X_6.head()
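# Equivalent, slightly simpler way to build the flag (sketch only): a boolean comparison cast
# to int gives the same 0/1 column as the eq(0).sum(axis=1) trick above.
print((X_6["Zero_Fare"] == (X_6["Fare"] == 0).astype(int)).all())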
# # 7. Model with newly added feature "Zero_Fare"
# Now let us give another shot for our model.
model_3 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_3.fit(X_6, y_new)
predictions = model_3.predict(test_X_6)
sub = 5
# The accuracy of model_3 came out as 0.77990, which is a decrease. The model may have overfit because of the new feature. Therefore, let us bring model_2 back as our best model.
model_2 = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model_2.fit(X_4, y)
predictions = model_2.predict(test_X_4)
sub = 6
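# Instead of judging each change only by the public leaderboard, a quick offline comparison
# with cross-validation can help. Sketch only (5-fold accuracy on the feature sets already
# built above); cv_model_2 / cv_model_3 are new names used just for this check.
from sklearn.model_selection import cross_val_score

cv_model_2 = cross_val_score(
    RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1),
    X_4, y, cv=5, scoring="accuracy",
)
cv_model_3 = cross_val_score(
    RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1),
    X_6, y_new, cv=5, scoring="accuracy",
)
print("CV accuracy (model_2 features):", cv_model_2.mean())
print("CV accuracy (model_3 features):", cv_model_3.mean())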
# # Finally, Submission
output = pd.DataFrame({"PassengerId": test_set.PassengerId, "Survived": predictions})
output.to_csv(f"submission_{sub}.csv", index=False)
print("Your submission was successfully saved!")
| false | 0 | 4,045 | 0 | 4,045 | 4,045 |
||
69009480
|
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_object_dtype
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from xgboost import XGBRegressor
from sklearn import preprocessing
from sklearn import metrics
from sklearn.preprocessing import (
StandardScaler,
RobustScaler,
PolynomialFeatures,
MinMaxScaler,
)
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import (
accuracy_score,
mean_absolute_error,
mean_squared_error,
r2_score,
)
from sklearn.linear_model import LinearRegression, ElasticNetCV
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
combine = pd.concat([train, test])
train.drop("Id", axis=1, inplace=True)
test.drop("Id", axis=1, inplace=True)
train.head()
test.head()
test.shape, train.shape
train.describe().T
train["SalePrice"]
# Relation between saleprice and other features
correlation_num = train.corr()
correlation_num.sort_values(["SalePrice"], ascending=True, inplace=True)
correlation_num.SalePrice
# Check for Correlation between Features
plt.figure(figsize=(20, 10))
sns.heatmap(train.corr(), yticklabels=True, cbar=True, cmap="ocean")
# Function for printing null_values and related info
def descr(train_num):
no_rows = train_num.shape[0]
types = train_num.dtypes
col_null = train_num.columns[train_num.isna().any()].to_list()
counts = train_num.apply(lambda x: x.count())
uniques = train_num.apply(lambda x: x.unique())
nulls = train_num.apply(lambda x: x.isnull().sum())
distincts = train_num.apply(lambda x: x.unique().shape[0])
nan_percent = (train_num.isnull().sum() / no_rows) * 100
cols = {
"dtypes": types,
"counts": counts,
"distincts": distincts,
"nulls": nulls,
"missing_percent": nan_percent,
"uniques": uniques,
}
table = pd.DataFrame(data=cols)
return table
details_tr = descr(train)
details_tr.reset_index(level=[0], inplace=True)
details_tr.sort_values(by="missing_percent", ascending=False)
# Plot for Missing Values in Train dataset
details_tr.sort_values(by="missing_percent", ascending=False, inplace=True)
details_tr = details_tr[details_tr["missing_percent"] > 0]
plt.figure(figsize=(10, 4), dpi=100)
sns.barplot(x=details_tr["index"], y=details_tr["missing_percent"], data=details_tr)
plt.xticks(rotation=90)
plt.show()
details_test = descr(test)
details_test.reset_index(level=[0], inplace=True)
details_test.sort_values(by="missing_percent", ascending=False)
train.isnull().values.any()
test.isnull().values.any()
# From the above table we know 'Electrical' has only 1 missing value, so it's better to replace the NaN with the mode
train["Electrical"].mode()
# Filling Nan values according to datatype and category in train dataframe
n = []
c = []
bsmt_str_cols = ["BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"]
bsmt_num_cols = [
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
]
for col, col_df in details_tr.iterrows():
row = col_df["index"]
if col_df["dtypes"] == "object":
c.append(col)
if row == "Electrical":
train[row].fillna("SBrkr", inplace=True)
elif row == "MasVnrType":
train[row].fillna("None", inplace=True)
elif row == "GarageType":
train[row].fillna("Attchd", inplace=True)
elif row == "GarageCond":
train[row].fillna("TA", inplace=True)
elif row == "GarageFinish":
train[row].fillna("Unf", inplace=True)
elif row == "GarageQual":
train[row].fillna("TA", inplace=True)
elif row == "FireplaceQu":
train[row].fillna("None", inplace=True)
for i in bsmt_str_cols:
if row == i:
train[row].fillna("None", inplace=True)
else:
train[row].fillna("NotAvailable", inplace=True)
else:
n.append(col)
if row == "MasVnrArea":
train[row].fillna(0, inplace=True)
for i in bsmt_num_cols:
if row == i:
train[row].fillna("None", inplace=True)
else:
train[row].fillna(train[row].median(), inplace=True)
print("\nNumerical Features -->", len(n))
print("Categorical Features -->", len(c))
# Filling Nan values according to datatype and category in test dataframe
nt = []
ct = []
bsmt_str_cols = ["BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"]
bsmt_num_cols = [
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
]
for col, col_df in details_test.iterrows():
row = col_df["index"]
if col_df["dtypes"] == "object":
ct.append(col)
if row == "Electrical":
test[row].fillna("SBrkr", inplace=True)
elif row == "MasVnrType":
test[row].fillna("None", inplace=True)
elif row == "GarageType":
test[row].fillna("Attchd", inplace=True)
elif row == "GarageCond":
test[row].fillna("TA", inplace=True)
elif row == "GarageFinish":
test[row].fillna("Unf", inplace=True)
elif row == "GarageQual":
test[row].fillna("TA", inplace=True)
elif row == "FireplaceQu":
test[row].fillna("None", inplace=True)
else:
test[row].fillna("NotAvailable", inplace=True)
for i in bsmt_str_cols:
if row == i:
test[row].fillna("None", inplace=True)
else:
nt.append(col)
if row == "MasVnrArea":
test[row].fillna(0, inplace=True)
else:
test[row].fillna(test[row].median(), inplace=True)
for i in bsmt_num_cols:
if row == i:
test[row].fillna("None", inplace=True)
print("\nNumerical Features -->", len(nt))
print("Categorical Features -->", len(ct))
details_tr = descr(train)
details_tr.sort_values(by="missing_percent", ascending=False).head()
train.isnull().values.any()
details_test = descr(test)
details_test.reset_index(level=[0], inplace=True)
details_test.sort_values(by="dtypes", ascending=True).head()
test.isnull().values.any()
# Separating Columns with Numerical Value and Character in 2 dataframes of train,test Datasets
train_num = train.select_dtypes(exclude="object")
train_cat = train.select_dtypes(include="object")
test_num = test.select_dtypes(exclude="object")
test_cat = test.select_dtypes(include="object")
# Plotting numerical features with SalePrice
for i in train_num.columns:
sns.set_style("whitegrid")
plt.figure(figsize=(10, 10))
x = train_num[i]
sns.jointplot(x=x, y=train_num["SalePrice"], data=train_num)
# Plotting categorical features with SalePrice
for i in train_cat.columns:
sns.set_style("whitegrid")
plt.figure(figsize=(15, 15))
x = train_cat[i]
sns.jointplot(x=x, y=train_num["SalePrice"], data=train_cat)
train.groupby("YrSold")["SalePrice"].median().plot()
plt.xlabel("Year Sold")
plt.ylabel("Median House Price")
plt.title("House Price vs YearSold")
train_map = train.copy()
test_map = test.copy()
# Numerical variables are skewed, so we can apply a log transformation to bring them closer to normal and help prevent negative predictions.
# We will only apply the log transformation to columns which do not contain any zero values.
num_features = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea"]
num_features1 = ["LotFrontage", "LotArea", "1stFlrSF", "GrLivArea"]
# for feature in num_features:
# train_map[feature]=np.log(train_map[feature])
# for feature in num_features1:
# test_map[feature]=np.log(test_map[feature])
train_map.head()
for feature in train_map.select_dtypes(include="object"):
labels_ordered = (
train_map.groupby([feature])["SalePrice"].mean().sort_values().index
)
labels_ordered = {k: i for i, k in enumerate(labels_ordered, 0)}
train_map[feature] = train_map[feature].map(labels_ordered)
for feature in test_map.select_dtypes(include="object"):
labels_ordered = (
test_map.groupby([feature])["LotFrontage"].mean().sort_values().index
)
labels_ordered = {k: i for i, k in enumerate(labels_ordered, 0)}
test_map[feature] = test_map[feature].map(labels_ordered)
test_map.head()
train_map.head()
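# Caveat: test_map above is ordinal-encoded from its own 'LotFrontage' group means, so a given
# category may receive a different integer in train and test. A sketch of a more consistent
# alternative (test_map_alt is a new frame built only for illustration and not used downstream):
# reuse the ordering learned from the training data, leaving unseen categories as NaN.
test_map_alt = test.copy()
for feature in test_map_alt.select_dtypes(include="object"):
    labels_ordered = train.groupby([feature])["SalePrice"].mean().sort_values().index
    mapping = {k: i for i, k in enumerate(labels_ordered, 0)}
    test_map_alt[feature] = test_map_alt[feature].map(mapping)
print(test_map_alt.select_dtypes(include="object").shape[1], "object columns left after mapping")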
# FEATURE SCALING
test_map = test_map.drop(["PoolQC", "MiscFeature", "Alley", "Fence"], axis=1)
train_map = train_map.drop(["PoolQC", "MiscFeature", "Alley", "Fence"], axis=1)
X = train_map.drop(["SalePrice"], axis=1).drop(train_map.index[-1])
Y = train_map["SalePrice"].drop(train_map.index[-1])
# Train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.4, random_state=101
)
# Standard scaling our data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train.shape, Y_train.shape, X_test.shape, Y_test.shape
# Ridge Regression
from sklearn.linear_model import Ridge, RidgeCV
rid_reg = Ridge(alpha=100)
rid_reg.fit(X_train, Y_train)
Y_pred = rid_reg.predict(X_test)
# testing the model
print("MAE : ", mean_absolute_error(Y_test, Y_pred))
print("R2 SCORE : ", r2_score(Y_test, Y_pred))
print("Score :", rid_reg.score(X_test, Y_test))
print("MSE :", mean_squared_error(Y_test, Y_pred))
print("RMSE :", np.sqrt(mean_squared_error(Y_test, Y_pred)))
# let's find the best value for alpha by cross-validating (note: only two candidate alphas, 0.01 and 400.0, are compared here)
ridgecv = RidgeCV(
alphas=(0.01, 400.0), scoring="neg_mean_squared_error", normalize=True
)
ridgecv.fit(X_train, Y_train)
ridgecv.alpha_
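# Sketch (not used below, which keeps alpha=0.01): search a denser, log-spaced alpha grid.
# ridgecv_grid is a new variable for this sketch; normalize=True is skipped here since X_train
# is already standardized (and newer sklearn versions deprecate that argument).
ridgecv_grid = RidgeCV(alphas=np.logspace(-3, 3, 25), scoring="neg_mean_squared_error")
ridgecv_grid.fit(X_train, Y_train)
print("best alpha over the grid:", ridgecv_grid.alpha_)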
# Create the Ridge model using best alpha value:
from sklearn.linear_model import Ridge, RidgeCV
rid_reg = Ridge(alpha=0.01)
rid_reg.fit(X_train, Y_train)
Y_pred_ridge = rid_reg.predict(X_test)
# testing the model
ridge_mae = mean_absolute_error(Y_test, Y_pred_ridge)
ridge_r2_score = r2_score(Y_test, Y_pred_ridge)
ridge_rmse = np.sqrt(mean_squared_error(Y_test, Y_pred_ridge))
print("MAE for Ridge : ", ridge_mae)
print("R2 SCORE for Ridge: ", ridge_r2_score)
print("Score for Ridge:", rid_reg.score(X_test, Y_test))
print("MSE for Ridge : ", mean_squared_error(Y_test, Y_pred_ridge))
print("RMSE for Ridge : ", ridge_rmse)
Y_pred_ridge.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=Y_pred_ridge, y=Y_test, color="springgreen")
# LASSO REGRESSION
# Create Lasso model
from sklearn.linear_model import Lasso, LassoCV
ls = Lasso(alpha=0.8)
ls.fit(X_train, Y_train)
Y_pred = ls.predict(X_test)
# testing the model
print("MAE : ", mean_absolute_error(Y_test, Y_pred))
print("R2 SCORE : ", r2_score(Y_test, Y_pred))
print("Score :", ls.score(X_test, Y_test))
print("MSE :", mean_squared_error(Y_test, Y_pred))
print("RMSE :", np.sqrt(mean_squared_error(Y_test, Y_pred)))
# 1. LASSOCV
lassocv = LassoCV(alphas=None, cv=10, max_iter=100000, normalize=True)
lassocv.fit(X_train, Y_train)
ls.set_params(alpha=lassocv.alpha_)
ls.fit(X_train, Y_train)
mean_squared_error(Y_test, ls.predict(X_test))
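# For reproducibility, the selected alpha can be read programmatically instead of being
# hard-coded in the next cell (small sketch):
print("alpha chosen by LassoCV:", lassocv.alpha_)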
# Create the Lasso model using the best alpha value (hard-coded here from a previous lassocv.alpha_ run):
ls = Lasso(alpha=0.0198850177087539)
ls.fit(X_train, Y_train)
Y_pred_lasso = ls.predict(X_test)
# testing the model
lasso_mae = mean_absolute_error(Y_test, Y_pred_lasso)
lasso_r2_score = r2_score(Y_test, Y_pred_lasso)
lasso_rmse = np.sqrt(mean_squared_error(Y_test, Y_pred_lasso))
print("MAE for Lasso : ", lasso_mae)
print("R2 SCORE for Lasso : ", lasso_r2_score)
print("Score for Lasso:", ls.score(X_test, Y_test))
print("MSE for Lasso :", mean_squared_error(Y_test, Y_pred_lasso))
print("RMSE for Lasso :", lasso_rmse)
Y_pred_lasso.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=Y_pred_lasso, y=Y_test, color="darkorchid")
# POLYNOMIAL REGRESSION
# poly converter
polynomial_converter = PolynomialFeatures(degree=2, include_bias=False)
# convert X data and fit transform
poly_features_train = polynomial_converter.fit_transform(X_train)
poly_features_test = polynomial_converter.fit_transform(X_test)
# fit poly_train in elastic net
elastic_model = ElasticNetCV(l1_ratio=1, tol=0.01)
elastic_model.fit(poly_features_train, Y_train)
Y_pred_poly = elastic_model.predict(poly_features_test)
# Testing the model
poly_mae = mean_absolute_error(Y_test, Y_pred_poly)
poly_r2_score = r2_score(Y_test, Y_pred_poly)
poly_rmse = np.sqrt(mean_squared_error(Y_test, Y_pred_poly))
print("MAE for Polynomial: ", poly_mae)
print("R2 SCORE for Polynomial: ", poly_r2_score)
print("MSE for Polynomial :", mean_squared_error(Y_test, Y_pred_poly))
print("RMSE for Polynomial :", poly_rmse)
Y_pred_poly.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=Y_pred_poly, y=Y_test, color="coral")
# LINEAR REGRESSION
lin_reg = LinearRegression(normalize=True)
lin_reg.fit(X_train, Y_train)
test_pred_lin = lin_reg.predict(X_test)
train_pred_lin = lin_reg.predict(X_train)
linear_mae = mean_absolute_error(Y_test, test_pred_lin)
linear_r2_score = r2_score(Y_test, test_pred_lin)
linear_rmse = np.sqrt(mean_squared_error(Y_test, test_pred_lin))
print("MAE for Linear : ", linear_mae)
print("R2 SCORE for Linear : ", linear_r2_score)
print("Score for Linear:", lin_reg.score(X_test, Y_test))
print("MSE for Linear :", mean_squared_error(Y_test, test_pred_lin))
print("RMSE for Linear :", linear_rmse)
test_pred_lin.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=test_pred_lin, y=Y_test, color="blue")
# RANDOM FOREST REGRESSION
RF_reg = RandomForestRegressor(n_estimators=1000)
RF_reg.fit(X_train, Y_train)
test_pred_RF = RF_reg.predict(X_test)
train_pred_RF = RF_reg.predict(X_train)
RF_mae = mean_absolute_error(Y_test, test_pred_RF)
RF_r2_score = r2_score(Y_test, test_pred_RF)
RF_rmse = np.sqrt(mean_squared_error(Y_test, test_pred_RF))
print("MAE for RF : ", RF_mae)
print("R2 SCORE for RF : ", RF_r2_score)
print("Score for RF:", RF_reg.score(X_test, Y_test))
print("MSE for RF :", mean_squared_error(Y_test, test_pred_RF))
print("RMSE for RF :", RF_rmse)
test_pred_RF.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=test_pred_RF, y=Y_test, color="olivedrab")
# SVM REGRESSION
from sklearn.svm import SVR
svm_reg = SVR(kernel="rbf", C=1000000, epsilon=0.001)
svm_reg.fit(X_train, Y_train)
test_pred_svm = svm_reg.predict(X_test)
train_pred_svm = svm_reg.predict(X_train)
SVM_mae = mean_absolute_error(Y_test, test_pred_svm)
SVM_r2_score = r2_score(Y_test, test_pred_svm)
SVM_rmse = np.sqrt(mean_squared_error(Y_test, test_pred_svm))
print("MAE for RF : ", SVM_mae)
print("R2 SCORE for RF : ", SVM_r2_score)
print("Score for RF:", svm_reg.score(X_test, Y_test))
print("MSE for RF :", mean_squared_error(Y_test, test_pred_svm))
print("RMSE for RF :", SVM_rmse)
test_pred_svm.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=test_pred_svm, y=Y_test, color="lightseagreen")
# ELASTICNET REGRESSION
from sklearn.linear_model import ElasticNet
enet_reg = ElasticNet(alpha=0.1, l1_ratio=0.9, selection="random", random_state=42)
enet_reg.fit(X_train, Y_train)
test_pred_enet = enet_reg.predict(X_test)
train_pred_enet = enet_reg.predict(X_train)
ENET_mae = mean_absolute_error(Y_test, test_pred_enet)
ENET_r2_score = r2_score(Y_test, test_pred_enet)
ENET_rmse = np.sqrt(mean_squared_error(Y_test, test_pred_enet))
print("MAE for RF : ", ENET_mae)
print("R2 SCORE for RF : ", ENET_r2_score)
print("Score for RF:", enet_reg.score(X_test, Y_test))
print("MSE for RF :", mean_squared_error(Y_test, test_pred_enet))
print("RMSE for RF :", ENET_rmse)
test_pred_enet.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=test_pred_enet, y=Y_test, color="tomato")
# SGD REGRESSION
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(n_iter_no_change=250, penalty=None, eta0=0.0001, max_iter=100000)
sgd_reg.fit(X_train, Y_train)
test_pred_sgd = sgd_reg.predict(X_test)
train_pred_sgd = sgd_reg.predict(X_train)
SGD_mae = mean_absolute_error(Y_test, test_pred_sgd)
SGD_r2_score = r2_score(Y_test, test_pred_sgd)
SGD_rmse = np.sqrt(mean_squared_error(Y_test, test_pred_sgd))
print("MAE for RF : ", SGD_mae)
print("R2 SCORE for RF : ", SGD_r2_score)
print("Score for RF:", sgd_reg.score(X_test, Y_test))
print("MSE for RF :", mean_squared_error(Y_test, test_pred_sgd))
print("RMSE for RF :", SGD_rmse)
test_pred_sgd.min()
plt.figure(figsize=(10, 8))
sns.regplot(x=test_pred_sgd, y=Y_test, color="yellow")
models = pd.DataFrame(
{
"Regression Model": [
"Ridge",
"Lasso",
"Polynomial",
"Linear",
"SVM",
"RandomForest",
"ElasticNet",
"SGD",
],
"MAE Score": [
ridge_mae,
lasso_mae,
poly_mae,
linear_mae,
SVM_mae,
RF_mae,
ENET_mae,
SGD_mae,
],
"R2 Score": [
ridge_r2_score,
lasso_r2_score,
poly_r2_score,
linear_r2_score,
SVM_r2_score,
RF_r2_score,
ENET_r2_score,
SGD_r2_score,
],
"RMSE": [
ridge_rmse,
lasso_rmse,
poly_rmse,
linear_rmse,
SVM_rmse,
RF_rmse,
ENET_rmse,
SGD_rmse,
],
}
)
print("-----------MODEL EVALUATION-----------")
models.sort_values(by="MAE Score", ascending=True)
models.sort_values(by="RMSE", ascending=True)
models.set_index("Regression Model", inplace=True)
models["R2 Score"].plot(kind="barh", figsize=(10, 6))
details_test = descr(test_map)
details_test.reset_index(level=[0], inplace=True)
details_test.sort_values(by="missing_percent", ascending=False)
test_map.shape, train_map.shape
test_map
Model = RandomForestRegressor()
Model.fit(X, Y)
Prediction = Model.predict(test_map)
Prediction
len(Prediction)
sample = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
sample
sample["SalePrice"] = Prediction
sample
sample.to_csv("Submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009480.ipynb
[{"Id": 69009480, "ScriptId": 18832514, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7526544, "CreationDate": "07/25/2021 19:55:33", "VersionNumber": 1.0, "Title": "notebook77a8fb00de", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 571.0, "LinesInsertedFromPrevious": 571.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| false | 0 | 6,309 | 1 | 6,309 | 6,309 |
||
69009896
|
YFinance Stock Price Data for Numerai Signals
This YFinance data is regularly updated to be used for the weekly round of the Numerai Signals.
Kaggle dataset identifier: yfinance-stock-price-data-for-numerai-signals
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009896.ipynb
|
yfinance-stock-price-data-for-numerai-signals
|
code1110
|
[{"Id": 69009896, "ScriptId": 16878416, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 590240, "CreationDate": "07/25/2021 20:05:09", "VersionNumber": 103.0, "Title": "[NumeraiSignals] Accumulate YFiance Stock Prices", "EvaluationDate": "07/25/2021", "IsChange": false, "TotalLines": 278.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 278.0, "LinesInsertedFromFork": 153.0, "LinesDeletedFromFork": 424.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 125.0, "TotalVotes": 0}]
|
[{"Id": 91701061, "KernelVersionId": 69009896, "SourceDatasetVersionId": 2456339}]
|
[{"Id": 2456339, "DatasetId": 1333356, "DatasourceVersionId": 2498737, "CreatorUserId": 590240, "LicenseName": "Other (specified in description)", "CreationDate": "07/23/2021 20:51:58", "VersionNumber": 96.0, "Title": "YFinance Stock Price Data for Numerai Signals", "Slug": "yfinance-stock-price-data-for-numerai-signals", "Subtitle": "Daily Updates of Stock OHLCV (Close, High, Low, Open, Volume)", "Description": "This YFiance data is regularly updated to be used for the weekly round of the Numerai Signals.", "VersionNotes": "fAdded stock price data (updated 2021-07-23)", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1333356, "CreatorUserId": 590240, "OwnerUserId": 590240.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6278991.0, "CurrentDatasourceVersionId": 6358939.0, "ForumId": 1352281, "Type": 2, "CreationDate": "05/11/2021 06:17:07", "LastActivityDate": "05/11/2021", "TotalViews": 26229, "TotalDownloads": 8196, "TotalVotes": 55, "TotalKernels": 4}]
|
[{"Id": 590240, "UserName": "code1110", "DisplayName": "katsu1110", "RegisterDate": "04/18/2016", "PerformanceTier": 3}]
|
# 
# Simply accumulating the YFinance Data for Numerai Signals (DAILY UPDATE!)
# # Libraries
import numerapi
# !pip install git+https://github.com/leonhma/yfinance.git #drop-in replacement yfinance fork for failed downloads, h/t ceunen
import yfinance
import simplejson
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import gc
import pathlib
from tqdm.auto import tqdm
import json
from multiprocessing import Pool, cpu_count
import time
import requests as re
from datetime import datetime
from dateutil.relativedelta import relativedelta, FR
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# visualize
import matplotlib.pyplot as plt
import matplotlib.style as style
from matplotlib_venn import venn2, venn3
import seaborn as sns
from matplotlib import pyplot
from matplotlib.ticker import ScalarFormatter
sns.set_context("talk")
style.use("seaborn-colorblind")
import warnings
warnings.simplefilter("ignore")
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Today
today = datetime.now().strftime("%Y-%m-%d")
today
# # Config
class CFG:
INPUT_DIR = "../input/"
OUTPUT_DIR = "./"
START_DATE = "2002-12-01"
# START_DATE = '2021-05-01'
# Logging is always nice for your experiment:)
def init_logger(log_file="train.log"):
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
logger = init_logger(log_file=f"{CFG.OUTPUT_DIR}/{today}.log")
logger.info(f"Start Logging...today is {today}")
# # Kaggle API
# The following notebook is extremely useful!
# https://www.kaggle.com/paultimothymooney/exploring-the-kaggle-api
#
# config
config_path = "../input/kagglejson/kaggle.json"
with open(config_path) as f:
config = json.load(f)
logger.info("kaggle API config loaded!")
import nbformat
KAGGLE_CONFIG_DIR = os.path.join(os.path.expandvars("$HOME"), ".kaggle")
os.makedirs(KAGGLE_CONFIG_DIR, exist_ok=True)
with open(os.path.join(KAGGLE_CONFIG_DIR, "kaggle.json"), "w") as f:
json.dump({"username": config["username"], "key": config["key"]}, f)
logger.info("Kaggle API setup!")
# # Get Numerai-Eligible Tickers
napi = numerapi.SignalsAPI()
logger.info("numerai api setup!")
# read in list of active Signals tickers which can change slightly era to era
eligible_tickers = pd.Series(napi.ticker_universe(), name="ticker")
logger.info(f"Number of eligible tickers: {len(eligible_tickers)}")
# read in the yahoo-to-numerai ticker map, h/t wsouza
# this ticker map is a work in progress and not guaranteed to be 100% correct
ticker_map = pd.read_csv(
"https://numerai-signals-public-data.s3-us-west-2.amazonaws.com/signals_ticker_map_w_bbg.csv"
)
ticker_map = ticker_map[ticker_map.bloomberg_ticker.isin(eligible_tickers)]
numerai_tickers = ticker_map["bloomberg_ticker"]
yfinance_tickers = ticker_map["yahoo"]
logger.info(f"Number of eligible tickers in map: {len(ticker_map)}")
print(ticker_map.shape)
ticker_map.head()
# # Fetch Data
def remove_strange_ticker(df):
out_tickers = (
df.loc[(df["close"] <= 0) | (df["volume"] <= 0), "ticker"].unique().tolist()
)
print("{:,} out tickers: {}".format(len(out_tickers), out_tickers))
df = df.loc[~df["ticker"].isin(out_tickers)]
return df
def fetch_yfinance(ticker_map, start="2002-01-31"):
"""
# fetch yfinance data
:INPUT:
- ticker_map : Numerai eligible ticker map (pd.DataFrame)
- start : date (str)
:OUTPUT:
- full_data : pd.DataFrame (date, ticker, price, volume)
"""
# ticker map
numerai_tickers = ticker_map["bloomberg_ticker"]
yfinance_tickers = ticker_map["yahoo"]
# fetch
raw_data = yfinance.download(
yfinance_tickers.str.cat(sep=" "), start=start, threads=True
)
# format
cols = ["Adj Close", "Close", "High", "Low", "Open", "Volume"]
full_data = raw_data[cols].stack().reset_index()
full_data.columns = [
"date",
"ticker",
"close",
"raw_close",
"high",
"low",
"open",
"volume",
]
full_data["date"] = pd.to_datetime(
full_data["date"], format="%Y-%m-%d"
).dt.strftime("%Y-%m-%d")
    # map yfinance tickers to numerai tickers
full_data["ticker"] = full_data.ticker.map(
dict(zip(yfinance_tickers, numerai_tickers))
)
# # remove strange tickers
# full_data = remove_strange_ticker(full_data)
return full_data
full_data = fetch_yfinance(ticker_map, start=CFG.START_DATE)
logger.info(
"{:,} records, {:,} tickers fetched!".format(
full_data.shape[0], full_data["ticker"].nunique()
)
)
full_data.head()
full_data.tail()
# load already stored data
df = pd.read_csv("../input/yfinance-stock-price-data-for-numerai-signals/full_data.csv")
# df = remove_strange_ticker(df)
logger.info(
"{:,} records, {:,} tickers loaded!".format(df.shape[0], df["ticker"].nunique())
)
print(df.shape)
# concat
full_data = pd.concat([full_data, df]).drop_duplicates(keep="last")
del df
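# the same (ticker, date) pair can still appear in both the fresh download and the
# stored file, so collapse to one row per pair with a column-wise max before sorting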
full_data = (
full_data.groupby(["ticker", "date"])
.max()
.reset_index()
.sort_values(by=["ticker", "date"])
)
logger.info(
"Combined: {:,} records, {:,} tickers exist!".format(
full_data.shape[0], full_data["ticker"].nunique()
)
)
full_data.head()
logger.info(
    "{:.2f}% volume data missing".format(
        100 * np.sum(full_data["volume"] == 0) / len(full_data)
    )
)
logger.info(
    "{:.2f}% price data missing".format(
        100 * np.sum(full_data["close"] == 0) / len(full_data)
    )
)
# # Load Numerai Targets
def read_numerai_signals_targets():
# read in Signals targets
numerai_targets = "https://numerai-signals-public-data.s3-us-west-2.amazonaws.com/signals_train_val.csv"
targets = pd.read_csv(numerai_targets)
# to datetime int
targets["friday_date"] = (
pd.to_datetime(targets["friday_date"].astype(str), format="%Y-%m-%d")
.dt.strftime("%Y%m%d")
.astype(int)
)
# # train, valid split
# train_targets = targets.query('data_type == "train"')
# valid_targets = targets.query('data_type == "validation"')
return targets
targets = read_numerai_signals_targets()
logger.info(
"Target shape: {}, dates {} - {}".format(
targets.shape, targets["friday_date"].min(), targets["friday_date"].max()
)
)
targets.head()
targets.tail()
# check ticker overlap
venn3(
[
set(full_data["ticker"].unique().tolist()),
set(targets.query('data_type == "train"')["ticker"].unique().tolist()),
set(targets.query('data_type == "validation"')["ticker"].unique().tolist()),
],
set_labels=("yf price", "train target", "valid target"),
)
# # Save
full_data.to_csv(pathlib.Path(f"{CFG.OUTPUT_DIR}/full_data.csv"), index=False)
logger.info(
"DONE! Shape={}, {:,} tickers".format(
full_data.shape, full_data["ticker"].nunique()
)
)
full_data.head()
full_data.tail()
# # Updating Kaggle Dataset
# Create dataset-specific JSON metadata file
# https://github.com/Kaggle/kaggle-api/wiki/Dataset-Metadata
name_of_new_dataset = "YFinance-Stock-Price-Data-for-Numerai-Signals"
dataset_meta_template = lambda user_id, title, file_id, nb_path: {
"title": name_of_new_dataset.replace("-", " "),
"subtitle": "Daily Updates of Stock Price (Close, High, Low, Open, Volume)",
"description": "This YFiance data is regularly updated to be used for the weekly round of the Numerai Signals.",
"id": f"{config['username']}/{name_of_new_dataset.lower()}",
"licenses": [{"name": "Other"}],
"resources": [
{
"path": "full_data.csv",
"description": "Stock price data obtained via the YFinance API",
},
],
}
path_of_current_data = "working"
with open("dataset-metadata.json", "w") as f:
meta_dict = dataset_meta_template(
config["username"],
name_of_new_dataset,
name_of_new_dataset,
path_of_current_data,
)
json.dump(meta_dict, f)
############ First Time ###############
# !kaggle datasets create -p .
######################################
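# For later runs, a minimal sketch of pushing the refreshed full_data.csv as a new
# dataset version with the standard Kaggle CLI (kept commented out, like the command
# above, so it only runs when intended):
############ Every Update #############
# !kaggle datasets version -p . -m "update {today}"
######################################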
| false | 1 | 2,799 | 0 | 2,864 | 2,799 |
||
69009870
|
<jupyter_start><jupyter_text>LightAutoML framework (LAMA)
Kaggle dataset identifier: lightautoml-framework-lama
<jupyter_script># # Offline LightAutoML installation
# # Libraries imports
import numpy as np
import pandas as pd
pd.set_option("max_rows", 300)
pd.set_option("max_columns", 300)
import warnings
warnings.filterwarnings("ignore")
import os
from collections import ChainMap
from joblib import Parallel, delayed
from tqdm.notebook import tqdm
from sklearn.model_selection import KFold, train_test_split
# LightAutoML presets, task and report generation
from lightautoml.automl.presets.tabular_presets import (
TabularAutoML,
TabularUtilizedAutoML,
)
from lightautoml.tasks import Task
from lightautoml.report.report_deco import ReportDeco
from matplotlib import pyplot as plt, rcParams
rcParams.update({"font.size": 22})
# # Global constants
INPUT_PATH = "../input/optiver-realized-volatility-prediction/"
N_THREADS = 4
# # Functions for preprocess
def calc_wap(df):
wap = (df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]) / (
df["bid_size1"] + df["ask_size1"]
)
return wap
def calc_wap2(df):
wap = (df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]) / (
df["bid_size2"] + df["ask_size2"]
)
return wap
def calc_mean_price(df):
mp = (df["bid_price1"] + df["ask_price1"]) / 2
return mp
def log_return(list_stock_prices):
return np.log(list_stock_prices).diff()
def realized_volatility(series):
return np.sqrt(np.sum(series**2))
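# note: realized_volatility implements RV = sqrt(sum_t r_t^2) over the log returns
# r_t produced by log_return(), which matches the realized volatility definition
# used as the competition target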
def count_unique(series):
return len(np.unique(series))
def calc_array_feats(arr, prefix, q_cnt=10, diff=True):
if diff:
arr = np.diff(np.array(arr))
percs = np.linspace(0, 100, q_cnt + 1).astype(int)
cols = [prefix + "__P" + str(p) for p in percs]
if len(arr) > 0:
vals = np.percentile(arr, percs)
else:
vals = [np.nan] * len(cols)
res = dict(zip(cols, vals))
return res
def rmspe(y_true, y_pred, **kwargs):
return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
def create_targ_enc_feature(
col, targ_col, transform_func, tr_data, te_data, n_folds=20, n_runs=1
):
# Test col is transformed by the whole train set aggregation
stock_id_target_trans = tr_data.groupby(col)[targ_col].agg(transform_func)
te_col_transformed = te_data[col].map(stock_id_target_trans)
# Train col can be transformed only inside CV not to overfit
# New values imputed with global train values
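    # each run's fold-wise encodings are accumulated and divided by n_runs, i.e.
    # averaged across runs (note the fixed random_state, so every run uses the same split)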
glob_val = transform_func(tr_data[col].values)
tr_col_transformed = np.repeat(0.0, tr_data.shape[0])
for i in range(n_runs):
kf = KFold(n_splits=n_folds, shuffle=True, random_state=13)
for idx_train, idx_val in kf.split(tr_data):
target_trans = (
tr_data.iloc[idx_train].groupby(col)[targ_col].agg(transform_func)
)
tr_col_transformed[idx_val] += (
tr_data[col].iloc[idx_val].map(target_trans).fillna(glob_val) / n_runs
)
return tr_col_transformed, te_col_transformed
# # EDA and data visualization
def create_plot(stock_id, time_id, book_train, trade_train):
# Select time_id
bt = book_train.query("time_id == {}".format(time_id))
bt["wap"] = calc_wap(bt)
trades = trade_train.query("time_id == {}".format(time_id))
trades["seconds_in_bucket"] = np.maximum(trades["seconds_in_bucket"] - 1, 0)
# Combine trades prices/timestamps with book prices/timestamps
diffs = []
times = set(bt["seconds_in_bucket"].values)
for t in trades["seconds_in_bucket"].values:
d = 0
while t >= 0:
if t in times:
diffs.append(d)
break
else:
t -= 1
d += 1
if t == -1:
print("Negative!")
trades["seconds_in_bucket"] -= diffs
# Merged and calc the color (buy/sell)
merged = pd.merge(
bt,
trades[["seconds_in_bucket", "price", "size"]],
on="seconds_in_bucket",
how="left",
).dropna()
merged["diff_with_bid"] = merged["price"] - merged["bid_price1"]
merged["diff_with_ask"] = merged["ask_price1"] - merged["price"]
merged["color"] = (
(merged["diff_with_bid"] < merged["diff_with_ask"])
.astype(int)
.map({0: "green", 1: "red"})
)
fig = plt.figure(figsize=(60, 20))
plt.plot(
bt["seconds_in_bucket"].values,
bt["ask_price2"].values,
"b--",
linewidth=1,
label="Ask price 2",
)
plt.plot(
bt["seconds_in_bucket"].values,
bt["ask_price1"].values,
"b",
linewidth=2,
label="Ask price 1",
)
plt.plot(
bt["seconds_in_bucket"].values, bt["wap"].values, "m", linewidth=1, label="WAP"
)
plt.plot(
bt["seconds_in_bucket"].values,
bt["bid_price1"].values,
"g",
linewidth=2,
label="Bid price 1",
)
plt.plot(
bt["seconds_in_bucket"].values,
bt["bid_price2"].values,
"g--",
linewidth=1,
label="Bid price 2",
)
fig.axes[0].fill_between(
bt["seconds_in_bucket"].values,
bt["bid_price1"].values,
bt["ask_price1"].values,
color="orange",
alpha=0.15,
)
fig.axes[0].fill_between(
bt["seconds_in_bucket"].values,
bt["bid_price2"].values,
bt["bid_price1"].values,
color="green",
alpha=0.25,
)
fig.axes[0].fill_between(
bt["seconds_in_bucket"].values,
bt["ask_price1"].values,
bt["ask_price2"].values,
color="blue",
alpha=0.15,
)
mask = (merged["color"] == "green").values
plt.scatter(
merged["seconds_in_bucket"].values[mask],
merged["price"].values[mask],
marker="*",
color=merged["color"].values[mask],
s=500,
label="Buy trades",
)
mask = (merged["color"] == "red").values
plt.scatter(
merged["seconds_in_bucket"].values[mask],
merged["price"].values[mask],
marker="*",
color=merged["color"].values[mask],
s=500,
label="Sell trades",
)
plt.grid(True)
plt.legend()
plt.title("Stock_id = {}, time_id = {}".format(stock_id, time_id))
plt.xlabel("seconds_in_bucket")
plt.ylabel("Price")
plt.show()
for stock_id in [0, 1, 2]:
# Read data
bt = pd.read_parquet(INPUT_PATH + "book_train.parquet/stock_id={}".format(stock_id))
tt = pd.read_parquet(
INPUT_PATH + "trade_train.parquet/stock_id={}".format(stock_id)
)
time_ids = bt["time_id"].value_counts().index.values[[0, -1]]
for time_id in time_ids:
create_plot(stock_id, time_id, bt, tt)
# # Feature engineering
def create_features(stock_id, time_id, bt, trades, last_s):
q_cnt = 5
bt = bt[bt["seconds_in_bucket"] >= last_s]
trades = trades[trades["seconds_in_bucket"] > bt["seconds_in_bucket"].min() + 1]
# BOOK PART
bt["wap"] = calc_wap(bt)
bt["log_return"] = log_return(bt["wap"])
bt["wap2"] = calc_wap2(bt)
bt["log_return2"] = log_return(bt["wap2"])
bt["mean_price"] = calc_mean_price(bt)
bt["log_return_mean_price"] = log_return(bt["mean_price"])
bt["abs_wap_balance"] = abs(bt["wap"] - bt["wap2"])
bt["wap_balance"] = bt["wap"] - bt["wap2"]
bt["price_spread"] = (
2
* (bt["ask_price1"] - bt["bid_price1"])
/ (bt["ask_price1"] + bt["bid_price1"])
)
bt["bid_spread"] = (bt["bid_price1"] - bt["bid_price2"]) / bt["bid_price1"]
bt["ask_spread"] = (bt["ask_price1"] - bt["ask_price2"]) / bt["ask_price1"]
bt["total_volume"] = (bt["ask_size1"] + bt["ask_size2"]) + (
bt["bid_size1"] + bt["bid_size2"]
)
bt["bid_volume"] = bt["bid_size1"] + bt["bid_size2"]
bt["ask_volume"] = bt["ask_size1"] + bt["ask_size2"]
bt["abs_volume_imbalance"] = abs(bt["bid_volume"] - bt["ask_volume"])
bt["volume_imbalance"] = bt["bid_volume"] - bt["ask_volume"]
features_arr = [
{
"rv_1": realized_volatility(bt["log_return"]),
"rv_2": realized_volatility(bt["log_return2"]),
"rv_mp": realized_volatility(bt["log_return_mean_price"]),
}
]
for col in [
"abs_wap_balance",
"wap_balance",
"price_spread",
"bid_spread",
"ask_spread",
"total_volume",
"abs_volume_imbalance",
"volume_imbalance",
]:
features_arr.append(calc_array_feats(bt[col].values, "B_" + col, q_cnt, False))
for col in ["seconds_in_bucket", "bid_volume", "ask_volume"]:
features_arr.append(calc_array_feats(bt[col].values, "B_" + col, q_cnt, True))
# ==========================================
# TRADES PART ==========================================
trades["seconds_in_bucket"] = np.maximum(trades["seconds_in_bucket"] - 1, 0)
# Combine trades prices/timestamps with book prices/timestamps
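    # each trade timestamp is walked backwards one second at a time until it hits a
    # second present in the book snapshot; 'diffs' records how far back we walked, and
    # subtracting it snaps every trade onto its most recent book update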
diffs = []
times = set(bt["seconds_in_bucket"].values)
for t in trades["seconds_in_bucket"].values:
d = 0
while t >= 0:
if t in times:
diffs.append(d)
break
else:
t -= 1
d += 1
trades["seconds_in_bucket"] -= diffs
features_arr.append(calc_array_feats(np.array(diffs), "T_diffs", q_cnt, False))
vc_diffs = pd.Series(diffs).value_counts()
features_arr.append(
calc_array_feats(vc_diffs.values, "T_vc_diffs_values", q_cnt, False)
)
features_arr.append(
calc_array_feats(vc_diffs.index.values, "T_vc_diffs_index", q_cnt, False)
)
features_arr.append({"T_len_vc_diffs": len(vc_diffs)})
for col in ["size", "order_count"]:
features_arr.append(
calc_array_feats(trades[col].values, "T_" + col, q_cnt, False)
)
for col in ["seconds_in_bucket", "price"]:
features_arr.append(
calc_array_feats(trades[col].values, "T_" + col, q_cnt, True)
)
# ==========================================
# MERGED PART
merged = pd.merge(
bt,
trades[["seconds_in_bucket", "price", "size"]],
on="seconds_in_bucket",
how="left",
).dropna()
merged["diff_with_bid"] = merged["price"] - merged["bid_price1"]
merged["diff_with_ask"] = merged["ask_price1"] - merged["price"]
merged["side"] = (merged["diff_with_bid"] < merged["diff_with_ask"]).astype(int)
side = merged["side"].values
merged["diff_with_side_volume1"] = (
np.array(
[row[s] for row, s in zip(merged[["ask_size1", "bid_size1"]].values, side)]
)
- merged["size"]
)
merged["diff_with_side_full_volume"] = (
np.array(
[
row[s]
for row, s in zip(merged[["ask_volume", "bid_volume"]].values, side)
]
)
- merged["size"]
)
features_arr.append({"M_cnt": len(merged), "M_mean_side": np.mean(side)})
cside = np.cumsum(2 * side - 1)
features_arr.append(calc_array_feats(cside, "M_cside", q_cnt, False))
for col in [
"diff_with_side_volume1",
"diff_with_side_full_volume",
"diff_with_bid",
"diff_with_ask",
]:
features_arr.append(
calc_array_feats(merged[col].values, "M_" + col, q_cnt, False)
)
# ==========================================
features = dict(ChainMap(*features_arr))
features = {str(last_s) + "_" + k: features[k] for k in features}
features["#stock_id"] = stock_id
features["#time_id"] = time_id
features["#row_id"] = "{}-{}".format(stock_id, time_id)
return features
def calc_features_dataset_for_stock(stock_id, test_flg=False, debug=False):
part = "train"
if test_flg:
part = "test"
book_groups = pd.read_parquet(
INPUT_PATH + "book_{}.parquet/stock_id={}".format(part, stock_id)
).groupby("time_id")
trade_groups = pd.read_parquet(
INPUT_PATH + "trade_{}.parquet/stock_id={}".format(part, stock_id)
).groupby("time_id")
feats_arr = []
bg_keys = book_groups.groups.keys()
tr_keys = set(trade_groups.groups.keys())
sample_trades_df = pd.DataFrame(
columns=["time_id", "seconds_in_bucket", "price", "size", "order_count"]
)
for time_id in tqdm(bg_keys):
arr = []
for last_s in [600, 450, 300, 150]:
b_gr = book_groups.get_group(time_id)
if time_id in tr_keys:
t_gr = trade_groups.get_group(time_id)
else:
t_gr = sample_trades_df.copy()
arr.append(create_features(stock_id, time_id, b_gr, t_gr, 600 - last_s))
feats_arr.append(dict(ChainMap(*arr)))
if debug:
break
df = pd.DataFrame(feats_arr)
df = df[sorted(df.columns)].rename(
{"#stock_id": "stock_id", "#time_id": "time_id", "#row_id": "row_id"}, axis=1
)
print("Stock {} ready".format(stock_id))
return df
df = calc_features_dataset_for_stock(stock_id=0, test_flg=False, debug=True)
df
# # Multiprocessed preprocessor wrapper
def multiprocessed_df_creation(stock_ids, n_jobs=4, test_flg=False, debug=False):
res_df = Parallel(n_jobs=n_jobs, verbose=1)(
delayed(calc_features_dataset_for_stock)(stock_id, test_flg, debug)
for stock_id in stock_ids
)
res_df = pd.concat(res_df).reset_index(drop=True)
return res_df
stock_ids = [0, 1, 2, 3, 4, 5]
multiprocessed_df_creation(
stock_ids=stock_ids, n_jobs=N_THREADS, test_flg=False, debug=True
)
# # Generate full train
train = pd.read_csv(INPUT_PATH + "train.csv")
train.head()
train_stock_ids = train.stock_id.unique()
train_stock_ids
train_data = multiprocessed_df_creation(
train_stock_ids, n_jobs=N_THREADS, test_flg=False, debug=False
)
train_data = pd.merge(train, train_data, on=["stock_id", "time_id"], how="left")
train_data
train_data.shape
# # Generate full test
test = pd.read_csv(INPUT_PATH + "test.csv")
test.head()
DEBUG = test.shape[0] == 3
DEBUG
test_stock_ids = test.stock_id.unique()
test_stock_ids
test_data = multiprocessed_df_creation(
test_stock_ids, n_jobs=N_THREADS, test_flg=True, debug=False
)
test_data = pd.merge(test, test_data, on=["stock_id", "time_id", "row_id"], how="left")
test_data
# # Create LightAutoML model
N_FOLDS = 10
TIMEOUT = 24 * 3600
# Default params for LGBM models
lgbm_params = {
"objective": "rmse",
"metric": "rmse",
"boosting_type": "gbdt",
"early_stopping_rounds": 30,
"learning_rate": 0.01,
"lambda_l1": 1.0,
"lambda_l2": 1.0,
"feature_fraction": 0.8,
"bagging_fraction": 0.8,
}
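# these defaults keep the boosting conservative: RMSE objective/metric, a small
# learning rate with early stopping, L1/L2 regularisation, and 80% feature / 80% row
# subsampling per tree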
def create_additional_feats(tr_data, te_data):
for t_col in ["target", "0_rv_1", "150_rv_1"]:
print(t_col)
for name, func in [("mean", np.mean), ("min", np.min), ("max", np.max)]:
print("\t", name)
tr_col, te_col = create_targ_enc_feature(
"stock_id", t_col, func, tr_data, te_data, 20, 3
)
tr_data["stock_id_enc_{}_{}".format(name, t_col)] = tr_col
te_data["stock_id_enc_{}_{}".format(name, t_col)] = te_col
for d in [tr_data, te_data]:
d["0rv1_diff_150rv1"] = d["0_rv_1"] - d["150_rv_1"]
d["0rv1_del_150rv1"] = d["0_rv_1"] / d["150_rv_1"]
if DEBUG:
tr_data, te_data = train_test_split(train_data, test_size=0.2, random_state=42)
print(
"Data splitted. Parts sizes: tr_data = {}, te_data = {}".format(
tr_data.shape, te_data.shape
)
)
def create_and_train_model(tr_data, te_data):
# Task setup - mse loss and mse metric. To optimize rmspe we use object weights for the loss (weight column)
task = Task(
"reg",
)
tr_data["weight"] = 1 / tr_data["target"] ** 2
# Columns roles setup
roles = {
"target": "target",
"drop": ["row_id", "time_id"],
"category": "stock_id",
"weights": "weight",
}
# Train LightAutoML model
automl = TabularAutoML(
task=task,
timeout=TIMEOUT,
cpu_limit=N_THREADS,
general_params={"use_algos": [["lgb", "lgb_tuned", "cb_tuned"]]},
reader_params={"n_jobs": N_THREADS, "cv": N_FOLDS},
tuning_params={"max_tuning_time": 600},
lgb_params={"default_params": lgbm_params, "freeze_defaults": True},
verbose=3,
)
oof_pred = automl.fit_predict(tr_data, roles=roles)
print(
"OOF prediction for tr_data:\n{}\nShape = {}".format(oof_pred, oof_pred.shape)
)
# Fast feature importances calculation
fast_fi = automl.get_feature_scores("fast")
fast_fi.set_index("Feature")["Importance"].head(100).plot.bar(
figsize=(50, 10), grid=True
)
    # Let's see what the final model looks like
print(automl.create_model_str_desc())
# Test data prediction
te_pred = automl.predict(te_data)
print("Prediction for te_data:\n{}\nShape = {}".format(te_pred, te_pred.shape))
return oof_pred.data[:, 0], te_pred.data[:, 0], automl
if DEBUG:
create_additional_feats(tr_data, te_data)
oof_pred, valid_pred, automl = create_and_train_model(tr_data, te_data)
# Check scores
print("OOF RMSPE score = {:.5f}".format(rmspe(tr_data["target"], oof_pred)))
print("TEST RMSPE score = {:.5f}".format(rmspe(te_data["target"], valid_pred)))
create_additional_feats(tr_data, test_data)
test_pred = automl.predict(test_data)
submission = test_data[["row_id"]]
submission["target"] = test_pred.data[:, 0]
submission.to_csv("submission.csv", index=False)
if not DEBUG:
create_additional_feats(train_data, test_data)
oof_pred, test_pred, automl = create_and_train_model(train_data, test_data)
# Check scores
print("OOF RMSPE score = {:.5f}".format(rmspe(train_data["target"], oof_pred)))
submission = test_data[["row_id"]]
submission["target"] = test_pred
submission.to_csv("submission.csv", index=False)
# # Bonus. Feature importances and model structure
# Fast feature importances calculation
fast_fi = automl.get_feature_scores("fast")
fast_fi.set_index("Feature")["Importance"].head(100).plot.bar(
figsize=(50, 10), grid=True
)
# Let's see what the final model looks like
print(automl.create_model_str_desc())
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009870.ipynb
|
lightautoml-framework-lama
|
alexryzhkov
|
[{"Id": 69009870, "ScriptId": 18830238, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 19099, "CreationDate": "07/25/2021 20:04:30", "VersionNumber": 1.0, "Title": "EDA, FE and model creation (LightAutoML starter)", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 490.0, "LinesInsertedFromPrevious": 490.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91701026, "KernelVersionId": 69009870, "SourceDatasetVersionId": 2462241}]
|
[{"Id": 2462241, "DatasetId": 1468103, "DatasourceVersionId": 2504667, "CreatorUserId": 19099, "LicenseName": "Unknown", "CreationDate": "07/25/2021 17:16:48", "VersionNumber": 5.0, "Title": "LightAutoML framework (LAMA)", "Slug": "lightautoml-framework-lama", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Update LightAutoML to 0.2.16.2", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1468103, "CreatorUserId": 19099, "OwnerUserId": 19099.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2462241.0, "CurrentDatasourceVersionId": 2504667.0, "ForumId": 1487732, "Type": 2, "CreationDate": "07/14/2021 23:26:38", "LastActivityDate": "07/14/2021", "TotalViews": 719, "TotalDownloads": 5, "TotalVotes": 5, "TotalKernels": 1}]
|
[{"Id": 19099, "UserName": "alexryzhkov", "DisplayName": "Alexander Ryzhkov", "RegisterDate": "10/22/2011", "PerformanceTier": 4}]
|
| false | 0 | 5,981 | 0 | 6,009 | 5,981 |
||
69009687
|
<jupyter_start><jupyter_text>Netflix Original Films & IMDB Scores
### Context
This dataset consists of all Netflix original films released as of June 1st, 2021. Additionally, it also includes all Netflix documentaries and specials. The data was webscraped off of [this](https://en.wikipedia.org/wiki/Lists_of_Netflix_original_films) Wikipedia page, which was then integrated with a dataset consisting of all of their corresponding IMDB scores. IMDB scores are voted on by community members, and the majority of the films have 1,000+ reviews.
### Content
Included in the dataset is:
- Title of the film
- Genre of the film
- Original premiere date
- Runtime in minutes
- IMDB scores (as of 06/01/21)
- Languages currently available (as of 06/01/21)
Kaggle dataset identifier: netflix-original-films-imdb-scores
<jupyter_script>#
# “WELCOME TO NETFLIX! ENJOY WATCHING IT!!”
# <img
# src="https://media1.tenor.com/images/f6b11bd53411d94338117381cf9a9b9b/tenor.gif?itemid=18131525" style="width:100%;height:100%;">
# # Introduction
# Hello again with my new notebook! I wanted to choose an interesting data set for my next notebook, so here we are. I will do my best to create awesome visualizations throughout this notebook. I hope you will like it as much as you like watching Netflix😆
# 🚀Sooo let's goo!
# ## Libraries ⬇
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
from matplotlib.cm import ScalarMappable
from matplotlib.lines import Line2D
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from textwrap import wrap
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Set Style
sns.set_style("white")
mpl.rcParams["xtick.labelsize"] = 16
mpl.rcParams["ytick.labelsize"] = 16
mpl.rcParams["axes.spines.left"] = False
mpl.rcParams["axes.spines.right"] = False
mpl.rcParams["axes.spines.top"] = False
# ## Colors 🎨
#
colors_blue = ["#132C33", "#264D58", "#17869E", "#51C4D3", "#B4DBE9"]
colors_dark = ["#1F1F1F", "#313131", "#636363", "#AEAEAE", "#DADADA"]
colors_red = ["#331313", "#582626", "#9E1717", "#D35151", "#E9B4B4"]
colors_mix = [
"#17869E",
"#264D58",
"#179E66",
"#D35151",
"#E9DAB4",
"#E9B4B4",
"#D3B651",
"#6351D3",
]
colors_div = ["#132C33", "#17869E", "#DADADA", "#D35151", "#331313"]
sns.palplot(colors_blue)
sns.palplot(colors_dark)
sns.palplot(colors_red)
sns.palplot(colors_mix)
sns.palplot(colors_div)
# ## Importing Data ⬇
#
df = pd.read_csv(
"/kaggle/input/netflix-original-films-imdb-scores/NetflixOriginals.csv"
)
# ## First Look To Data 🕵️♀️
#
df.head()
df.isnull().sum()
df.info()
df.describe()
print("Number of categories:{}".format(df["Genre"].nunique()))
print(df["Genre"].unique())
print("*" * 75)
print("Number of Languages:{}".format(df["Language"].nunique()))
print(df["Language"].unique())
# > 📌 We all know that Netflix is a huge platform for watching series, movies and much more. As you can see, there are ***115*** categories and ***38*** languages to choose from. I guess the next slogan of Netflix might be ***'We have content for everyone'***.
# ## Data Visualizations 📊
# I wanted to use a ***circular barplot*** instead of ***classic bar plots*** this time. While checking resources for it, I [found this one](https://www.python-graph-gallery.com/web-circular-barplot-with-matplotlib). It is a great source if someone wants to learn more about circular barplots or any other plots. I learned everything from there, thanks a lot to him!
imr = (
df.groupby("Genre", as_index=False)
.mean()
.sort_values(by="Runtime", ascending=False)
.reset_index(drop=True)
)
# Values for the x axis
ANGLES = np.linspace(0.05, 2 * np.pi - 0.05, 10, endpoint=False)
# Average runtime of the top 10 genres
LENGTHS = imr.iloc[0:10, 1].values
# Genre label
GENRE = imr["Genre"].iloc[0:10].values
# Colors
COLORS = ["#6C5B7B", "#C06C84", "#F67280", "#F8B195"]
# Colormap
cmap = mpl.colors.LinearSegmentedColormap.from_list("my color", COLORS, N=256)
# Normalizer
norm = mpl.colors.Normalize(vmin=LENGTHS.min(), vmax=LENGTHS.max())
# Normalized colors. Each average runtime is mapped to a color in the
# color scale 'cmap'
COLORS = cmap(norm(LENGTHS))
# Some layout stuff ----------------------------------------------
# Initialize layout in polar coordinates
fig, ax = plt.subplots(figsize=(9, 12.6), subplot_kw={"projection": "polar"})
# Set background color to white, both axis and figure.
fig.patch.set_facecolor("white")
ax.set_facecolor("white")
ax.set_theta_offset(1.2 * np.pi / 2)
ax.set_ylim(-50, 175)
# Add geometries to the plot -------------------------------------
# See the zorder to manipulate which geometries are on top
# Add bars to represent the average runtime of each genre
bars = ax.bar(ANGLES, LENGTHS, color=COLORS, alpha=0.9, width=0.52)
# ax.bar_label(bars,label_type="edge",fontsize=16)
# Add dashed vertical lines. These are just references
ax.vlines(ANGLES, 0, 170, color="black", ls=(0, (4, 4)))
# Remove lines for polar axis (x)
ax.xaxis.grid(False)
# ax.yaxis.grid(False)
# Add labels for the regions -------------------------------------
# Note the 'wrap()' function.
# The '5' sets the wrap width to 5 characters per line,
# and 'break_long_words=False' keeps words longer than
# 5 characters intact instead of splitting them.
GENRE = ["\n".join(wrap(r, 5, break_long_words=False)) for r in GENRE]
GENRE
# Set the labels
ax.set_xticks(ANGLES)
ax.set_xticklabels(GENRE, size=12)
# Remove unnecesary guides ---------------------------------------
# Remove lines for polar axis (x)
ax.xaxis.grid(False)
# Put grid lines for the radial axis (y) at 0, 25, 75, 125, and 175
ax.set_yticklabels([])
ax.set_yticks([0, 25, 75, 125, 175])
# Remove spines
ax.spines["start"].set_color("none")
ax.spines["polar"].set_color("none")
# Adjust padding of the x axis labels ----------------------------
# This is going to add extra space around the labels for the
# ticks of the x axis.
XTICKS = ax.xaxis.get_major_ticks()
for tick in XTICKS:
tick.set_pad(10)
# Add custom annotations -----------------------------------------
# The following represent the heights in the values of the y axis
PAD = 10
ax.text(-0.2 * np.pi / 2, 25 + PAD, "25", ha="center", size=12)
ax.text(-0.2 * np.pi / 2, 75 + PAD, "75", ha="center", size=12)
ax.text(-0.2 * np.pi / 2, 125 + PAD, "125", ha="center", size=12)
ax.text(-0.2 * np.pi / 2, 175 + PAD, "175", ha="center", size=12)
title = "Top 10 Longest Content Genre by A"
fig.text(0.1, 0.83, title, fontsize=25, weight="bold", ha="left", va="baseline")
#
# “THIS IS A ROBBERY!”
# <img
# src="https://1.bp.blogspot.com/-cgODERMTBHY/XrgJGTlSNiI/AAAAAAAAAE4/rRVQHLBkWu8pksR1jku5F1YPv6q1LjL9gCLcBGAsYHQ/s1600/MoneyHeist.gif" style="width:100%;height:100%;">
# - The Heist Film category is the longest by average duration. Even though they are that long, is there anyone who says "I'm bored"?🤔 I don't think so. Ah also, if you didn't try Money Heist yet, go and watch it immediately. Wait wait, after finishing the notebook please😂
meanx = imr["IMDB Score"].mean()
meany = imr["Runtime"].mean()
fig, ax = plt.subplots(figsize=(18, 8), dpi=75)
sns.scatterplot(
data=imr,
x="IMDB Score",
y="Runtime",
size="IMDB Score",
ax=ax,
sizes=(5, 1000),
alpha=0.9,
color="darkred",
)
linex = ax.axvline(
meanx, linestyle="dotted", color=colors_dark[1], alpha=0.8, label="Average"
)
liney = ax.axhline(meany, linestyle="dotted", color=colors_dark[1], alpha=0.8)
ax.legend(bbox_to_anchor=(1.05, 1), ncol=1, borderpad=1, frameon=False, fontsize=12)
ax.grid(alpha=0.3)
ax.set_axisbelow(True)
ax.set_xlabel(
"IMDB Score", fontsize=14, labelpad=10, fontweight="bold", color=colors_dark[0]
)
ax.set_ylabel(
"Runtime", fontsize=14, labelpad=10, fontweight="bold", color=colors_dark[0]
)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.legend().set_visible(False)
plt.text(
s="Is There a Correlation Between\nDuration and IMDB Score?",
ha="left",
x=xmin,
y=ymax * 1.1,
fontsize=24,
fontweight="bold",
color=colors_dark[0],
)
plt.title(
"It doesn't seem like there is a strong relationship between IMDB score and\nduration of content but it's gonna be better if we check it with correlation map.",
loc="left",
fontsize=13,
color=colors_dark[2],
)
plt.tight_layout()
plt.show()
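# The chart title above says it would be better to check this with a correlation map;
# here is a minimal sketch of that check (an assumption on my part, using only the two
# numeric columns of the original df):
corr = df[["IMDB Score", "Runtime"]].corr()
fig, ax = plt.subplots(figsize=(6, 5), dpi=75)
sns.heatmap(corr, annot=True, fmt=".2f", cmap="Reds", vmin=-1, vmax=1, ax=ax)
ax.set_title("Correlation: IMDB Score vs Runtime")
plt.tight_layout()
plt.show()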
df
lan = df["Language"].value_counts().iloc[0:10]
lan
class BubbleChart:
def __init__(self, area, bubble_spacing=0):
"""
Setup for bubble collapse.
Parameters
----------
area : array-like
Area of the bubbles.
bubble_spacing : float, default: 0
Minimal spacing between bubbles after collapsing.
Notes
-----
If "area" is sorted, the results might look weird.
"""
area = np.asarray(area)
r = np.sqrt(area / np.pi)
self.bubble_spacing = bubble_spacing
self.bubbles = np.ones((len(area), 4))
self.bubbles[:, 2] = r
self.bubbles[:, 3] = area
self.maxstep = 2 * self.bubbles[:, 2].max() + self.bubble_spacing
self.step_dist = self.maxstep / 2
# calculate initial grid layout for bubbles
length = np.ceil(np.sqrt(len(self.bubbles)))
grid = np.arange(length) * self.maxstep
gx, gy = np.meshgrid(grid, grid)
self.bubbles[:, 0] = gx.flatten()[: len(self.bubbles)]
self.bubbles[:, 1] = gy.flatten()[: len(self.bubbles)]
self.com = self.center_of_mass()
def center_of_mass(self):
return np.average(self.bubbles[:, :2], axis=0, weights=self.bubbles[:, 3])
def center_distance(self, bubble, bubbles):
return np.hypot(bubble[0] - bubbles[:, 0], bubble[1] - bubbles[:, 1])
def outline_distance(self, bubble, bubbles):
center_distance = self.center_distance(bubble, bubbles)
return center_distance - bubble[2] - bubbles[:, 2] - self.bubble_spacing
def check_collisions(self, bubble, bubbles):
distance = self.outline_distance(bubble, bubbles)
return len(distance[distance < 0])
def collides_with(self, bubble, bubbles):
distance = self.outline_distance(bubble, bubbles)
idx_min = np.argmin(distance)
return idx_min if type(idx_min) == np.ndarray else [idx_min]
def collapse(self, n_iterations=50):
"""
Move bubbles to the center of mass.
Parameters
----------
n_iterations : int, default: 50
Number of moves to perform.
"""
for _i in range(n_iterations):
moves = 0
for i in range(len(self.bubbles)):
rest_bub = np.delete(self.bubbles, i, 0)
# try to move directly towards the center of mass
# direction vector from bubble to the center of mass
dir_vec = self.com - self.bubbles[i, :2]
# shorten direction vector to have length of 1
dir_vec = dir_vec / np.sqrt(dir_vec.dot(dir_vec))
# calculate new bubble position
new_point = self.bubbles[i, :2] + dir_vec * self.step_dist
new_bubble = np.append(new_point, self.bubbles[i, 2:4])
# check whether new bubble collides with other bubbles
if not self.check_collisions(new_bubble, rest_bub):
self.bubbles[i, :] = new_bubble
self.com = self.center_of_mass()
moves += 1
else:
# try to move around a bubble that you collide with
# find colliding bubble
for colliding in self.collides_with(new_bubble, rest_bub):
# calculate direction vector
dir_vec = rest_bub[colliding, :2] - self.bubbles[i, :2]
dir_vec = dir_vec / np.sqrt(dir_vec.dot(dir_vec))
# calculate orthogonal vector
orth = np.array([dir_vec[1], -dir_vec[0]])
# test which direction to go
new_point1 = self.bubbles[i, :2] + orth * self.step_dist
new_point2 = self.bubbles[i, :2] - orth * self.step_dist
dist1 = self.center_distance(self.com, np.array([new_point1]))
dist2 = self.center_distance(self.com, np.array([new_point2]))
new_point = new_point1 if dist1 < dist2 else new_point2
new_bubble = np.append(new_point, self.bubbles[i, 2:4])
if not self.check_collisions(new_bubble, rest_bub):
self.bubbles[i, :] = new_bubble
self.com = self.center_of_mass()
if moves / len(self.bubbles) < 0.1:
self.step_dist = self.step_dist / 2
def plot(self, ax, labels, colors):
"""
Draw the bubble plot.
Parameters
----------
ax : matplotlib.axes.Axes
labels : list
Labels of the bubbles.
colors : list
Colors of the bubbles.
"""
for i in range(len(self.bubbles)):
circ = plt.Circle(self.bubbles[i, :2], self.bubbles[i, 2], color=colors[i])
ax.add_patch(circ)
ax.text(
*self.bubbles[i, :2],
labels[i],
horizontalalignment="center",
verticalalignment="center"
)
bubble_chart = BubbleChart(area=lan.values, bubble_spacing=0.1)
bubble_chart.collapse()
fig, ax = plt.subplots(subplot_kw=dict(aspect="equal"))
bubble_chart.plot(ax, lan.index, COLORS)
ax.axis("off")
ax.relim()
ax.autoscale_view()
ax.set_title("Browser market share")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009687.ipynb
|
netflix-original-films-imdb-scores
|
luiscorter
|
[{"Id": 69009687, "ScriptId": 18800732, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6590031, "CreationDate": "07/25/2021 20:00:10", "VersionNumber": 2.0, "Title": "\ud83d\udcfaNetflix - Data Visualizations", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 376.0, "LinesInsertedFromPrevious": 275.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 101.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91700719, "KernelVersionId": 69009687, "SourceDatasetVersionId": 2301025}]
|
[{"Id": 2301025, "DatasetId": 1387482, "DatasourceVersionId": 2342316, "CreatorUserId": 5163313, "LicenseName": "CC0: Public Domain", "CreationDate": "06/03/2021 23:24:57", "VersionNumber": 1.0, "Title": "Netflix Original Films & IMDB Scores", "Slug": "netflix-original-films-imdb-scores", "Subtitle": "Netflix Films since 06/01/2021", "Description": "### Context\n\nThis dataset consists of all Netflix original films released as of June 1st, 2021. Additionally, it also includes all Netflix documentaries and specials. The data was webscraped off of [this](https://en.wikipedia.org/wiki/Lists_of_Netflix_original_films) Wikipedia page, which was then integrated with a dataset consisting of all of their corresponding IMDB scores. IMDB scores are voted on by community members, and the majority of the films have 1,000+ reviews. \n\n### Content\n\nIncluded in the dataset is: \n\n- Title of the film\n- Genre of the film\n- Original premiere date\n- Runtime in minutes \n- IMDB scores (as of 06/01/21)\n- Languages currently available (as of 06/01/21) \n\n### Acknowledgements\n\nThank you to Nakul Lakhotia, whose article I used as a reference to scrape the Wikipedia tables. [Here](https://medium.com/analytics-vidhya/web-scraping-a-wikipedia-table-into-a-dataframe-c52617e1f451) is the article I used. \n\n### Inspiration\n\nI originally planned on using this data solely to make a public dashboard in Tableau, but I thought I would upload it to Kaggle in case anyone was interested in using this data. Integrating IMDB scores with this dataset was a pain, so I hope someone is able to make interesting correlations between the scores and other facets of the data.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1387482, "CreatorUserId": 5163313, "OwnerUserId": 5163313.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2301025.0, "CurrentDatasourceVersionId": 2342316.0, "ForumId": 1406699, "Type": 2, "CreationDate": "06/03/2021 23:24:57", "LastActivityDate": "06/03/2021", "TotalViews": 84209, "TotalDownloads": 15607, "TotalVotes": 200, "TotalKernels": 64}]
|
[{"Id": 5163313, "UserName": "luiscorter", "DisplayName": "Luis", "RegisterDate": "05/25/2020", "PerformanceTier": 0}]
|
#
# “WELCOME TO NETFLIX! ENJOY WATCHING IT!!”
# <img
# src="https://media1.tenor.com/images/f6b11bd53411d94338117381cf9a9b9b/tenor.gif?itemid=18131525" style="width:100%;height:100%;">
# # Introduction
# Hello again with my new notebook! I wanted to chose interested data set for my next notebook so here we are. I will do my best to create awesome visualizations while this notebook. I hope you will like it as you like watching Netflix😆
# 🚀Sooo let's goo!
# ## Libraries ⬇
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
from matplotlib.cm import ScalarMappable
from matplotlib.lines import Line2D
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from textwrap import wrap
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Set Style
sns.set_style("white")
mpl.rcParams["xtick.labelsize"] = 16
mpl.rcParams["ytick.labelsize"] = 16
mpl.rcParams["axes.spines.left"] = False
mpl.rcParams["axes.spines.right"] = False
mpl.rcParams["axes.spines.top"] = False
# ## Colors 🎨
#
colors_blue = ["#132C33", "#264D58", "#17869E", "#51C4D3", "#B4DBE9"]
colors_dark = ["#1F1F1F", "#313131", "#636363", "#AEAEAE", "#DADADA"]
colors_red = ["#331313", "#582626", "#9E1717", "#D35151", "#E9B4B4"]
colors_mix = [
"#17869E",
"#264D58",
"#179E66",
"#D35151",
"#E9DAB4",
"#E9B4B4",
"#D3B651",
"#6351D3",
]
colors_div = ["#132C33", "#17869E", "#DADADA", "#D35151", "#331313"]
sns.palplot(colors_blue)
sns.palplot(colors_dark)
sns.palplot(colors_red)
sns.palplot(colors_mix)
sns.palplot(colors_div)
# ## Importing Data ⬇
#
df = pd.read_csv(
"/kaggle/input/netflix-original-films-imdb-scores/NetflixOriginals.csv"
)
# ## First Look To Data 🕵️♀️
#
df.head()
df.isnull().sum()
df.info()
df.describe()
print("Number of categories:{}".format(df["Genre"].nunique()))
print(df["Genre"].unique())
print("*" * 75)
print("Number of Languages:{}".format(df["Language"].nunique()))
print(df["Language"].unique())
# > 📌 We all know that Netflix is a huge platform to watching series, movies and many on. As you can see there is ***115*** category and ***38*** language that you can choose. I guess next slogan of Netflix might be ***'We have content for everyone'***.
# ## Data Visualizations 📊
# I wanted to use ***circular barplot*** instead of using ***classic bar plots*** for this time. While checking resources for it, I [found this one](https://www.python-graph-gallery.com/web-circular-barplot-with-matplotlib). This is a great source if someone wants to learn more about cicular barplot or any other plots. I learned everything from there, thanks a lot to him!
imr = (
df.groupby("Genre", as_index=False)
.mean()
.sort_values(by="Runtime", ascending=False)
.reset_index(drop=True)
)
# Values for the x axis
ANGLES = np.linspace(0.05, 2 * np.pi - 0.05, 10, endpoint=False)
# Cumulative length
LENGTHS = imr.iloc[0:10, 1].values
# Genre label
GENRE = imr["Genre"].iloc[0:10].values
# Colors
COLORS = ["#6C5B7B", "#C06C84", "#F67280", "#F8B195"]
# Colormap
cmap = mpl.colors.LinearSegmentedColormap.from_list("my color", COLORS, N=256)
# Normalizer
norm = mpl.colors.Normalize(vmin=LENGTHS.min(), vmax=LENGTHS.max())
# Normalized colors. Each number of tracks is mapped to a color in the
# color scale 'cmap'
COLORS = cmap(norm(LENGTHS))
# Some layout stuff ----------------------------------------------
# Initialize layout in polar coordinates
fig, ax = plt.subplots(figsize=(9, 12.6), subplot_kw={"projection": "polar"})
# Set background color to white, both axis and figure.
fig.patch.set_facecolor("white")
ax.set_facecolor("white")
ax.set_theta_offset(1.2 * np.pi / 2)
ax.set_ylim(-50, 175)
# Add geometries to the plot -------------------------------------
# See the zorder to manipulate which geometries are on top
# Add bars to represent the cumulative track lengths
bars = ax.bar(ANGLES, LENGTHS, color=COLORS, alpha=0.9, width=0.52)
# ax.bar_label(bars,label_type="edge",fontsize=16)
# Add dashed vertical lines. These are just references
ax.vlines(ANGLES, 0, 170, color="black", ls=(0, (4, 4)))
# Remove lines for polar axis (x)
ax.xaxis.grid(False)
# ax.yaxis.grid(False)
# Add labels for the regions -------------------------------------
# Note the 'wrap()' function.
# The '5' means we want at most 5 consecutive letters in a word,
# but the 'break_long_words' means we don't want to break words
# longer than 5 characters.
GENRE = ["\n".join(wrap(r, 5, break_long_words=False)) for r in GENRE]
GENRE
# Set the labels
ax.set_xticks(ANGLES)
ax.set_xticklabels(GENRE, size=12)
# Remove unnecesary guides ---------------------------------------
# Remove lines for polar axis (x)
ax.xaxis.grid(False)
# Reference positions for the radial axis (y) at 0, 25, 75, 125 and 175
ax.set_yticklabels([])
ax.set_yticks([0, 25, 75, 125, 175])
# Remove spines
ax.spines["start"].set_color("none")
ax.spines["polar"].set_color("none")
# Adjust padding of the x axis labels ----------------------------
# This is going to add extra space around the labels for the
# ticks of the x axis.
XTICKS = ax.xaxis.get_major_ticks()
for tick in XTICKS:
tick.set_pad(10)
# Add custom annotations -----------------------------------------
# The following represent the heights in the values of the y axis
PAD = 10
ax.text(-0.2 * np.pi / 2, 25 + PAD, "25", ha="center", size=12)
ax.text(-0.2 * np.pi / 2, 75 + PAD, "75", ha="center", size=12)
ax.text(-0.2 * np.pi / 2, 125 + PAD, "125", ha="center", size=12)
ax.text(-0.2 * np.pi / 2, 175 + PAD, "175", ha="center", size=12)
title = "Top 10 Longest Content Genre by A"
fig.text(0.1, 0.83, title, fontsize=25, weight="bold", ha="left", va="baseline")
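# Optional touch: annotate each bar with its mean runtime. This is a small sketch that
# assumes the polar `ax`, `ANGLES`, `LENGTHS` and `colors_dark` defined above are still
# in scope; the +6 radial offset is an arbitrary choice for readability.
for angle, length in zip(ANGLES, LENGTHS):
    ax.text(
        angle,
        length + 6,  # nudge the label just past the tip of the bar
        f"{length:.0f}",
        ha="center",
        va="center",
        size=10,
        color=colors_dark[0],
    )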
#
# “THIS IS A ROBBERY!”
# <img
# src="https://1.bp.blogspot.com/-cgODERMTBHY/XrgJGTlSNiI/AAAAAAAAAE4/rRVQHLBkWu8pksR1jku5F1YPv6q1LjL9gCLcBGAsYHQ/s1600/MoneyHeist.gif" style="width:100%;height:100%;">
# - The Heist Film category is the longest by average duration. Even though they run that long, does anyone say they're bored?🤔 I don't think so. Also, if you haven't tried Money Heist yet, go and watch it immediately. Wait, wait, after finishing the notebook please😂
meanx = imr["IMDB Score"].mean()
meany = imr["Runtime"].mean()
fig, ax = plt.subplots(figsize=(18, 8), dpi=75)
sns.scatterplot(
data=imr,
x="IMDB Score",
y="Runtime",
size="IMDB Score",
ax=ax,
sizes=(5, 1000),
alpha=0.9,
color="darkred",
)
linex = ax.axvline(
meanx, linestyle="dotted", color=colors_dark[1], alpha=0.8, label="Average"
)
liney = ax.axhline(meany, linestyle="dotted", color=colors_dark[1], alpha=0.8)
ax.legend(bbox_to_anchor=(1.05, 1), ncol=1, borderpad=1, frameon=False, fontsize=12)
ax.grid(alpha=0.3)
ax.set_axisbelow(True)
ax.set_xlabel(
"IMDB Score", fontsize=14, labelpad=10, fontweight="bold", color=colors_dark[0]
)
ax.set_ylabel(
"Runtime", fontsize=14, labelpad=10, fontweight="bold", color=colors_dark[0]
)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.legend().set_visible(False)
plt.text(
s="Is There a Correlation Between\nDuration and IMDB Score?",
ha="left",
x=xmin,
y=ymax * 1.1,
fontsize=24,
fontweight="bold",
color=colors_dark[0],
)
plt.title(
"It doesn't seem like there is a strong relationship between IMDB score and\nduration of content but it's gonna be better if we check it with correlation map.",
loc="left",
fontsize=13,
color=colors_dark[2],
)
plt.tight_layout()
plt.show()
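# A quick numeric follow-up to the question in the title above. This is a minimal,
# illustrative sketch assuming `df` still holds the NetflixOriginals data loaded earlier;
# the printed Pearson coefficient is whatever the data gives, not a quoted result.
corr_runtime_score = df["IMDB Score"].corr(df["Runtime"])  # Pearson by default
print(f"Pearson correlation between IMDB Score and Runtime: {corr_runtime_score:.3f}")
plt.figure(figsize=(5, 4))
sns.heatmap(df[["IMDB Score", "Runtime"]].corr(), annot=True, fmt=".2f", cmap="Reds")
plt.title("Correlation: IMDB Score vs Runtime")
plt.show()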
df
lan = df["Language"].value_counts().iloc[0:10]
lan
class BubbleChart:
def __init__(self, area, bubble_spacing=0):
"""
Setup for bubble collapse.
Parameters
----------
area : array-like
Area of the bubbles.
bubble_spacing : float, default: 0
Minimal spacing between bubbles after collapsing.
Notes
-----
If "area" is sorted, the results might look weird.
"""
area = np.asarray(area)
r = np.sqrt(area / np.pi)
self.bubble_spacing = bubble_spacing
self.bubbles = np.ones((len(area), 4))
self.bubbles[:, 2] = r
self.bubbles[:, 3] = area
self.maxstep = 2 * self.bubbles[:, 2].max() + self.bubble_spacing
self.step_dist = self.maxstep / 2
# calculate initial grid layout for bubbles
length = np.ceil(np.sqrt(len(self.bubbles)))
grid = np.arange(length) * self.maxstep
gx, gy = np.meshgrid(grid, grid)
self.bubbles[:, 0] = gx.flatten()[: len(self.bubbles)]
self.bubbles[:, 1] = gy.flatten()[: len(self.bubbles)]
self.com = self.center_of_mass()
def center_of_mass(self):
return np.average(self.bubbles[:, :2], axis=0, weights=self.bubbles[:, 3])
def center_distance(self, bubble, bubbles):
return np.hypot(bubble[0] - bubbles[:, 0], bubble[1] - bubbles[:, 1])
def outline_distance(self, bubble, bubbles):
center_distance = self.center_distance(bubble, bubbles)
return center_distance - bubble[2] - bubbles[:, 2] - self.bubble_spacing
def check_collisions(self, bubble, bubbles):
distance = self.outline_distance(bubble, bubbles)
return len(distance[distance < 0])
def collides_with(self, bubble, bubbles):
distance = self.outline_distance(bubble, bubbles)
idx_min = np.argmin(distance)
return idx_min if type(idx_min) == np.ndarray else [idx_min]
def collapse(self, n_iterations=50):
"""
Move bubbles to the center of mass.
Parameters
----------
n_iterations : int, default: 50
Number of moves to perform.
"""
for _i in range(n_iterations):
moves = 0
for i in range(len(self.bubbles)):
rest_bub = np.delete(self.bubbles, i, 0)
# try to move directly towards the center of mass
# direction vector from bubble to the center of mass
dir_vec = self.com - self.bubbles[i, :2]
# shorten direction vector to have length of 1
dir_vec = dir_vec / np.sqrt(dir_vec.dot(dir_vec))
# calculate new bubble position
new_point = self.bubbles[i, :2] + dir_vec * self.step_dist
new_bubble = np.append(new_point, self.bubbles[i, 2:4])
# check whether new bubble collides with other bubbles
if not self.check_collisions(new_bubble, rest_bub):
self.bubbles[i, :] = new_bubble
self.com = self.center_of_mass()
moves += 1
else:
# try to move around a bubble that you collide with
# find colliding bubble
for colliding in self.collides_with(new_bubble, rest_bub):
# calculate direction vector
dir_vec = rest_bub[colliding, :2] - self.bubbles[i, :2]
dir_vec = dir_vec / np.sqrt(dir_vec.dot(dir_vec))
# calculate orthogonal vector
orth = np.array([dir_vec[1], -dir_vec[0]])
# test which direction to go
new_point1 = self.bubbles[i, :2] + orth * self.step_dist
new_point2 = self.bubbles[i, :2] - orth * self.step_dist
dist1 = self.center_distance(self.com, np.array([new_point1]))
dist2 = self.center_distance(self.com, np.array([new_point2]))
new_point = new_point1 if dist1 < dist2 else new_point2
new_bubble = np.append(new_point, self.bubbles[i, 2:4])
if not self.check_collisions(new_bubble, rest_bub):
self.bubbles[i, :] = new_bubble
self.com = self.center_of_mass()
if moves / len(self.bubbles) < 0.1:
self.step_dist = self.step_dist / 2
def plot(self, ax, labels, colors):
"""
Draw the bubble plot.
Parameters
----------
ax : matplotlib.axes.Axes
labels : list
Labels of the bubbles.
colors : list
Colors of the bubbles.
"""
for i in range(len(self.bubbles)):
circ = plt.Circle(self.bubbles[i, :2], self.bubbles[i, 2], color=colors[i])
ax.add_patch(circ)
ax.text(
*self.bubbles[i, :2],
labels[i],
horizontalalignment="center",
verticalalignment="center"
)
bubble_chart = BubbleChart(area=lan.values, bubble_spacing=0.1)
bubble_chart.collapse()
fig, ax = plt.subplots(subplot_kw=dict(aspect="equal"))
bubble_chart.plot(ax, lan.index, COLORS)
ax.axis("off")
ax.relim()
ax.autoscale_view()
ax.set_title("Browser market share")
plt.show()
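# As a quick cross-check of the bubble sizes, a minimal sketch assuming `lan` (the top-10
# language counts) and `df` from above are still in scope; it prints each language's share
# of all titles in the dataset.
language_share = (lan / len(df) * 100).round(1)
print("Share of all Netflix Originals (%):")
print(language_share.to_string())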
| false | 1 | 4,231 | 0 | 4,466 | 4,231 |
||
69009258
|
# **This notebook is an exercise in the [Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/pipelines).**
# ---
# In this exercise, you will use **pipelines** to improve the efficiency of your machine learning code.
# # Setup
# The questions below will give you feedback on your work. Run the following cell to set up the feedback system.
# Set up code checking
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex4 import *
print("Setup Complete")
# You will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course).
# 
# Run the next code cell without changes to load the training and validation sets in `X_train`, `X_valid`, `y_train`, and `y_valid`. The test set is loaded in `X_test`.
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
X_full = pd.read_csv("../input/train.csv", index_col="Id")
X_test_full = pd.read_csv("../input/test.csv", index_col="Id")
# Remove rows with missing target, separate target from predictors
X_full.dropna(axis=0, subset=["SalePrice"], inplace=True)
y = X_full.SalePrice
X_full.drop(["SalePrice"], axis=1, inplace=True)
# Break off validation set from training data
X_train_full, X_valid_full, y_train, y_valid = train_test_split(
X_full, y, train_size=0.8, test_size=0.2, random_state=0
)
# "Cardinality" means the number of unique values in a column
# Select categorical columns with relatively low cardinality (convenient but arbitrary)
categorical_cols = [
cname
for cname in X_train_full.columns
if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"
]
# Select numerical columns
numerical_cols = [
cname
for cname in X_train_full.columns
if X_train_full[cname].dtype in ["int64", "float64"]
]
# Keep selected columns only
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()
X_train.head()
# The next code cell uses code from the tutorial to preprocess the data and train a model. Run this code without changes.
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# Preprocessing for numerical data
numerical_transformer = SimpleImputer(strategy="constant")
# Preprocessing for categorical data
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(
transformers=[
("num", numerical_transformer, numerical_cols),
("cat", categorical_transformer, categorical_cols),
]
)
# Define model
model = RandomForestRegressor(n_estimators=100, random_state=0)
# Bundle preprocessing and modeling code in a pipeline
clf = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
# Preprocessing of training data, fit model
clf.fit(X_train, y_train)
# Preprocessing of validation data, get predictions
preds = clf.predict(X_valid)
print("MAE:", mean_absolute_error(y_valid, preds))
# The code yields a value around 17862 for the mean absolute error (MAE). In the next step, you will amend the code to do better.
# # Step 1: Improve the performance
# ### Part A
# Now, it's your turn! In the code cell below, define your own preprocessing steps and random forest model. Fill in values for the following variables:
# - `numerical_transformer`
# - `categorical_transformer`
# - `model`
# To pass this part of the exercise, you need only define valid preprocessing steps and a random forest model.
# Preprocessing for numerical data
numerical_transformer = Pipeline(steps=[("imputer", SimpleImputer(strategy="median"))])
# Preprocessing for categorical data
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(
transformers=[
("num", numerical_transformer, numerical_cols),
("cat", categorical_transformer, categorical_cols),
]
)
# Define model
from xgboost import XGBRegressor
model = XGBRegressor(n_estimators=2000, learning_rate=0.025)
# Check your answer
step_1.a.check()
# Lines below will give you a hint or solution code
step_1.a.hint()
step_1.a.solution()
# ### Part B
# Run the code cell below without changes.
# To pass this step, you need to have defined a pipeline in **Part A** that achieves lower MAE than the code above. You're encouraged to take your time here and try out many different approaches, to see how low you can get the MAE! (_If your code does not pass, please amend the preprocessing steps and model in Part A._)
# Bundle preprocessing and modeling code in a pipeline
my_pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
# Preprocessing of training data, fit model
my_pipeline.fit(X_train, y_train)
# Preprocessing of validation data, get predictions
preds = my_pipeline.predict(X_valid)
# Evaluate the model
score = mean_absolute_error(y_valid, preds)
print("MAE:", score)
# Check your answer
step_1.b.check()
# Line below will give you a hint
step_1.b.hint()
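# Optional (not part of the graded exercise): because preprocessing and the model are
# bundled in one pipeline, cross-validation needs no manual data handling. A hedged
# sketch using the `my_pipeline`, `X_train` and `y_train` defined above; with 2,000
# XGBoost trees per fold this can be slow, so cv=3 is used purely for illustration.
from sklearn.model_selection import cross_val_score
cv_mae = -1 * cross_val_score(
    my_pipeline, X_train, y_train, cv=3, scoring="neg_mean_absolute_error"
)
print("CV MAE per fold:", cv_mae)
print("Mean CV MAE:", cv_mae.mean())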
# # Step 2: Generate test predictions
# Now, you'll use your trained model to generate predictions with the test data.
# Preprocessing of test data, fit model
preds_test = my_pipeline.predict(X_test)
# Check your answer
step_2.check()
# Lines below will give you a hint or solution code
step_2.hint()
step_2.solution()
# Run the next code cell without changes to save your results to a CSV file that can be submitted directly to the competition.
# Save test predictions to file
output = pd.DataFrame({"Id": X_test.index, "SalePrice": preds_test})
output.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009258.ipynb
| null | null |
[{"Id": 69009258, "ScriptId": 18828532, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7944167, "CreationDate": "07/25/2021 19:50:45", "VersionNumber": 3.0, "Title": "Exercise: Pipelines", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 212.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 210.0, "LinesInsertedFromFork": 11.0, "LinesDeletedFromFork": 9.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 201.0, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,828 | 0 | 1,828 | 1,828 |
||
69009621
|
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input
import cv2
import os
from IPython.display import clear_output
import plotly as plt
import plotly.graph_objects as go
from plotly.offline import plot, iplot
from plotly.subplots import make_subplots
tf.config.list_physical_devices("GPU")
class SnakeModel(keras.Model):
def __init__(self, input_shape, n_actions, gamma=0.99, lr=0.003):
super(SnakeModel, self).__init__()
self.gamma = gamma
self.lr = lr
self.n_actions = n_actions
self.image_shape = input_shape
self.input_ = keras.layers.Input(shape=self.image_shape)
self.body = keras.models.Sequential(
[
Conv2D(
32,
4,
padding="valid",
activation="relu",
strides=(2, 2),
kernel_initializer="random_uniform",
bias_initializer="zeros",
),
Conv2D(
64,
3,
padding="valid",
activation="relu",
strides=(1, 1),
kernel_initializer="random_uniform",
bias_initializer="zeros",
),
Flatten(),
]
)
self.policy_head = keras.models.Sequential(
[
keras.layers.Dense(
512,
activation="relu",
kernel_initializer="random_uniform",
bias_initializer="zeros",
),
keras.layers.Dense(self.n_actions),
]
)
self.value_head = keras.models.Sequential(
[
keras.layers.Dense(
512,
activation="relu",
kernel_initializer="random_uniform",
bias_initializer="zeros",
),
keras.layers.Dense(1),
]
)
self.optimizer = keras.optimizers.Adam(self.lr)
self.activation = tf.keras.layers.Activation("softmax")
@tf.function
def call(self, x):
latent = self.body(x / 255)
p = self.policy_head(latent)
return p, self.activation(p), self.value_head(latent)
@tf.function
def pcall(self, x):
latent = self.body(x / 255)
return self.policy_head(latent)
@tf.function
def vcall(self, x):
latent = self.body(x / 255)
return self.value_head(latent)
# def get_action(x):
# return ['forward', 'left', 'right'][x]
@tf.function
def calc_entropy(logi):
a0 = logi - tf.reduce_max(logi, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.math.log(z0) - a0), axis=-1)
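# Illustrative sanity check (not part of training): calc_entropy should agree with the
# entropy of the softmax distribution computed directly. The logits below are made up.
_test_logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
_test_probs = tf.nn.softmax(_test_logits)
_direct_entropy = -tf.reduce_sum(_test_probs * tf.math.log(_test_probs), axis=-1)
print(float(calc_entropy(_test_logits)[0]), float(_direct_entropy[0]))  # both should match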
@tf.function
def backpr(model, s, q, v, a): # , al
with tf.GradientTape() as tape:
logits = model.pcall(s)
grad = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.one_hot(a, 4)
)
p_loss = tf.reduce_mean(grad * tf.squeeze(q - v))
vpreds = model.vcall(s)
v_loss = tf.reduce_mean(tf.square(vpreds - q))
entropy = tf.reduce_mean(calc_entropy(logits))
loss = p_loss + v_loss * 0.5 - 0.05 * entropy
var_list = tape.watched_variables()
grads_ = tape.gradient(loss, var_list)
grads_, _ = tf.clip_by_global_norm(grads_, 0.5)
model.optimizer.apply_gradients(zip(grads_, var_list))
return loss, p_loss, v_loss, entropy
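# backpr expects `q` to be discounted returns, which the training loop below builds with a
# backward recursion. A tiny illustrative example of that recursion on made-up rewards,
# using gamma=0.99 to match the model default:
_toy_rewards = [0.0, 0.0, 1.0]  # e.g. food eaten on the final step
_toy_dones = [False, False, True]
_gamma = 0.99
_ret = 0.0
_toy_returns = []
for _r, _d in zip(reversed(_toy_rewards), reversed(_toy_dones)):
    _ret = _r + _gamma * _ret * (1 - _d)  # reset the running return at episode boundaries
    _toy_returns.append(_ret)
_toy_returns.reverse()
print(_toy_returns)  # approximately [0.9801, 0.99, 1.0]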
def get_video(list_, num):
videodims = (grid_size * 100, grid_size * 100)
fps = 24
fourcc = cv2.VideoWriter_fourcc(*"avc1")
video = cv2.VideoWriter(
f"/users/abhiminhas/Downloads/videos/test_{num}.mp4", fourcc, 60, videodims
)
for i in range(0, len(list_) * fps):
a = int(i / fps)
imtemp = list_[a]
video.write(cv2.cvtColor(np.array(imtemp), cv2.COLOR_RGB2BGR))
clear_output()
video.release()
def get_snakestep(direction, snakeaction):
snakeaction = ["right", "down", "up", "left"][snakeaction]
if direction == snakeaction:
return "forward"
elif direction == "right":
act_dic = {"right": "forward", "left": "back", "down": "right", "up": "left"}
elif direction == "down":
act_dic = {"down": "forward", "left": "right", "right": "left", "up": "back"}
elif direction == "left":
act_dic = {"left": "forward", "right": "back", "down": "left", "up": "right"}
else:
act_dic = {"up": "forward", "right": "right", "left": "left", "down": "back"}
return act_dic[snakeaction]
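# Quick illustration of the mapping above (purely for readability): the network picks an
# absolute direction index, and get_snakestep translates it into a relative action.
print(get_snakestep("right", 1))  # facing right, absolute 'down' -> relative 'right'
print(get_snakestep("up", 3))  # facing up, absolute 'left' -> relative 'left'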
# change the width in the rectangle to an integer if you want the grid to show
import numpy as np
import random
from PIL import Image, ImageDraw
from collections import deque
from PIL import Image, ImageDraw
# line 121 changed to self.reward -= 0 in case nothing is done
class SnakeGame:
def __init__(self, grid_size):
assert grid_size >= 5
self.length = 3
self.grid_size = grid_size
self.action_space = ["forward", "left", "right"]
self.eat = 1
self.WallCollisionReward = -1
self.BodyCollisionReward = -1
# self.step_penalty = 0.001
def get_color(self, x, y):
color = "black" # 0
body = list(self.snake_indices)[:-1]
tail = list(self.snake_indices)[0]
if [x, y] == list(self.food):
color = "white" #
elif [x, y] == list(self.snake_indices[-1]):
            if self.done:
                color = "green"  # show the head in green once the game has ended
            else:
                color = "blue"  # 80
else:
for ele in body:
if list(ele) == [x, y]:
color = "green" # 180
if list(body[0]) == [x, y]:
color = "yellow"
return color
def get_image(self, scale):
self.img_size = grid_size * scale
self.image = Image.new(
mode="RGB", size=(self.img_size, self.img_size), color=0
        )  # background color (0 renders as black)
self.draw = ImageDraw.Draw(self.image)
a = (self.image.width) / self.grid_size
y_idx = 0
for y in range(0, self.img_size, (int(a))):
x_idx = 0
for x in range(0, self.img_size, (int(a))):
self.draw.rectangle(
[(x, y), (x + a, a + y)],
fill=self.get_color(y_idx, x_idx),
outline=255,
width=1,
)
x_idx += 1
y_idx += 1
# del draw
return self.image
def reset(self):
self.face_direction = "right"
self.is_wall = []
if self.grid_size % 2 == 0:
self.h_r = int((self.grid_size / 2) - 1)
self.h_c = int((self.grid_size / 2) - 1)
else:
self.h_r = int((self.grid_size - 1) / 2)
self.h_c = int((self.grid_size - 1) / 2)
# initializing snake, leftmost element is the head
self.snake_indices = deque()
for ele in range(self.length):
self.snake_indices.append((self.h_r, self.h_c - self.length + 1 + ele))
self.food = self.new_food_index()
self.reward = 0
self.done = False
self.info = None
return {
"state": self.render(),
"reward": self.reward,
"info": self.info,
"done": self.done,
"face_direction": self.face_direction,
}
def render(self):
self.state = np.zeros((self.grid_size, self.grid_size), dtype=int)
for ele in range(len(self.snake_indices)):
if ele == len(self.snake_indices) - 1: # head
self.state[self.snake_indices[ele]] = 1
else:
self.state[self.snake_indices[ele]] = 2
for ele in range(len(self.snake_indices) - 1):
if self.snake_indices[-1] == self.snake_indices[ele]:
# if head index is the same as any body's index
self.info = "body_collision"
self.done = True
self.reward = self.BodyCollisionReward
self.state[self.food] = 3
return self.state
def new_food_index(self):
r = random.randint(0, self.grid_size - 1)
c = random.randint(0, self.grid_size - 1)
while (r, c) in set(self.snake_indices):
r = random.randint(0, self.grid_size - 1)
c = random.randint(0, self.grid_size - 1)
return (r, c)
def final_step(self, final_action):
old_length = len(self.snake_indices)
old_state = self.render()
self.reward = 0
if final_action == "right":
if old_state[self.snake_indices[-1][0], self.snake_indices[-1][1] + 1] == 3:
self.reward = self.eat
self.snake_indices.appendleft((0, 0))
self.food = self.new_food_index()
# else:
# self.reward -= self.step_penalty
x = 0
y = 1
if final_action == "left":
if old_state[self.snake_indices[-1][0], self.snake_indices[-1][1] - 1] == 3:
self.reward = self.eat
self.snake_indices.appendleft((0, 0))
self.food = self.new_food_index()
# else:
# self.reward -= self.step_penalty
x = 0
y = -1
if final_action == "up":
if old_state[self.snake_indices[-1][0] - 1, self.snake_indices[-1][1]] == 3:
self.reward = self.eat
self.snake_indices.appendleft((0, 0))
self.food = self.new_food_index()
# else:
# self.reward -= self.step_penalty
x = -1
y = 0
if final_action == "down":
if old_state[self.snake_indices[-1][0] + 1, self.snake_indices[-1][1]] == 3:
self.reward = self.eat
self.snake_indices.appendleft((0, 0))
self.food = self.new_food_index()
# else:
# self.reward -= self.step_penalty
x = 1
y = 0
self.face_direction = final_action
self.snake_indices = self.deque_update(x, y) # THIS HAS BEEN UPDATED
self.is_wall = self.iswall()
        if len(self.snake_indices) > old_length:
            # food has already been respawned in the eating branch above, so this call is redundant
            food_index = self.new_food_index()
return self.render()
def step(self, action): # this has been edited to incorporate 4 actions,
key = action
if self.face_direction == "up":
dic = {"forward": "up", "left": "left", "right": "right", "back": "down"}
if self.face_direction == "down":
dic = {"forward": "down", "left": "right", "right": "left", "back": "up"}
if self.face_direction == "left":
dic = {"forward": "left", "left": "down", "right": "up", "back": "right"}
if self.face_direction == "right":
dic = {"forward": "right", "left": "up", "right": "down", "back": "left"}
# to incorporate 4 actions
if action == "back":
            self.done = True  # moving straight back ends the game
# this is to take care of steps subsequent to ending the game
# so that the snake doesnt keep on moving
if self.done == True:
return {
"state": self.render(),
"reward": 0,
"info": self.info,
"done": self.done,
"face_direction": dic[key],
}
else:
if action not in set(self.is_wall):
self.reward -= 0
return {
"state": self.final_step(dic[key]),
",reward": self.reward,
"info": self.info,
"done": self.done,
"face_direction": self.face_direction,
}
else:
# wall collision
self.reward = self.WallCollisionReward
self.done = True
self.info = "wall_collision"
return {
"state": self.render(),
"reward": self.reward,
"info": self.info,
"done": self.done,
"face_direction": dic[key],
}
def deque_update(self, x, y):
(a, b) = self.snake_indices[-1]
if -1 < (a + x) < self.grid_size and -1 < (b + y) < self.grid_size:
# if (a+x, b+y) in set(self.snake_indices):
# game_over = True
self.snake_indices.append(
(self.snake_indices[-1][0] + x, self.snake_indices[-1][1] + y)
)
self.snake_indices.popleft()
return self.snake_indices
else:
# WALL COLLISION happens, taken care in
return self.snake_indices
# reset the game
def iswall(self):
wall = []
if self.snake_indices[-1][0] == 0:
if self.face_direction == "up":
wall.append("forward")
if self.face_direction == "left":
wall.append("right")
if self.face_direction == "right":
wall.append("left")
if self.snake_indices[-1][0] == self.grid_size - 1:
if self.face_direction == "down":
wall.append("forward")
if self.face_direction == "left":
wall.append("left")
if self.face_direction == "right":
wall.append("right")
if self.snake_indices[-1][1] == 0:
if self.face_direction == "up":
wall.append("left")
if self.face_direction == "left":
wall.append("forward")
if self.face_direction == "down":
wall.append("right")
if self.snake_indices[-1][1] == self.grid_size - 1:
if self.face_direction == "up":
wall.append("right")
if self.face_direction == "down":
wall.append("left")
if self.face_direction == "right":
wall.append("forward")
return wall
# FIN
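# A small smoke test of the environment (illustrative only; the 6x6 grid is an arbitrary
# choice): reset, take two forward steps from the centre, and print the rendered grid.
_demo = SnakeGame(6)
_demo.reset()
for _ in range(2):
    _out = _demo.step("forward")
print(_out["state"])
print("done:", _out["done"], "info:", _out["info"])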
# ALWAYS USE 'g' AS THE LOOP VARIABLE FOR ENVIRONMENTS; MAKE SURE IT ISN'T ASSIGNED TO ANYTHING ELSE
num_envs = 64
n_actions = 4
grid_size = 8
scaling_factor = 4
batch_size = 32
gamma = 0.99
envs = [SnakeGame(grid_size) for _ in range(num_envs)]
model = SnakeModel(
(grid_size * scaling_factor, grid_size * scaling_factor, 3), n_actions
)
states = [g.reset() for g in envs]
states = tf.convert_to_tensor([np.array(g.get_image(scaling_factor)) for g in envs])
_, _, _ = model(states)
# note that the following are the steps
# ['right', 'down', 'up', 'left'] == [0,1,2,3]
batch_rewards = []
batch_mean_rewards = []
batch_mean_mean_rewards = []
batch_count = 0
total_loss = []
actor_loss = []
critic_loss = []
for _ in range(10000):
sars_dvl = []
batch_count += 1
for _ in range(batch_size):
action_logits, action_probs, values = model(states)
actions = [
np.random.choice(range(n_actions), p=ap.numpy()) for ap in action_probs
]
_ = [
g.step(get_snakestep(g.face_direction, a)) for (g, a) in zip(envs, actions)
]
next_s, rewards, dones = zip(
*[
(np.array(g.get_image(scaling_factor)), g.reward - 0.001, g.done)
for g in envs
]
)
next_s = np.stack(next_s)
dones = np.expand_dims(np.stack(dones), -1)
rewards = np.expand_dims(np.stack(rewards), -1)
batch_rewards.extend([rewa for (rewa, d) in zip(rewards, dones) if d == True])
sars_dvl.append(
(states, actions, rewards, next_s, dones, values, action_logits)
)
_ = [g.reset() for g in envs if g.done == True] # if its done
states = tf.convert_to_tensor(next_s.copy())
clear_output()
img1 = envs[0].get_image(25)
img2 = envs[1].get_image(25)
img3 = envs[2].get_image(25)
display(img1, img2, img3)
rev = np.array([[0]] * num_envs)
discounted_rewards = []
for _, _, r, _, d, _, _ in reversed(sars_dvl):
rev = r + model.gamma * rev * (1 - d)
discounted_rewards.append(rev)
discounted_rewards = np.concatenate(
np.stack(list(reversed(discounted_rewards)))
).astype("float32")
united_states = np.concatenate([ele[0] for ele in sars_dvl]).astype("float32")
values = np.concatenate([ele[5] for ele in sars_dvl]).astype("float32")
actions = np.concatenate([ele[1] for ele in sars_dvl]).astype("int32")
# action_logits = np.concatenate([ele[-1] for ele in sars_dvl]).astype('float32')
t, ac, cr, _ = backpr(
model, united_states, discounted_rewards, values, actions
) # , action_logits
total_loss.append(np.array(t))
actor_loss.append(np.array(ac))
critic_loss.append(np.array(cr))
# if len(batch_rewards) > num_envs:
# batch_mean_rewards.append(np.mean(batch_rewards))
# batch_rewards = []
# if len(batch_mean_rewards) > 30:
# batch_mean_mean_rewards.append(np.mean(batch_mean_rewards))
# batch_mean_rewards = []
# fig = go.Figure()
# fig.add_trace(go.Scatter(x=[x for x in range(len(batch_mean_mean_rewards))],
# y=batch_mean_mean_rewards, name='batch_mean_mean_rewards',
# line=dict(color='royalblue', width=2)))
# clear_output()
# iplot(fig)
# if (batch_count%50 or (batch_count%50 +1)) == 0:
# fig = make_subplots(specs=[[{"secondary_y": True}]])
# loss_len = len(total_loss)
# fig.add_trace(go.Scatter(x=[x for x in range(loss_len)],
# y=total_loss, name='total_loss',
# line=dict(color='royalblue', width=2)),
# secondary_y = True)
# fig.add_trace(go.Scatter(x=[x for x in range(loss_len)],
# y=actor_loss, name='actor loss',
# line=dict(color='red', width=2)),
# secondary_y = False)
# fig.add_trace(go.Scatter(x=[x for x in range(loss_len)],
# y=critic_loss, name='critic loss',
# line=dict(color='green', width=2)),
# secondary_y = False)
# fig.update_yaxes(title_text="<b>actor/critic loss</b>", secondary_y=False)
# fig.update_yaxes(title_text="<b>total loss</b>", secondary_y=True)
# clear_output()
# iplot(fig)
# testing
if batch_count % 1000 == 0:
model.save_weights(f"training2_snakemodel_{(batch_count/1000)}.h5")
# abhi_test = SnakeGame(grid_size)
# abhi_test.reset()
# abhi_state = tf.convert_to_tensor([np.array(abhi_test.get_image(scaling_factor))])
# test_imgs = []
# while not abhi_test.done:
# _, a_pro, _ = model(abhi_state)
# a_pro = np.random.choice(range(n_actions), p=a_pro.numpy()[0])
# abhi_test.step(get_snakestep(abhi_test.face_direction, a_pro))
# abhi_state = tf.convert_to_tensor([np.array(abhi_test.get_image(scaling_factor))])
# test_imgs.append(abhi_test.get_image(100))
# get_video(test_imgs, (batch_count/1000)+5)
# clear_output()
# iplot(fig)
# # ALWAYS USE 'g' AS THE LOOP VARIABLE FOR ENVIRONMENTS; MAKE SURE IT ISN'T ASSIGNED TO ANYTHING ELSE
# num_envs = 64
# n_actions = 4
# grid_size = 8
# scaling_factor = 4
# batch_size= 32
# gamma = 0.99
# envs = [SnakeGame(grid_size) for _ in range(num_envs)]
# model = SnakeModel((grid_size*scaling_factor, grid_size*scaling_factor, 3), n_actions)
# abhi_test = SnakeGame(grid_size)
# abhi_test.reset()
# abhi_state = tf.convert_to_tensor([np.array(abhi_test.get_image(scaling_factor))])
# # test_imgs = []
# _,_,_ = model(abhi_state)
# model.load_weights('../input/snakegame/training2_snakemodel_6.0.h5')
# import time
# while not abhi_test.done:
# _, a_pro, _ = model(abhi_state)
# a_pro = np.random.choice(range(n_actions), p=a_pro.numpy()[0])
# abhi_test.step(get_snakestep(abhi_test.face_direction, a_pro))
# display(abhi_test.get_image(30))
# abhi_state = tf.convert_to_tensor([np.array(abhi_test.get_image(scaling_factor))])
# time.sleep(0.3)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009621.ipynb
| null | null |
[{"Id": 69009621, "ScriptId": 18795074, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5855448, "CreationDate": "07/25/2021 19:58:36", "VersionNumber": 7.0, "Title": "snakegame", "EvaluationDate": "07/25/2021", "IsChange": true, "TotalLines": 527.0, "LinesInsertedFromPrevious": 41.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 486.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 6,136 | 0 | 6,136 | 6,136 |
||
69009858
|
<jupyter_start><jupyter_text>Breast cancer prediction
Breast cancer can occur in women and rarely in men.
Symptoms of breast cancer include a lump in the breast, bloody discharge from the nipple and changes in the shape or texture of the nipple or breast.
Its treatment depends on the stage of cancer. It may consist of chemotherapy, radiation, hormone therapy and surgery.
Kaggle dataset identifier: breast-cancer-prediction
<jupyter_script># # Breast Cancer (Diagnostic) Data Set
# **Task : To predict whether the cancer is benign or malignant**
# **What Are the Symptoms of Breast Cancer?**
# - New lump in the breast or underarm (armpit).
# - Thickening or swelling of part of the breast.
# - Irritation or dimpling of breast skin.
# - Redness or flaky skin in the nipple area or the breast.
# - Pulling in of the nipple or pain in the nipple area.
# - Nipple discharge other than breast milk, including blood.
# # IMPORTING THE LIBRARIES
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sp
import warnings
import os
warnings.filterwarnings("ignore")
import datetime
# # LOADING THE DATASET
data = pd.read_csv("/kaggle/input/breast-cancer-wisconsin-data/data.csv")
data.head()  # display the first 5 rows of the dataset
data.describe() # description of dataset
data.info()
data.shape # 569 rows and 33 columns
data.columns # displaying the columns of dataset
data["diagnosis"].value_counts()  # class balance of the target: B (benign) vs M (malignant)
data.dtypes
data.isnull().sum()
# **So we have to drop the Unnamed: 32 column, which contains only NaN values**
data.drop("Unnamed: 32", axis=1, inplace=True)
data
# # VISUALIZING THE DATA
data.corr()
plt.figure(figsize=(18, 9))
sns.heatmap(data.corr(), annot=True, cmap="Accent_r")
sns.barplot(x="id", y="diagnosis", data=data[160:190])
plt.title("Id vs Diagnosis", fontsize=15)
plt.xlabel("Id")
plt.ylabel("Diagonis")
plt.show()
plt.style.use("ggplot")
sns.barplot(x="radius_mean", y="texture_mean", data=data[170:180])
plt.title("Radius Mean vs Texture Mean", fontsize=15)
plt.xlabel("Radius Mean")
plt.ylabel("Texture Mean")
plt.show()
plt.style.use("ggplot")
mean_col = [
"diagnosis",
"radius_mean",
"texture_mean",
"perimeter_mean",
"area_mean",
"smoothness_mean",
"compactness_mean",
"concavity_mean",
"concave points_mean",
"symmetry_mean",
"fractal_dimension_mean",
]
sns.pairplot(data[mean_col], hue="diagnosis", palette="Accent")
sns.violinplot(x="smoothness_mean", y="perimeter_mean", data=data)
plt.figure(figsize=(14, 7))
sns.lineplot(
x="concavity_mean", y="concave points_mean", data=data[0:400], color="green"
)
plt.title("Concavity Mean vs Concave Mean")
plt.xlabel("Concavity Mean")
plt.ylabel("Concave Points")
plt.show()
worst_col = [
"diagnosis",
"radius_worst",
"texture_worst",
"perimeter_worst",
"area_worst",
"smoothness_worst",
"compactness_worst",
"concavity_worst",
"concave points_worst",
"symmetry_worst",
"fractal_dimension_worst",
]
sns.pairplot(data[worst_col], hue="diagnosis", palette="CMRmap")
# # TRAINING AND TESTING DATA
# Getting Features
x = data.drop(columns="diagnosis")
# Getting Predicting Value
y = data["diagnosis"]
# train_test_splitting of the dataset
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
print(len(x_train))
print(len(x_test))
print(len(y_train))
print(len(y_test))
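# Note: with an imbalanced target, a stratified split keeps the benign/malignant ratio the
# same in train and test. The commented line shows that alternative (left unused here so the
# scores reported below stay reproducible); the prints check the balance of the current split.
# x_train, x_test, y_train, y_test = train_test_split(
#     x, y, test_size=0.2, random_state=0, stratify=y
# )
print(y_train.value_counts(normalize=True).round(3))
print(y_test.value_counts(normalize=True).round(3))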
# # MODELS
# # 1. Logistic Regression
from sklearn.linear_model import LogisticRegression
reg = LogisticRegression()
reg.fit(x_train, y_train)
y_pred = reg.predict(x_test)
from sklearn.metrics import (
accuracy_score,
classification_report,
confusion_matrix,
r2_score,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", reg.score(x_train, y_train) * 100)
results = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})  # keep the original 'data' DataFrame intact
results
print(accuracy_score(y_test, y_pred) * 100)
# **So we get an accuracy score of 58.7% using Logistic Regression**
from sklearn.model_selection import GridSearchCV
param = {"penalty": ["l1", "l2"], "C": [0.001, 0.01, 0.1, 1, 10, 20, 100, 1000]}
lr = LogisticRegression(solver="liblinear")  # liblinear supports both the l1 and l2 penalties
cv = GridSearchCV(lr, param, cv=5, n_jobs=-1)
cv.fit(x_train, y_train)
cv.predict(x_test)
print("Best CV score", cv.best_score_ * 100)
# # 2. DECISION TREE CLASSIFIER
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(max_depth=6, random_state=123)
dtree.fit(x_train, y_train)
y_pred = dtree.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", dtree.score(x_train, y_train) * 100)
print(accuracy_score(y_test, y_pred) * 100)
# **So we get an accuracy score of 94.73 % using Decision Tree Classifier**
# # 3. Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc.fit(x_train, y_train)
y_pred = rfc.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", rfc.score(x_train, y_train) * 100)
print(accuracy_score(y_test, y_pred) * 100)
# **So we get an accuracy score of 96.49 % using Random Forest Classifier**
# # 4. KNeighborsClassifier
#
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=7)
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
r2_score,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", knn.score(x_train, y_train) * 100)
print(knn.score(x_test, y_test))
print(accuracy_score(y_test, y_pred) * 100)
# **So we get an accuracy score of 70.17 % using KNeighborsClassifier**
# # 5. SVC
from sklearn.svm import SVC
svc = SVC()
svc.fit(x_train, y_train)
y_pred = svc.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
r2_score,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", svc.score(x_train, y_train) * 100)
print(svc.score(x_test, y_test))
print("Training Score: ", svc.score(x_train, y_train) * 100)
# **So we get a accuracy score of 63.7 % using SVC**
# # 6. AdaBoostClassifier
from sklearn.ensemble import AdaBoostClassifier
adb = AdaBoostClassifier(base_estimator=None)
adb.fit(x_train, y_train)
y_pred = adb.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
r2_score,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", adb.score(x_train, y_train) * 100)
print(accuracy_score(y_test, y_pred) * 100)
# **So we get an accuracy score of 98.24 % using AdaBoostClassifier**
# # 7. Gradient Boosting Classifier
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier()
gbc.fit(x_train, y_train)
y_pred = gbc.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
r2_score,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", gbc.score(x_train, y_train) * 100)
print(gbc.score(x_test, y_test))
print(accuracy_score(y_test, y_pred) * 100)
# **So we get an accuracy score of 95.61 % using GradientBoostingClassifier**
# # 8. XGBClassifier
from xgboost import XGBClassifier
xgb = XGBClassifier(
objective="reg:linear",
colsample_bytree=0.3,
learning_rate=0.1,
max_depth=5,
alpha=10,
n_estimators=10,
)
xgb.fit(x_train, y_train)
y_pred = xgb.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
r2_score,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print("Training Score: ", xgb.score(x_train, y_train) * 100)
print(xgb.score(x_test, y_test))
print("Training Score: ", xgb.score(x_train, y_train) * 100)
data = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
data
# **So we get a accuracy score of 97.80 % using XGBClassifier**
# # 9. Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(x_train, y_train)
y_pred = gnb.predict(x_test)
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
mean_squared_error,
r2_score,
)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
print("Training Score: ", gnb.score(x_train, y_train) * 100)
print(gnb.score(x_test, y_test))
# **So we get an accuracy score of 63.29 % using Naive Bayes**
results = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
results
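# Sketch: gather the held-out accuracies of all the models fitted above into one table for a side-by-side
# comparison (this assumes reg, dtree, rfc, knn, svc, adb, gbc, xgb and gnb are still in memory).
from sklearn.metrics import accuracy_score
import pandas as pd

models = {
    "Logistic Regression": reg,
    "Decision Tree": dtree,
    "Random Forest": rfc,
    "KNN": knn,
    "SVC": svc,
    "AdaBoost": adb,
    "Gradient Boosting": gbc,
    "XGBoost": xgb,
    "Naive Bayes": gnb,
}
comparison = pd.DataFrame(
    {
        "Model": list(models.keys()),
        "Test accuracy (%)": [
            accuracy_score(y_test, m.predict(x_test)) * 100 for m in models.values()
        ],
    }
).sort_values("Test accuracy (%)", ascending=False)
comparison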
# # Understand the representations for finance
# When we are dealing with financial data, we need to establish a representation for the machine learning algorithm. To represent the price fluctuation of a share, the first idea that comes to mind is to sample by time. However, other representations are available.
# Throughout this notebook, we are going to look at the different available approaches and understand their pros and cons.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import requests
import gzip
import shutil
# ## Get the data
# In this notebook, the dataset we are going to use is a trade book of crypto currencies. We are going to analyze one day of trading (2021-07-21) and we will focus on ETH-USD. Of course, this method could be applied to any other exchange and over a longer period.
# Get the data
url = "https://s3-eu-west-1.amazonaws.com/public.bitmex.com/data/trade/20210721.csv.gz"
r = requests.get(url, allow_redirects=True)
open("trade.csv.gz", "wb").write(r.content)
# Extract the data
with open("trade.csv.gz", "rb") as fd:
gzip_fd = gzip.GzipFile(fileobj=fd)
destinations = pd.read_csv(gzip_fd)
# Get the eth values
eth_values = destinations[destinations.symbol == "ETHUSD"]
eth_values.head()
# ### Understand the data
# Each executed trade is represented as a row in our dataset. So, for each trade, we have:
# - timestamp : When the trade was executed.
# - symbol : The instrument being exchanged; in our case, we focus on the ETH/USD pair.
# - side : The type of order (Buy or Sell)
# - size : The number of contracts traded
# - price : Price of the asset for the current trade.
# - tickDirection : Indicates whether the price is higher than, lower than, or equal to that of the previous trade.
# - trdMatchID : Unique trade ID.
# - grossValue : Gross value of the current trade.
# - homeNotional : Trade value in ETH.
# - foreignNotional : Trade value in USD.
print("Number of trade for the current day : ", len(eth_values))
eth_values["timestamp"] = eth_values.timestamp.map(
lambda t: datetime.strptime(t[:-3], "%Y-%m-%dD%H:%M:%S.%f")
)
# # Time Bars Representation
# In this representation, we group the transactions into intervals of fixed duration. We can choose the length of this interval: minutes, hours...
# Volume-weighted average price
# https://en.wikipedia.org/wiki/Volume-weighted_average_price
def compute_vwap(df):
q = df["foreignNotional"]
p = df["price"]
vwap = np.sum(p * q) / np.sum(q)
df["vwap"] = vwap
return df
# Create an index on the timestamp
eth_time = eth_values.set_index("timestamp")
# Group the ETH price by 15 minutes
eth_time_15_minutes = eth_time.groupby(pd.Grouper(freq="15Min"))
nb_time_bars = len(eth_time_15_minutes)
# Compute the VWAP
eth_time_vwap = eth_time_15_minutes.apply(compute_vwap)
# Visualize the VWAP
plt.figure(figsize=(15, 5))
eth_time_vwap.vwap.plot()
# The main drawbacks of the time representation are:
# - The market does not operate at a constant level of activity. There are differences depending on the time of day (morning, night), especially for classic stock shares, since people are usually more tired at the end of the day than in the morning; this is normal, as we are human beings. For the crypto-currency market, activity largely depends on when the big traditional markets are open.
# - Additionally, time-sampled series exhibit poor statistical properties (serial correlation, heteroscedasticity, non-normality of returns).
# # Tick Bars Representation
# Group the observations every N transactions (ticks) instead of fixed time buckets.
# Nb of transactions
total_ticks = len(eth_values)
num_ticks_per_bar = 1000
# Assign a new index regarding the ticks number
eth_tick_grp = eth_values.reset_index().assign(
grpId=lambda row: row.index // num_ticks_per_bar
)
# Compute the VWAP
eth_tick_vwap = eth_tick_grp.groupby("grpId").apply(compute_vwap)
eth_tick_vwap.set_index("timestamp", inplace=True)
# Visualize the VWAP
plt.figure(figsize=(15, 5))
eth_tick_vwap.vwap.plot()
# # Volume Bars Representation
# Here we form a new bar each time a fixed amount of volume (in ETH, the `homeNotional` column) has been traded, so bars arrive faster when the market is more active.
eth_cm_vol = eth_values.assign(cmVol=eth_values["homeNotional"].cumsum())
total_vol = eth_cm_vol.cmVol.values[-1]
vol_per_bar = 100 # total_vol / num_time_bars
# vol_per_bar = round(vol_per_bar, -2) # round to the nearest hundred
eth_vol_grp = eth_cm_vol.assign(grpId=lambda row: row.cmVol // vol_per_bar)
eth_vol_vwap = eth_vol_grp.groupby("grpId").apply(compute_vwap)
eth_vol_vwap.set_index("timestamp", inplace=True)
# Visualize the VWAP
plt.figure(figsize=(15, 5))
eth_vol_vwap.vwap.plot()
# # Dollar Bars Representation
# Same idea as volume bars, but the buckets are defined by a fixed amount of dollar value traded (the `foreignNotional` column, i.e. the trade value in USD).
eth_cm_vol = eth_values.assign(cmVol=eth_values["foreignNotional"].cumsum())
total_vol = eth_cm_vol.cmVol.values[-1]
vol_per_bar = 100 # total_vol / num_time_bars
# vol_per_bar = round(vol_per_bar, -2) # round to the nearest hundred
eth_vol_grp = eth_cm_vol.assign(grpId=lambda row: row.cmVol // vol_per_bar)
eth_vol_vwap = eth_vol_grp.groupby("grpId").apply(compute_vwap)
eth_vol_vwap.set_index("timestamp", inplace=True)
# Visualize the VWAP
plt.figure(figsize=(15, 5))
eth_vol_vwap.vwap.plot()
# # Dollar Imbalance Bars
# Here a new bar is sampled whenever the absolute cumulative signed dollar flow theta = sum(b_t * v_t) (with b_t the tick direction and v_t the trade size) exceeds an expected threshold E[T] * |E[b*v]|, where both expectations are updated with an EWMA as bars are formed.
# Compute the tick direction
def convert_tick_direction(tick_direction):
if tick_direction in ("PlusTick", "ZeroPlusTick"):
return 1
elif tick_direction in ("MinusTick", "ZeroMinusTick"):
return -1
else:
raise ValueError("converting invalid input: " + str(tick_direction))
# Convert tick direction to integer
eth_values["direction"] = eth_values.tickDirection.map(convert_tick_direction)
# Compute signed flows at each tick
eth_signed_flow = eth_values.assign(bv=eth_values["direction"] * eth_values["size"])
eth_signed_flow.head()
from numba import jit
from numba import float64
from numba import int64
@jit((float64[:], int64), nopython=True, nogil=True)
def _ewma(arr_in, window):
r"""Exponentialy weighted moving average specified by a decay ``window``
to provide better adjustments for small windows via:
y[t] = (x[t] + (1-a)*x[t-1] + (1-a)^2*x[t-2] + ... + (1-a)^n*x[t-n]) /
(1 + (1-a) + (1-a)^2 + ... + (1-a)^n).
Parameters
----------
arr_in : np.ndarray, float64
A single dimenisional numpy array
window : int64
The decay window, or 'span'
Returns
-------
np.ndarray
The EWMA vector, same length / shape as ``arr_in``
Examples
--------
>>> import pandas as pd
>>> a = np.arange(5, dtype=float)
>>> exp = pd.DataFrame(a).ewm(span=10, adjust=True).mean()
    >>> np.array_equal(_ewma(a, 10), exp.values.ravel())
True
"""
n = arr_in.shape[0]
ewma = np.empty(n, dtype=float64)
alpha = 2 / float(window + 1)
w = 1
ewma_old = arr_in[0]
ewma[0] = ewma_old
for i in range(1, n):
w += (1 - alpha) ** i
ewma_old = ewma_old * (1 - alpha) + arr_in[i]
ewma[i] = ewma_old / w
return ewma
abs_Ebv_init = np.abs(eth_signed_flow["bv"].mean())
E_T_init = 10000  # initial guess for the expected number of ticks per bar (warm-up value)
def compute_Ts(bvs, E_T_init, abs_Ebv_init):
Ts, i_s = [], []
i_prev, E_T, abs_Ebv = 0, E_T_init, abs_Ebv_init
n = bvs.shape[0]
bvs_val = bvs.values.astype(np.float64)
abs_thetas, thresholds = np.zeros(n), np.zeros(n)
abs_thetas[0], cur_theta = np.abs(bvs_val[0]), bvs_val[0]
for i in range(1, n):
cur_theta += bvs_val[i]
abs_theta = np.abs(cur_theta)
abs_thetas[i] = abs_theta
threshold = E_T * abs_Ebv
thresholds[i] = threshold
if abs_theta >= threshold:
cur_theta = 0
Ts.append(np.float64(i - i_prev))
i_s.append(i)
i_prev = i
E_T = _ewma(np.array(Ts), window=np.int64(len(Ts)))[-1]
abs_Ebv = np.abs(
_ewma(bvs_val[:i], window=np.int64(E_T_init * 3))[-1]
) # window of 3 bars
return Ts, abs_thetas, thresholds, i_s
Ts, abs_thetas, thresholds, i_s = compute_Ts(eth_signed_flow.bv, E_T_init, abs_Ebv_init)
plt.figure(figsize=(15, 5))
plt.plot(abs_thetas)
plt.plot(thresholds)
n = eth_signed_flow.shape[0]
i_iter = iter(i_s + [n])
i_cur = i_iter.__next__()
grpId = np.zeros(n)
for i in range(1, n):
if i <= i_cur:
grpId[i] = grpId[i - 1]
else:
grpId[i] = grpId[i - 1] + 1
i_cur = i_iter.__next__()
data_dollar_imb_grp = eth_signed_flow.assign(grpId=grpId)
data_dollar_imb_vwap = data_dollar_imb_grp.groupby("grpId").apply(compute_vwap).vwap
len(data_dollar_imb_vwap)
eth_time_vwap["dollar_rep"] = data_dollar_imb_vwap.values
eth_time_vwap.head()
plt.figure(figsize=(15, 5))
eth_time_vwap.vwap.plot()
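# To compare the time-bar VWAP with the dollar-imbalance representation directly (sketch), both columns can be drawn on the same chart:
eth_time_vwap[["vwap", "dollar_rep"]].plot(figsize=(15, 5))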
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
df_train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
df_test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
df = pd.concat([df_train, df_test])
# # Data Exploration
# Goal: understand our dataset features and their nature, and show some visualizations.
df_train.describe()
# descriptive statistics summary for SalePrice
df_train["SalePrice"].describe()
# SalePrice histogram
sns.distplot(df_train["SalePrice"])
# **We conclude:
# 1- It deviates from the normal distribution.
# 2- It is right skewed (positive skewness).**
# skewness and kurtosis
print("Skewness: %f" % df_train["SalePrice"].skew())
print("Kurtosis: %f" % df_train["SalePrice"].kurt())
# **The SalePrice data is highly right skewed as the skewness > 1 and has heavy outliers(leptokurtic) as the kurtosis > 3**
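# As a quick sanity check (sketch), the log transform applied later in this notebook should pull the skewness of SalePrice down much closer to 0:
print("Skewness after log transform: %f" % np.log(df_train["SalePrice"]).skew())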
# Finding the columns with more correlation with SalePrice
df_train.corr()["SalePrice"].sort_values(ascending=False)
# scatter plot of Above ground living area vs. saleprice
var = "GrLivArea"
df_train.plot.scatter(x=var, y="SalePrice", ylim=(0, 800000))
print(df_train["SalePrice"].corr(df_train[var]))
# **From the correlation coefficient and the above graph we conclude that: there is a positive correlation between the above ground living area and the price so as the first increases the second increases accordingly.**
# scatter plot of Above ground living area vs. saleprice
var = "GarageArea"
df_train.plot.scatter(x=var, y="SalePrice", ylim=(0, 800000))
print(df_train["SalePrice"].corr(df_train[var]))
# scatter plot of Total square feet of basement area vs. saleprice
var = "TotalBsmtSF"
df_train.plot.scatter(x=var, y="SalePrice", ylim=(0, 800000))
print(df_train["SalePrice"].corr(df_train[var]))
# **From the correlation coefficient and the above graph we conclude that: there is a positive correlation between the total square feet of basement area and the price so as the first increases the second increases accordingly.**
# box plot of overallqual vs. saleprice
var = "OverallQual"
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=df_train)
fig.axis(ymin=0, ymax=800000)
# **SalePrice is related to overall quality, where the box plot shows how sales prices increase with the overall quality.**
# box plot of year-built vs. saleprice
var = "YearBuilt"
f, ax = plt.subplots(figsize=(16, 8))
fig = sns.boxplot(x=var, y="SalePrice", data=df_train)
fig.axis(ymin=0, ymax=800000)
plt.xticks(rotation=90)
# **We could conclude that more money is spent on new houses than on old ones.**
# # Preprocessing
# **Goal: remove some columns which are not going to be useful for our model and impute null values of the numerical and categorical variables with the median and mode.**
# ### Data Imputation
def show_missing(df):
# Shows percentage of null values in each column
pd.options.display.max_rows = None
display(((df.isnull().sum() / len(df)) * 100))
show_missing(df)
# Drop the columns which have more than 50% null values
df_train.drop(["Alley", "MiscFeature", "PoolQC", "Fence", "Id"], axis=1, inplace=True)
df_test.drop(["Alley", "MiscFeature", "PoolQC", "Fence", "Id"], axis=1, inplace=True)
df = pd.concat([df_train, df_test])
# Finding numerical data column names
num_variables = [i for i in df.columns if df.dtypes[i] != "object"]
num_variables
df_train.corr()["SalePrice"].sort_values(ascending=False)
# Get the columns whose correlation with SalePrice is below 0.1, as candidates for removal
num_del = [i for i in num_variables if df_train.corr()["SalePrice"][i] < 0.1]
num_del
# Dropping the columns and updating the changes in df
df_train.drop(num_del, axis=1, inplace=True)
df_test.drop(num_del, axis=1, inplace=True)
df = pd.concat([df_train, df_test])
show_missing(df)
# ### Imputing the missing values in all columns
def impute_null(df):
cat_v = [
i for i in df.columns if df.dtypes[i] == "object" if df[i].isnull().values.any()
]
num_v = [
i for i in df.columns if df.dtypes[i] != "object" if df[i].isnull().values.any()
]
for i in num_v:
df[i].fillna(df_train[i].median(), inplace=True)
for i in cat_v:
df[i].fillna(df_train[i].mode()[0], inplace=True)
impute_null(df_train)
impute_null(df_test)
df = pd.concat([df_train, df_test])
show_missing(df_train)
show_missing(df_test)
show_missing(df)
# ### Feature Encoding
# #### Label Encoding for ordinal features
cat_variables = [i for i in df.columns if df.dtypes[i] == "object"]
cat_variables
# Handling Ordinal categories using LabelEncoder
ord_dict = {
"LotShape": ["Reg", "IR1", "IR2", "IR3"],
"LandSlope": ["Gtl", "Mod", "Sev"],
"ExterQual": ["Ex", "Gd", "TA", "Fa", "Po"],
"ExterCond": ["Ex", "Gd", "TA", "Fa", "Po"],
"BsmtQual": ["Ex", "Gd", "TA", "Fa", "Po", "NB"],
"BsmtCond": ["Ex", "Gd", "TA", "Fa", "Po", "NB"],
"BsmtExposure": ["Gd", "Av", "Mn", "No", "NB"],
"BsmtFinType1": ["GLQ", "ALQ", "BLQ", "Rec", "LwQ", "Unf", "NB"],
"BsmtFinType2": ["GLQ", "ALQ", "BLQ", "Rec", "LwQ", "Unf", "NB"],
"HeatingQC": ["Ex", "Gd", "TA", "Fa", "Po"],
"KitchenQual": ["Ex", "Gd", "TA", "Fa", "Po"],
"GarageQual": ["Ex", "Gd", "TA", "Fa", "Po", "NG"],
"GarageCond": ["Ex", "Gd", "TA", "Fa", "Po", "NG"],
"Utilities": ["AllPub", "NoSeWa"],
}
cols_ord = ord_dict.keys()
le = LabelEncoder()
for col in cols_ord:
le.fit(ord_dict[col])
df_train[col] = le.transform(df_train[col])
df_test[col] = le.transform(df_test[col])
df = pd.concat([df_train, df_test])
df.head()
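# Note: LabelEncoder assigns integer codes in alphabetical order, so the category order declared in ord_dict
# above is not actually preserved. A hedged alternative sketch (to be used instead of the LabelEncoder loop,
# on the raw string columns) that does keep the declared order would be:
def encode_ordinal(frame, order_dict):
    # Map each ordinal column to the rank of its category in order_dict,
    # so the categories are ordered exactly as declared (e.g. "Ex" < "Gd" < "TA" < ...).
    out = frame.copy()
    for col, order in order_dict.items():
        out[col] = out[col].map({cat: rank for rank, cat in enumerate(order)})
    return out
# Example usage (illustrative only): df_train = encode_ordinal(df_train, ord_dict)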
# Updating column names
cat_variables = [i for i in df.columns if df.dtypes[i] == "object"]
num_variables = [i for i in df.columns if df.dtypes[i] != "object"]
# ### Handling Outliers
# **Columns that contain many values outside the 3×IQR fences (below Q1 − 3·IQR or above Q3 + 3·IQR) are identified and then dropped**
df_train.describe()
train_copy = df_train.copy()
q1 = df_train.quantile(0.25)
q3 = df_train.quantile(0.75)
iqr = q3 - q1
cutoff = 3 * iqr
cols = df_train
lower, upper = q1 - cutoff, q3 + cutoff
def TotalOutliers(df, columns, l, u):
    # Count, for each column, how many values fall outside the [l, u] fences
    fin = {}
    for i in columns:
        above = df[df[i] > u[i]].shape[0]
        below = df[df[i] < l[i]].shape[0]
        fin[i] = above + below
    return fin
outliers = TotalOutliers(train_copy, num_variables, lower, upper)
# Printing the number of outliers in each column.
outliers
# Dropping the columns which have the most outliers
df_train.drop(
[
"BsmtFinType2",
"ExterCond",
"BsmtCond",
"GarageQual",
"GarageCond",
"ScreenPorch",
],
axis=1,
inplace=True,
)
df_test.drop(
[
"BsmtFinType2",
"ExterCond",
"BsmtCond",
"GarageQual",
"GarageCond",
"ScreenPorch",
],
axis=1,
inplace=True,
)
df = pd.concat([df_train, df_test])
# ### Feature Transformation
# **Apply a log transformation to some of the important features that contribute most to the model, so that they perform better. The transformed data is closer to a normal distribution, which suits the ML algorithm.**
# Log tranformation for SalePrice
df_train["SalePrice"] = np.log(df_train["SalePrice"])
sns.distplot(df_train["SalePrice"])
sns.distplot(df_train["GrLivArea"])
df_train["GrLivArea"] = np.log(df_train["GrLivArea"])
df_test["GrLivArea"] = np.log(df_test["GrLivArea"])
sns.distplot(df_train["GrLivArea"])
sns.distplot(df_train["TotalBsmtSF"])
df = pd.concat([df_train, df_test])
# #### One Hot Encoding for nominal features
dff = pd.get_dummies(df)
df_tr = dff[dff["SalePrice"].isnull() == False]
df_te = dff[dff["SalePrice"].isnull()]
df_te.drop(["SalePrice"], axis=1, inplace=True)
model = LinearRegression()
# Separating the training dataset into train and validation parts for validation purposes
x_train, x_test, y_train, y_test = train_test_split(
df_tr.drop(["SalePrice"], axis=1), df_tr["SalePrice"], test_size=0.2, random_state=0
)
model.fit(x_train, y_train)
r_sq = model.score(x_train, y_train)
print("coefficient of determination:", r_sq)
y_pred = model.predict(x_test)
print("Validation RMSE:", np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
model.fit(df_tr.drop(["SalePrice"], axis=1), df_tr["SalePrice"])
predictions = model.predict(df_te)
# Applying the exponential to the predicted values because we applied a log transformation above
predictions = np.exp(predictions)  # inverse of the np.log used on SalePrice
predictions
# Submit the predictions
df_test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
sample_submission = pd.DataFrame(
{"Id": np.asarray(df_test.Id), "SalePrice": predictions}
)
sample_submission.to_csv("submission.csv", index=False)
<jupyter_start><jupyter_text>Gold Price Prediction Dataset
### Context
Historically, gold had been used as a form of currency in various parts of the world including the USA. In present times, precious metals like gold are held with central banks of all countries to guarantee re-payment of foreign debts and to control inflation, which reflects the financial strength of the country. Recently, emerging world economies such as China, Russia, and India have been big buyers of gold, whereas the USA, South Africa, and Australia are among the big sellers of gold.
Forecasting rise and fall in the daily gold rates can help investors to decide when to buy (or sell) the commodity. But Gold prices are dependent on many factors such as prices of other precious metals, prices of crude oil, stock exchange performance, Bonds prices, currency exchange rates, etc.
The challenge of this project is to accurately predict the future adjusted closing price of the Gold ETF across a given period of time in the future. This is a regression problem, because the output value, the adjusted closing price, is a continuous value.
### Content
Data for this study is collected from November 18th 2011 to January 1st 2019 from various sources. The dataset has 1718 rows and 80 columns in total. Data was gathered for attributes such as the oil price, the Standard and Poor's (S&P) 500 index, the Dow Jones index, US bond rates (10 years), the EUR/USD exchange rate, prices of the precious metals silver and platinum and of other metals such as palladium and rhodium, the US Dollar index, Eldorado Gold Corporation and the Gold Miners ETF.
The historical data of the Gold ETF fetched from Yahoo Finance has 7 columns: Date, Open, High, Low, Close, Adjusted Close, and Volume. The difference between Adjusted Close and Close is that the closing price of a stock is simply its price at the close of the trading day, whereas the adjusted closing price also takes factors such as dividends, stock splits, and new stock offerings into account (for example, after a 2-for-1 split, the historical Close values are halved in the adjusted series so that prices remain comparable over time). So, Adjusted Close is the outcome variable, i.e. the value you have to predict.

Kaggle dataset identifier: gold-price-prediction-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('gold-price-prediction-dataset/FINAL_USO.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1718 entries, 0 to 1717
Data columns (total 81 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 1718 non-null object
1 Open 1718 non-null float64
2 High 1718 non-null float64
3 Low 1718 non-null float64
4 Close 1718 non-null float64
5 Adj Close 1718 non-null float64
6 Volume 1718 non-null int64
7 SP_open 1718 non-null float64
8 SP_high 1718 non-null float64
9 SP_low 1718 non-null float64
10 SP_close 1718 non-null float64
11 SP_Ajclose 1718 non-null float64
12 SP_volume 1718 non-null int64
13 DJ_open 1718 non-null float64
14 DJ_high 1718 non-null float64
15 DJ_low 1718 non-null float64
16 DJ_close 1718 non-null float64
17 DJ_Ajclose 1718 non-null float64
18 DJ_volume 1718 non-null int64
19 EG_open 1718 non-null float64
20 EG_high 1718 non-null float64
21 EG_low 1718 non-null float64
22 EG_close 1718 non-null float64
23 EG_Ajclose 1718 non-null float64
24 EG_volume 1718 non-null int64
25 EU_Price 1718 non-null float64
26 EU_open 1718 non-null float64
27 EU_high 1718 non-null float64
28 EU_low 1718 non-null float64
29 EU_Trend 1718 non-null int64
30 OF_Price 1718 non-null float64
31 OF_Open 1718 non-null float64
32 OF_High 1718 non-null float64
33 OF_Low 1718 non-null float64
34 OF_Volume 1718 non-null int64
35 OF_Trend 1718 non-null int64
36 OS_Price 1718 non-null float64
37 OS_Open 1718 non-null float64
38 OS_High 1718 non-null float64
39 OS_Low 1718 non-null float64
40 OS_Trend 1718 non-null int64
41 SF_Price 1718 non-null int64
42 SF_Open 1718 non-null int64
43 SF_High 1718 non-null int64
44 SF_Low 1718 non-null int64
45 SF_Volume 1718 non-null int64
46 SF_Trend 1718 non-null int64
47 USB_Price 1718 non-null float64
48 USB_Open 1718 non-null float64
49 USB_High 1718 non-null float64
50 USB_Low 1718 non-null float64
51 USB_Trend 1718 non-null int64
52 PLT_Price 1718 non-null float64
53 PLT_Open 1718 non-null float64
54 PLT_High 1718 non-null float64
55 PLT_Low 1718 non-null float64
56 PLT_Trend 1718 non-null int64
57 PLD_Price 1718 non-null float64
58 PLD_Open 1718 non-null float64
59 PLD_High 1718 non-null float64
60 PLD_Low 1718 non-null float64
61 PLD_Trend 1718 non-null int64
62 RHO_PRICE 1718 non-null int64
63 USDI_Price 1718 non-null float64
64 USDI_Open 1718 non-null float64
65 USDI_High 1718 non-null float64
66 USDI_Low 1718 non-null float64
67 USDI_Volume 1718 non-null int64
68 USDI_Trend 1718 non-null int64
69 GDX_Open 1718 non-null float64
70 GDX_High 1718 non-null float64
71 GDX_Low 1718 non-null float64
72 GDX_Close 1718 non-null float64
73 GDX_Adj Close 1718 non-null float64
74 GDX_Volume 1718 non-null int64
75 USO_Open 1718 non-null float64
76 USO_High 1718 non-null float64
77 USO_Low 1718 non-null float64
78 USO_Close 1718 non-null float64
79 USO_Adj Close 1718 non-null float64
80 USO_Volume 1718 non-null int64
dtypes: float64(58), int64(22), object(1)
memory usage: 1.1+ MB
<jupyter_text>Examples:
{
"Date": "2011-12-15 00:00:00",
"Open": 154.740005,
"High": 154.949997,
"Low": 151.710007,
"Close": 152.330002,
"Adj Close": 152.330002,
"Volume": 21521900,
"SP_open": 123.029999,
"SP_high": 123.199997,
"SP_low": 121.989998,
"SP_close": 122.18,
"SP_Ajclose": 105.441238,
"SP_volume": 199109200,
"DJ_open": 11825.29004,
"DJ_high": 11967.83984,
"DJ_low": 11825.21973,
"DJ_close": 11868.80957,
"DJ_Ajclose": 11868.80957,
"DJ_volume": 136930000,
"EG_open": 74.550003,
"...": "and 61 more columns"
}
{
"Date": "2011-12-16 00:00:00",
"Open": 154.309998,
"High": 155.369995,
"Low": 153.899994,
"Close": 155.229996,
"Adj Close": 155.229996,
"Volume": 18124300,
"SP_open": 122.230003,
"SP_high": 122.949997,
"SP_low": 121.300003,
"SP_close": 121.589996,
"SP_Ajclose": 105.597549,
"SP_volume": 220481400,
"DJ_open": 11870.25,
"DJ_high": 11968.17969,
"DJ_low": 11819.30957,
"DJ_close": 11866.38965,
"DJ_Ajclose": 11866.38965,
"DJ_volume": 389520000,
"EG_open": 73.599998,
"...": "and 61 more columns"
}
{
"Date": "2011-12-19 00:00:00",
"Open": 155.479996,
"High": 155.860001,
"Low": 154.360001,
"Close": 154.869995,
"Adj Close": 154.869995,
"Volume": 12547200,
"SP_open": 122.059998,
"SP_high": 122.32,
"SP_low": 120.029999,
"SP_close": 120.290001,
"SP_Ajclose": 104.468536,
"SP_volume": 183903000,
"DJ_open": 11866.54004,
"DJ_high": 11925.87988,
"DJ_low": 11735.19043,
"DJ_close": 11766.25977,
"DJ_Ajclose": 11766.25977,
"DJ_volume": 135170000,
"EG_open": 69.099998,
"...": "and 61 more columns"
}
{
"Date": "2011-12-20 00:00:00",
"Open": 156.820007,
"High": 157.429993,
"Low": 156.580002,
"Close": 156.979996,
"Adj Close": 156.979996,
"Volume": 9136300,
"SP_open": 122.18,
"SP_high": 124.139999,
"SP_low": 120.370003,
"SP_close": 123.93,
"SP_Ajclose": 107.629784,
"SP_volume": 225418100,
"DJ_open": 11769.20996,
"DJ_high": 12117.12988,
"DJ_low": 11768.83008,
"DJ_close": 12103.58008,
"DJ_Ajclose": 12103.58008,
"DJ_volume": 165180000,
"EG_open": 66.449997,
"...": "and 61 more columns"
}
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
# ## Read data
# Parse the Date column explicitly (parse_dates=True alone only affects the index)
df = pd.read_csv(
    "../input/gold-price-prediction-dataset/FINAL_USO.csv", parse_dates=["Date"]
)
df.head()
df.describe()
df.columns
# ## Data visualization
# Collect every column whose name ends in "jclose" once spaces or underscores
# are stripped, i.e. the adjusted-close series of each instrument.
label_name = list(df.columns)
close_value = []
for i in range(len(label_name)):
    if (
        str.lower(label_name[i].replace(" ", "")[-6:]) == "jclose"
        or str.lower(label_name[i].replace("_", "")[-6:]) == "jclose"
    ):
        close_value.append(label_name[i])
# The suffix test also catches "DJ_close"; drop it so only the six
# adjusted-close columns remain.
del close_value[2]
close_value
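# A more compact equivalent of the suffix-matching loop above (a sketch, not
# part of the original notebook): select the adjusted-close columns directly
# by name and check that it yields the same six columns.
adj_close_cols = [c for c in df.columns if "Ajclose" in c or "Adj Close" in c]
assert adj_close_cols == close_value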
close_data = pd.DataFrame(df, columns=close_value)
correlation_mat = close_data.corr()
sns.heatmap(correlation_mat, annot=True)
plt.show()
fig = go.Figure([go.Scatter(x=df["Date"], y=df["Adj Close"])])
fig.show()
ma_day = [10, 20, 50]
for ma in ma_day:
column_name = f"MA for {ma} days"
df[column_name] = df["Adj Close"].rolling(ma).mean()
fig = px.line(
df,
x="Date",
y=["Adj Close", "MA for 10 days", "MA for 20 days", "MA for 50 days"],
title="Adj close",
)
fig.show()
df["Daily Return"] = df["Adj Close"].pct_change()
fig = px.scatter(df, x="Date", y="Daily Return", title="Daily Return")
fig.show()
fig = px.histogram(df, x="Date", y="Daily Return", histfunc="avg", title="Daily Return")
fig.show()
fig = px.line(df, x="Date", y=["Volume"], title="Volume")
fig.show()
volume_max = max(df["Volume"])
index = df[df["Volume"] == volume_max].index.values[0]
print(
    "Day with the highest volume:",
    df["Date"][index],
    "\nvolume:",
    df["Volume"][index],
    "\nadjusted close that day:",
    df["Adj Close"][index],
)
print("Average Adj close:", df["Adj Close"].mean())
# ## LSTM
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import EarlyStopping
# Keep only the six adjusted-close series; "Adj Close" (the gold ETF) is the target
Adj_data = df.loc[:, close_value]
adj_close = Adj_data[["Adj Close"]]
# Use the first 90% of the chronologically ordered data for training
training_data_len = int(np.ceil(len(Adj_data) * 0.90))
# sc scales all six input series; sc1 is fitted on the gold Adj Close alone so
# the single-column predictions can be inverse-transformed back to price units.
# (Note: both scalers are fitted on the full series, so some test-set
# information leaks into the scaling.)
sc = MinMaxScaler(feature_range=(0, 1))
scaled_data = sc.fit_transform(Adj_data)
sc1 = MinMaxScaler(feature_range=(0, 1))
sc_data = sc1.fit_transform(adj_close)
train_data = scaled_data[0 : int(training_data_len), :]
# Build 60-day lookback windows: each sample holds the previous 60 days of all
# six scaled series; the target is the next day's scaled gold Adj Close
# (column 0 of Adj_data).
x_train = []
y_train = []
for i in range(60, len(train_data)):
    x_train.append(train_data[i - 60 : i, :])
    y_train.append(train_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
# Reshape to (samples, timesteps, features) for the LSTM
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], len(close_value)))
# x_train.shape
def buildManyToOneModel(shape):
    # Many-to-one LSTM: 60 timesteps of 6 features in, one scaled price out
    model = Sequential()
    model.add(LSTM(10, input_shape=(shape[1], shape[2])))
    model.add(Dense(1))
    model.compile(loss="mse", optimizer="adam")
    model.summary()
    return model
model = buildManyToOneModel(x_train.shape)
callback = EarlyStopping(monitor="loss", patience=10, verbose=1, mode="auto")
model.fit(x_train, y_train, epochs=1000, batch_size=128, callbacks=[callback])
test_data = scaled_data[training_data_len - 60 :, :]
# Create the data sets x_test and y_test
x_test = []
y_test = Adj_data["Adj Close"][training_data_len:].values
for i in range(60, len(test_data)):
x_test.append(test_data[i - 60 : i, :])
x_test = np.array(x_test)
# Reshape to (samples, timesteps, features)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], len(close_value)))
# Get the model's predicted prices and map them back to price units with the
# single-column scaler fitted on "Adj Close"
predictions = model.predict(x_test)
predictions = sc1.inverse_transform(predictions)
t = np.linspace(0, len(y_test), len(y_test))
predictions = np.reshape(predictions, len(predictions))
fig = go.Figure()
fig.add_trace(go.Scatter(x=t, y=y_test, mode="lines", name="True data"))
fig.add_trace(go.Scatter(x=t, y=predictions, mode="lines", name="predict"))
fig.show()
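# Not part of the original notebook: a minimal numeric summary of the fit
# plotted above, assuming `predictions` and `y_test` as defined earlier.
rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
mape = np.mean(np.abs((predictions - y_test) / y_test)) * 100
print(f"Test RMSE: {rmse:.2f}  MAPE: {mape:.2f}%")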
1, '1': 0, '2': 0, '3': 1}, 'PLT_Price': {'0': 1414.65, '1': 1420.25, '2': 1411.1, '3': 1434.75}, 'PLT_Open': {'0': 1420.3, '1': 1414.75, '2': 1422.65, '3': 1408.95}, 'PLT_High': {'0': 1423.35, '1': 1431.75, '2': 1427.6, '3': 1436.55}, 'PLT_Low': {'0': 1376.85, '1': 1400.7, '2': 1404.6, '3': 1408.15}, 'PLT_Trend': {'0': 0, '1': 1, '2': 0, '3': 1}, 'PLD_Price': {'0': 618.85, '1': 623.65, '2': 608.8, '3': 626.65}, 'PLD_Open': {'0': 614.7, '1': 622.6, '2': 626.0, '3': 622.45}, 'PLD_High': {'0': 615.0, '1': 623.45, '2': 630.0, '3': 622.45}, 'PLD_Low': {'0': 614.6, '1': 622.3, '2': 608.6, '3': 622.45}, 'PLD_Trend': {'0': 1, '1': 1, '2': 0, '3': 1}, 'RHO_PRICE': {'0': 1425, '1': 1400, '2': 1400, '3': 1400}, 'USDI_Price': {'0': 80.341, '1': 80.249, '2': 80.207, '3': 80.273}, 'USDI_Open': {'0': 80.565, '1': 80.175, '2': 80.3, '3': 80.89}, 'USDI_High': {'0': 80.63, '1': 80.395, '2': 80.47, '3': 80.94}, 'USDI_Low': {'0': 80.13, '1': 79.935, '2': 80.125, '3': 80.035}, 'USDI_Volume': {'0': 22850, '1': 13150, '2': 970, '3': 22950}, 'USDI_Trend': {'0': 0, '1': 0, '2': 0, '3': 1}, 'GDX_Open': {'0': 53.009998, '1': 52.5, '2': 52.490002, '3': 52.380001}, 'GDX_High': {'0': 53.139999, '1': 53.18, '2': 52.549999, '3': 53.25}, 'GDX_Low': {'0': 51.57, '1': 52.040001, '2': 51.029999, '3': 52.369999}, 'GDX_Close': {'0': 51.68, '1': 52.68, '2': 51.169998, '3': 52.990002}, 'GDX_Adj Close': {'0': 48.973877, '1': 49.921513, '2': 48.490578, '3': 50.215282}, 'GDX_Volume': {'0': 20605600, '1': 16285400, '2': 15120200, '3': 11644900}, 'USO_Open': {'0': 36.900002, '1': 36.18, '2': 36.389999, '3': 37.299999}, 'USO_High': {'0': 36.939999, '1': 36.5, '2': 36.450001, '3': 37.610001}, 'USO_Low': {'0': 36.049999, '1': 35.73, '2': 35.93, '3': 37.220001}, 'USO_Close': {'0': 36.130001, '1': 36.27, '2': 36.200001, '3': 37.560001}, 'USO_Adj Close': {'0': 36.130001, '1': 36.27, '2': 36.200001, '3': 37.560001}, 'USO_Volume': {'0': 12616700, '1': 12578800, '2': 7418200, '3': 10041600}}
<end_description>
| 1,424 | 3 | 5,263 | 1,424 |
69009954
|
<jupyter_start><jupyter_text>YFinance Stock Price Data for Numerai Signals
This YFinance data is regularly updated to be used for the weekly round of the Numerai Signals.
Kaggle dataset identifier: yfinance-stock-price-data-for-numerai-signals
<jupyter_script># 
# -----------------------------
# written by katsu1110
# 27 May, 2021
# -----------------------------
# This is yet another starter notebook for the [Numerai Signals](https://signals.numer.ai/).
# What we do here includes:
# - fetch US stock price data via YFinance API
# - merge the data with the Numerai Signals' historical targets
# - perform feature engineering (considering stationary features)
# - modeling with XGBoost
# - submit (if you want)
# In a kaggle dataset [YFinance Stock Price Data for Numerai Signals](https://www.kaggle.com/code1110/yfinance-stock-price-data-for-numerai-signals), I fetch the stock price data on a daily basis via the YFinance API. So if you would rather not bother with the API yourself, just use this dataset (it is kept up to date).
# This content is largely inspired by the following starter.
# >End to end notebook for Numerai Signals using completely free data from Yahoo Finance, by Jason Rosenfeld (jrAI).
# https://colab.research.google.com/drive/1ECh69C0LDCUnuyvEmNFZ51l_276nkQqo#scrollTo=tTBUzPep2dm3
# Alright, let's get it started!
# # Libraries
# Let's import what we need...
import numerapi
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import gc
import pathlib
from tqdm.auto import tqdm
import joblib
import json
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from multiprocessing import Pool, cpu_count
import time
import requests as re
from datetime import datetime
from dateutil.relativedelta import relativedelta, FR
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# visualize
import matplotlib.pyplot as plt
import matplotlib.style as style
from matplotlib_venn import venn2, venn3
import seaborn as sns
from matplotlib import pyplot
from matplotlib.ticker import ScalarFormatter
sns.set_context("talk")
style.use("seaborn-colorblind")
import warnings
warnings.simplefilter("ignore")
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Config
# A simple config and logging setup.
today = datetime.now().strftime("%Y-%m-%d")
today
# config class
class CFG:
"""
Set FETCH_VIA_API = True if you want to fetch the data via API.
Otherwise we use the daily-updated one in the kaggle dataset (faster).
"""
INPUT_DIR = "../input/yfinance-stock-price-data-for-numerai-signals"
OUTPUT_DIR = "./"
FETCH_VIA_API = False
SEED = 46
# Logging is always nice for your experiment:)
def init_logger(log_file="train.log"):
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
logger = getLogger(__name__)
logger.setLevel(INFO)
handler1 = StreamHandler()
handler1.setFormatter(Formatter("%(message)s"))
handler2 = FileHandler(filename=log_file)
handler2.setFormatter(Formatter("%(message)s"))
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
logger = init_logger(log_file=f"{CFG.OUTPUT_DIR}/{today}.log")
logger.info("Start Logging...")
# # Setup Numerai API
# First of all, let's set up the numerai signals API.
# We can do many things with this API:
# - get a ticker map (between yfinance data and numerai historical targets)
# - get the historical targets
# - get your model slot name and model_id (if private key and secret key are provided)
# - submit
# (well, maybe more)
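# As a small, hedged sketch (not needed until the submission step at the very end of this notebook):
# the authenticated flavour of the API is built from your public id and secret key, and `get_models()`
# maps your model slot names to model ids. The helper below is only an illustration and is not called anywhere.
def list_my_signals_models(public_id: str, secret_key: str) -> dict:
    """Return the {slot_name: model_id} mapping for your account (requires real keys)."""
    private_api = numerapi.SignalsAPI(public_id, secret_key)
    return private_api.get_models()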
# ## Get Tickers for Numerai Signals
# Let's first get the ticker map.
napi = numerapi.SignalsAPI()
logger.info("numerai api setup!")
# read in list of active Signals tickers which can change slightly era to era
eligible_tickers = pd.Series(napi.ticker_universe(), name="ticker")
logger.info(f"Number of eligible tickers: {len(eligible_tickers)}")
# read in the Yahoo-to-Numerai ticker map (h/t wsouza)
# this ticker map is a work in progress and is not guaranteed to be 100% correct
ticker_map = pd.read_csv(
"https://numerai-signals-public-data.s3-us-west-2.amazonaws.com/signals_ticker_map_w_bbg.csv"
)
ticker_map = ticker_map[ticker_map.bloomberg_ticker.isin(eligible_tickers)]
numerai_tickers = ticker_map["bloomberg_ticker"]
yfinance_tickers = ticker_map["yahoo"]
logger.info(f"Number of eligible tickers in map: {len(ticker_map)}")
print(ticker_map.shape)
ticker_map.head()
# This ticker map is necessary for a successful submission if you use yfinance data.
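# As a quick illustration (not part of the original flow), the two columns of `ticker_map`
# can be flipped into plain dicts so a Yahoo ticker can be translated into its Numerai
# (Bloomberg) counterpart and back. The lookup key below is only an example and may not
# exist in the map.
yahoo_to_numerai = dict(zip(ticker_map["yahoo"], ticker_map["bloomberg_ticker"]))
numerai_to_yahoo = dict(zip(ticker_map["bloomberg_ticker"], ticker_map["yahoo"]))
print(yahoo_to_numerai.get("AAPL"))  # the mapped Bloomberg ticker, or None if absent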
# # Load Stock Price Data
# Now is the time to get the stock price data, fetched via the [YFinance API](https://pypi.org/project/yfinance/).
# The good thing with this API is that it is free of charge.
# The bad thing with this API is that the data is often not complete.
# For better-quality stock price data, you might want to look into purchasing a feed from [Quandl](https://www.quandl.com/data/EOD-End-of-Day-US-Stock-Prices/documentation?anchor=overview).
# This is another starter using Quandl data:
# https://forum.numer.ai/t/signals-plugging-in-the-data-from-quandl/2431
# This is of course wonderful, but if you are a beginner, why not just start with a free one?
# If you want to fetch the data on your own, you can use this function...
def fetch_yfinance(ticker_map, start="2002-12-01"):
"""
# fetch yfinance data
:INPUT:
- ticker_map : Numerai eligible ticker map (pd.DataFrame)
- start : date (str)
:OUTPUT:
- full_data : pd.DataFrame ('date', 'ticker', 'close', 'raw_close', 'high', 'low', 'open', 'volume')
"""
# ticker map
numerai_tickers = ticker_map["bloomberg_ticker"]
yfinance_tickers = ticker_map["yahoo"]
# fetch
raw_data = yfinance.download(
yfinance_tickers.str.cat(sep=" "), start=start, threads=True
)
# format
cols = ["Adj Close", "Close", "High", "Low", "Open", "Volume"]
full_data = raw_data[cols].stack().reset_index()
full_data.columns = [
"date",
"ticker",
"close",
"raw_close",
"high",
"low",
"open",
"volume",
]
    # map yfinance tickers to numerai tickers
full_data["ticker"] = full_data.ticker.map(
dict(zip(yfinance_tickers, numerai_tickers))
)
return full_data
if CFG.FETCH_VIA_API: # fetch data via api
logger.info("Fetch data via API...may take some time...")
import yfinance
import simplejson
df = fetch_yfinance(ticker_map, start="2002-12-01")
else: # loading from the kaggle dataset (https://www.kaggle.com/code1110/yfinance-stock-price-data-for-numerai-signals)
logger.info("Load data from the kaggle dataset...")
df = pd.read_csv(pathlib.Path(f"{CFG.INPUT_DIR}/full_data.csv"))
print(df.shape)
df.head(3)
df.tail(3)
# ## Load Targets for Numerai Signals
# For supervised machine learning, we need a target label. Numerai Signals provides one, so we can just fetch it.
def read_numerai_signals_targets():
# read in Signals targets
numerai_targets = "https://numerai-signals-public-data.s3-us-west-2.amazonaws.com/signals_train_val.csv"
targets = pd.read_csv(numerai_targets)
# to datetime int
targets["friday_date"] = (
pd.to_datetime(targets["friday_date"].astype(str), format="%Y-%m-%d")
.dt.strftime("%Y%m%d")
.astype(int)
)
# # train, valid split
# train_targets = targets.query('data_type == "train"')
# valid_targets = targets.query('data_type == "validation"')
return targets
targets = read_numerai_signals_targets()
print(targets.shape, targets["friday_date"].min(), targets["friday_date"].max())
targets.head()
targets.tail()
# there are train and validation...
fig, ax = plt.subplots(1, 2, figsize=(16, 4))
ax = ax.flatten()
for i, data_type in enumerate(["train", "validation"]):
# slice
targets_ = targets.query(f'data_type == "{data_type}"')
logger.info("*" * 50)
logger.info(
"{} target: {:,} tickers (friday_date: {} - {})".format(
data_type,
targets_["ticker"].nunique(),
targets_["friday_date"].min(),
targets_["friday_date"].max(),
)
)
# plot target
ax[i].hist(targets_["target"])
ax[i].set_title(f"{data_type}")
# The target looks exactly like the one from the Numerai Tournament, where both features and targets are given to the participants.
# Also note that the train-validation split is based on time (i.e., a time-series split); a quick sanity check is sketched right after the list below:
# - train friday_date: 20030131 - 20121228
# - validation friday_date: 20130104 - 20200228
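# A minimal sanity check of the time-based split (assumes the `targets` dataframe loaded above):
# all training dates should come strictly before the validation dates, otherwise the split
# would leak future information.
train_max_date = targets.query('data_type == "train"')["friday_date"].max()
valid_min_date = targets.query('data_type == "validation"')["friday_date"].min()
assert train_max_date < valid_min_date, "time-series split should not overlap"
print(train_max_date, valid_min_date)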
# ## Check Ticker Overlaps
# Let's see if we have enough overlap of tickers between our yfinance stock data and the numerai targets. We need at least 5 tickers for submission.
# ticker overlap
venn3(
[
set(df["ticker"].unique().tolist()),
set(targets.query('data_type == "train"')["ticker"].unique().tolist()),
set(targets.query('data_type == "validation"')["ticker"].unique().tolist()),
],
set_labels=("yf price", "train target", "valid target"),
)
# Ah, yeah, not bad, I guess?
# Here I only keep the stock price data for tickers that overlap with the targets, so that we can build a supervised machine learning model.
# select target-only tickers
df = df.loc[df["ticker"].isin(targets["ticker"])].reset_index(drop=True)
print("{:,} tickers: {:,} records".format(df["ticker"].nunique(), len(df)))
# As I mentioned earlier, the yfinance stock data is not complete. Let's see if we have enough records per ticker.
record_per_ticker = (
df.groupby("ticker")["date"].nunique().reset_index().sort_values(by="date")
)
record_per_ticker
record_per_ticker["date"].hist()
print(record_per_ticker["date"].describe())
# Unfortunately, some tickers have only a small number of records.
# Here I only use tickers with at least 1,000 records.
tickers_with_records = record_per_ticker.query("date >= 1000")["ticker"].values
df = df.loc[df["ticker"].isin(tickers_with_records)].reset_index(drop=True)
print("Here, we use {:,} tickers: {:,} records".format(df["ticker"].nunique(), len(df)))
# # Feature Engineering
# Yeah finally machine learning part!
# Here we generate sets of stock price features. There are some caveats to be aware of:
# - **No Leak**: we cannot use a feature that relies on future information (this is a forecasting task!)
# - **Stationary features**: our features have to work at any point in time, so their scales must stay stationary across periods (a toy illustration follows right below this list)
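# A toy illustration of the two caveats above (made-up series, not the real data):
# a raw price level drifts over time (non-stationary), while a return or a
# price-to-moving-average gap stays on a comparable scale, and both only use
# backward-looking windows, so they do not peek into the future.
toy_price = pd.Series(np.linspace(100.0, 200.0, 50))   # steadily drifting "price"
toy_return = toy_price.pct_change(20)                  # backward-looking return
toy_ma_gap = toy_price / toy_price.rolling(20).mean()  # gap to its own moving average
print(toy_return.tail(3))
print(toy_ma_gap.tail(3))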
# The implementation of the feature engineering is derived from [J-Quants Tournament](https://japanexchangegroup.github.io/J-Quants-Tutorial/#anchor-2.7). Although this content is in Japanese, I believe this is one of the best resources for feature engineering in the finance domain.
# Also I add the RSI and MACD features as a bonus:D
# We generate features per ticker repeatedly. To accelerate the process, we use parallel processing.
# first, convert the date column in the yfinance stock data to an integer friday_date column (named along the lines of the numerai targets)
df["friday_date"] = df["date"].apply(lambda x: int(str(x).replace("-", "")))
df.tail(3)
# Ready for feature engineering?
# technical indicators
def RSI(close: pd.DataFrame, period: int = 14) -> pd.Series:
# https://gist.github.com/jmoz/1f93b264650376131ed65875782df386
"""See source https://github.com/peerchemist/finta
and fix https://www.tradingview.com/wiki/Talk:Relative_Strength_Index_(RSI)
Relative Strength Index (RSI) is a momentum oscillator that measures the speed and change of price movements.
RSI oscillates between zero and 100. Traditionally, and according to Wilder, RSI is considered overbought when above 70 and oversold when below 30.
Signals can also be generated by looking for divergences, failure swings and centerline crossovers.
RSI can also be used to identify the general trend."""
delta = close.diff()
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
_gain = up.ewm(com=(period - 1), min_periods=period).mean()
_loss = down.abs().ewm(com=(period - 1), min_periods=period).mean()
RS = _gain / _loss
return pd.Series(100 - (100 / (1 + RS)))
def EMA1(x, n):
"""
https://qiita.com/MuAuan/items/b08616a841be25d29817
"""
a = 2 / (n + 1)
return pd.Series(x).ewm(alpha=a).mean()
def MACD(close: pd.DataFrame, span1=12, span2=26, span3=9):
"""
Compute MACD
# https://www.learnpythonwithrune.org/pandas-calculate-the-moving-average-convergence-divergence-macd-for-a-stock/
"""
exp1 = EMA1(close, span1)
exp2 = EMA1(close, span2)
macd = exp1 - exp2
signal = EMA1(macd, span3)
return macd, signal
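# Quick sanity check of the indicator helpers above on a synthetic random-walk price
# series (illustration only; the real features are built per ticker further below).
_demo_close = pd.Series(100 + np.cumsum(np.random.randn(200)))
print(RSI(_demo_close, 14).iloc[-1])  # RSI should land somewhere inside (0, 100)
_macd_line, _signal_line = MACD(_demo_close, 12, 26, 9)
print(_macd_line.iloc[-1], _signal_line.iloc[-1])  # MACD line and its signal line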
def feature_engineering(ticker="ZEAL DC", df=df):
"""
feature engineering
:INPUTS:
- ticker : numerai ticker name (str)
- df : yfinance dataframe (pd.DataFrame)
:OUTPUTS:
- feature_df : feature engineered dataframe (pd.DataFrame)
"""
# init
keys = ["friday_date", "ticker"]
feature_df = df.query(f'ticker == "{ticker}"')
# price features
new_feats = []
for i, f in enumerate(
[
"close",
]
):
for x in [
20,
40,
60,
]:
# return
feature_df[f"{f}_return_{x}days"] = feature_df[f].pct_change(x)
# volatility
feature_df[f"{f}_volatility_{x}days"] = (
np.log1p(feature_df[f]).pct_change().rolling(x).std()
)
            # gap to the moving average (kairi, i.e. deviation of the price from its rolling mean)
feature_df[f"{f}_MA_gap_{x}days"] = feature_df[f] / (
feature_df[f].rolling(x).mean()
)
# features to use
new_feats += [
f"{f}_return_{x}days",
f"{f}_volatility_{x}days",
f"{f}_MA_gap_{x}days",
]
# RSI
feature_df["RSI"] = RSI(feature_df["close"], 14)
# MACD
macd, macd_signal = MACD(feature_df["close"], 12, 26, 9)
feature_df["MACD"] = macd
feature_df["MACD_signal"] = macd_signal
new_feats += ["RSI", "MACD", "MACD_signal"]
# only new feats
feature_df = feature_df[new_feats + keys]
# fill nan
feature_df.fillna(
method="ffill", inplace=True
) # safe fillna method for a forecasting task
feature_df.fillna(method="bfill", inplace=True) # just in case for no nan
return feature_df
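# Optional single-ticker spot check before the full multiprocessing run
# (assumes `df` contains at least one ticker); handy for eyeballing the engineered columns.
_spot_check = feature_engineering(ticker=df["ticker"].iloc[0], df=df)
print(_spot_check.shape)
print(_spot_check.tail(3))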
def add_features(df):
# FE with multiprocessing
tickers = df["ticker"].unique().tolist()
print("FE for {:,} stocks...using {:,} CPUs...".format(len(tickers), cpu_count()))
start_time = time.time()
p = Pool(cpu_count())
feature_dfs = list(tqdm(p.imap(feature_engineering, tickers), total=len(tickers)))
p.close()
p.join()
return pd.concat(feature_dfs)
feature_df = add_features(df)
del df
gc.collect()
print(feature_df.shape)
feature_df.head()
feature_df.tail()
# # Merge Targets and Features
# Feature engineering is done. Let's merge it with the numerai historical targets.
# do we have enough overlap with respect to 'friday_date'?
venn2(
[
set(feature_df["friday_date"].astype(str).unique().tolist()),
set(targets["friday_date"].astype(str).unique().tolist()),
],
set_labels=("features_days", "targets_days"),
)
# do we have enough overlap with respect to 'ticker'?
venn2(
[
set(feature_df["ticker"].astype(str).unique().tolist()),
set(targets["ticker"].astype(str).unique().tolist()),
],
set_labels=("features_ticker", "targets_ticker"),
)
# merge
feature_df["friday_date"] = feature_df["friday_date"].astype(int)
targets["friday_date"] = targets["friday_date"].astype(int)
feature_df = feature_df.merge(targets, how="left", on=["friday_date", "ticker"])
print(feature_df.shape)
feature_df.tail()
# save (just to make sure that we are on the safe side if yfinance is dead some day...)
feature_df.to_pickle(f"{CFG.OUTPUT_DIR}/feature_df.pkl")
feature_df.info()
# We now have a merged features + target table! It seems like we are ready for modeling.
# # Modeling
# Yay, finally!
# Here let's use XGBoost.
# The hyperparameters are derived from the Integration-Test, which is an example yet a strong baseline for the Numerai Tournament.
target = "target"
drops = ["data_type", target, "friday_date", "ticker"]
features = [f for f in feature_df.columns.values.tolist() if f not in drops]
logger.info("{:,} features: {}".format(len(features), features))
# train-valid split
train_set = {
"X": feature_df.query('data_type == "train"')[features],
"y": feature_df.query('data_type == "train"')[target].astype(np.float64),
}
val_set = {
"X": feature_df.query('data_type == "validation"')[features],
"y": feature_df.query('data_type == "validation"')[target].astype(np.float64),
}
assert train_set["y"].isna().sum() == 0
assert val_set["y"].isna().sum() == 0
# same parameters of the Integration-Test
import joblib
from sklearn import utils
import xgboost as xgb
import operator
params = {
"objective": "reg:squarederror",
"eval_metric": "rmse",
"colsample_bytree": 0.1,
"learning_rate": 0.01,
"max_depth": 5,
"seed": 46,
"n_estimators": 2000,
# 'tree_method': 'gpu_hist' # if you want to use GPU ...
}
# define
model = xgb.XGBRegressor(**params)
# fit
model.fit(
train_set["X"],
train_set["y"],
eval_set=[(val_set["X"], val_set["y"])],
verbose=100,
early_stopping_rounds=100,
)
# save model
joblib.dump(model, f"{CFG.OUTPUT_DIR}/xgb_model_val.pkl")
logger.info("xgb model with early stopping saved!")
# feature importance
importance = model.get_booster().get_score(importance_type="gain")
importance = sorted(importance.items(), key=operator.itemgetter(1))
feature_importance_df = pd.DataFrame(importance, columns=["features", "importance"])
# feature importance
fig, ax = plt.subplots(1, 1, figsize=(12, 10))
sns.barplot(
x="importance",
y="features",
data=feature_importance_df.sort_values(by="importance", ascending=False),
ax=ax,
)
# Looks like 'price gap to the moving average' kinds of features are good signals!
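# Optional follow-up to the plot: print the top 10 features by gain so they can be read off directly.
print(feature_importance_df.sort_values(by="importance", ascending=False).head(10))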
# # Validation Score
# The following snippets are derived from
# https://colab.research.google.com/drive/1ECh69C0LDCUnuyvEmNFZ51l_276nkQqo#scrollTo=tTBUzPep2dm3
# Let's see how good our model predictions on the validation data are.
# Good? It's good, isn't it?
# https://colab.research.google.com/drive/1ECh69C0LDCUnuyvEmNFZ51l_276nkQqo#scrollTo=tTBUzPep2dm3
def score(df, target_name=target, pred_name="prediction"):
"""Takes df and calculates spearm correlation from pre-defined cols"""
# method="first" breaks ties based on order in array
return np.corrcoef(df[target_name], df[pred_name].rank(pct=True, method="first"))[
0, 1
]
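# A tiny toy check of the per-era scoring above (made-up numbers, illustration only):
# when the prediction ordering matches the target ordering exactly, the rank correlation is 1.
_toy_era = pd.DataFrame(
    {"target": [0.0, 0.25, 0.5, 0.75, 1.0], "prediction": [0.1, 0.2, 0.3, 0.4, 0.5]}
)
print(score(_toy_era))  # should print 1.0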
def run_analytics(era_scores):
print(f"Mean Correlation: {era_scores.mean():.4f}")
print(f"Median Correlation: {era_scores.median():.4f}")
print(f"Standard Deviation: {era_scores.std():.4f}")
print("\n")
print(f"Mean Pseudo-Sharpe: {era_scores.mean()/era_scores.std():.4f}")
print(f"Median Pseudo-Sharpe: {era_scores.median()/era_scores.std():.4f}")
print("\n")
print(
f"Hit Rate (% positive eras): {era_scores.apply(lambda x: np.sign(x)).value_counts()[1]/len(era_scores):.2%}"
)
era_scores.rolling(10).mean().plot(
kind="line", title="Rolling Per Era Correlation Mean", figsize=(15, 4)
)
plt.axhline(y=0.0, color="r", linestyle="--")
plt.show()
era_scores.cumsum().plot(title="Cumulative Sum of Era Scores", figsize=(15, 4))
plt.axhline(y=0.0, color="r", linestyle="--")
plt.show()
# prediction for the validation set
valid_sub = feature_df.query('data_type == "validation"')[drops].copy()
valid_sub["prediction"] = model.predict(val_set["X"])
# compute score
val_era_scores = valid_sub.copy()
val_era_scores["friday_date"] = val_era_scores["friday_date"].astype(str)
val_era_scores = (
    val_era_scores.loc[val_era_scores["prediction"].notna()]
    .groupby(["friday_date"])
    .apply(score)
)
run_analytics(val_era_scores)
# Well, I guess it is fairly good as a starter, isn't it?
# # Submission
# Let's use this trained model to make a submission for the Numerai Signals.
# Note that, again, yfinance data is not complete. Sometimes there is no recent data available for many tickers;(
# We need at least 5 tickers for a successful submission. Let's first check whether at least 5 tickers have their latest friday_date equal to the most recent Friday.
# recent friday date?
recent_friday = datetime.now() + relativedelta(weekday=FR(-1))
recent_friday = int(recent_friday.strftime("%Y%m%d"))
print(f"Most recent Friday: {recent_friday}")
# in case no recent friday is available...prep the second last
recent_friday2 = datetime.now() + relativedelta(weekday=FR(-2))
recent_friday2 = int(recent_friday2.strftime("%Y%m%d"))
print(f"Second most recent Friday: {recent_friday2}")
# do we have at least 5 tickers whose latest date matches the most recent friday?
ticker_date_df = feature_df.groupby("ticker")["friday_date"].max().reset_index()
if len(ticker_date_df.loc[ticker_date_df["friday_date"] == recent_friday]) >= 5:
ticker_date_df = ticker_date_df.loc[ticker_date_df["friday_date"] == recent_friday]
else:  # fall back to the second most recent friday
ticker_date_df = ticker_date_df.loc[ticker_date_df["friday_date"] == recent_friday2]
recent_friday = recent_friday2
print(len(ticker_date_df))
ticker_date_df
# Good! That's plenty. So we only run inference on those tickers and submit!
# live sub
feature_df.loc[feature_df["friday_date"] == recent_friday, "data_type"] = "live"
test_sub = feature_df.query('data_type == "live"')[drops].copy()
test_sub["prediction"] = model.predict(
feature_df.query('data_type == "live"')[features]
)
logger.info(test_sub.shape)
test_sub.head()
# histogram of prediction
test_sub["prediction"].hist(bins=100)
# Let's submit! A nice thing about Numerai Signals is that if you also submit your predictions on the validation data, the website gives you more information about your model performance, such as APY.
# To submit, you need a Numerai account with an API public id and secret key. You also need at least one Numerai Signals model slot.
def submit_signal(sub: pd.DataFrame, public_id: str, secret_key: str, slot_name: str):
"""
submit numerai signals prediction
"""
# setup private API
napi = numerapi.SignalsAPI(public_id, secret_key)
# write predictions to csv
model_id = napi.get_models()[f"{slot_name}"]
filename = f"sub_{model_id}.csv"
sub.to_csv(filename, index=False)
# submit
submission = napi.upload_predictions(filename, model_id=model_id)
print(f"Submitted : {slot_name}!")
# concat valid and test
sub = pd.concat([valid_sub, test_sub], ignore_index=True)
# rename to 'signal'
sub.rename(columns={"prediction": "signal"}, inplace=True)
# select necessary columns
sub = sub[["ticker", "friday_date", "data_type", "signal"]]
public_id = "<Your Numerai API ID>"
secret_key = "<Your Numerai Secret Key>"
slot_name = "<Your Numerai Signals Submission Slot Name>"
# submit_signal(sub, public_id, secret_key, slot_name) # uncomment if you submit
# save
sub.to_csv(f"{CFG.OUTPUT_DIR}/example_submission_{today}.csv", index=False)
sub.head()
sub.tail()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/009/69009954.ipynb
|
yfinance-stock-price-data-for-numerai-signals
|
code1110
|
[{"Id": 69009954, "ScriptId": 17257860, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 590240, "CreationDate": "07/25/2021 20:06:21", "VersionNumber": 18.0, "Title": "[NumeraiSignals] Starter for Beginners", "EvaluationDate": "07/25/2021", "IsChange": false, "TotalLines": 700.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 700.0, "LinesInsertedFromFork": 470.0, "LinesDeletedFromFork": 338.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 230.0, "TotalVotes": 0}]
|
[{"Id": 91701137, "KernelVersionId": 69009954, "SourceDatasetVersionId": 2456339}]
|
[{"Id": 2456339, "DatasetId": 1333356, "DatasourceVersionId": 2498737, "CreatorUserId": 590240, "LicenseName": "Other (specified in description)", "CreationDate": "07/23/2021 20:51:58", "VersionNumber": 96.0, "Title": "YFinance Stock Price Data for Numerai Signals", "Slug": "yfinance-stock-price-data-for-numerai-signals", "Subtitle": "Daily Updates of Stock OHLCV (Close, High, Low, Open, Volume)", "Description": "This YFiance data is regularly updated to be used for the weekly round of the Numerai Signals.", "VersionNotes": "fAdded stock price data (updated 2021-07-23)", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1333356, "CreatorUserId": 590240, "OwnerUserId": 590240.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6278991.0, "CurrentDatasourceVersionId": 6358939.0, "ForumId": 1352281, "Type": 2, "CreationDate": "05/11/2021 06:17:07", "LastActivityDate": "05/11/2021", "TotalViews": 26229, "TotalDownloads": 8196, "TotalVotes": 55, "TotalKernels": 4}]
|
[{"Id": 590240, "UserName": "code1110", "DisplayName": "katsu1110", "RegisterDate": "04/18/2016", "PerformanceTier": 3}]
|
| false | 0 | 7,391 | 0 | 7,456 | 7,391 |
||
69009022
|
import pandas as pd
import numpy as np
import gc
import tensorflow as tf
import lightgbm as lgbm
from matplotlib import pyplot as plt
from sklearn.metrics import mean_absolute_error
from datetime import datetime, timedelta
from tqdm.auto import tqdm
# # Create Unnested Dataset
df_train = pd.read_csv(
"../input/mlb-player-digital-engagement-forecasting/train_updated.csv"
)
print(df_train.shape)
df_train.head()
df_train.info()
def json_to_df(df, column):
num_rows = len(df)
data_list = []
for row in tqdm(range(num_rows)):
json_data = df.iloc[row][column]
if str(json_data) != "nan":
data = pd.read_json(json_data)
data_list.append(data)
all_data = pd.concat(data_list, axis=0)
return all_data
player_engagement = json_to_df(df_train, "nextDayPlayerEngagement")
player_engagement.insert(
0,
"date",
pd.to_datetime(player_engagement["engagementMetricsDate"]) - timedelta(days=1),
)
player_engagement["engagementMetricsDate"] = pd.to_datetime(
player_engagement["engagementMetricsDate"]
)
player_engagement.reset_index(drop=True, inplace=True)
print(player_engagement.shape)
player_engagement.head()
player_engagement[["target1", "target2", "target3", "target4"]] = player_engagement[
["target1", "target2", "target3", "target4"]
].astype(np.float16)
# # Create Lag Features
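# A short note on the loop below, since it is fairly dense: roughly speaking, for each offset x in
# 1..lag it left-joins the engagement table onto itself per player, matching the row's working `date`
# against `engagementMetricsDate` of an earlier row, which attaches that player's engagement targets
# from preceding days (relative to the next-day target being predicted) as `target*_{x}` lag columns;
# the timedelta adjustments inside and after the loop restore the original date, and the final
# dropna() keeps only rows with a complete lag history.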
lag = 7
lag_df = player_engagement.loc[
player_engagement["date"] >= player_engagement.loc[0, "date"] + timedelta(lag)
]
for x in tqdm(range(1, (lag + 1))):
drop_columns = [f"date_{x}", f"engagementMetricsDate_{x}"]
lag_df = lag_df.merge(
player_engagement,
how="left",
left_on=["date", "playerId"],
right_on=["engagementMetricsDate", "playerId"],
suffixes=["", f"_{x}"],
)
lag_df.drop(columns=drop_columns, inplace=True)
lag_df["date"] = lag_df["date"] - timedelta(days=1)
lag_df["date"] = lag_df["date"] + timedelta(days=lag)
lag_df = lag_df.drop(columns=["engagementMetricsDate"])
lag_df = lag_df.dropna()
lag_df.head()
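# Side note: the merge loop above builds one column per lag day by repeatedly self-joining on the
# shifted date. A rough alternative sketch (not used below) does the same with groupby/shift, under
# the assumption that every player has exactly one row per calendar day with no gaps.
alt_lag = player_engagement.sort_values(["playerId", "date"]).copy()
for k in range(1, lag + 1):
    for t in ["target1", "target2", "target3", "target4"]:
        alt_lag[f"{t}_{k}"] = alt_lag.groupby("playerId")[t].shift(k)
alt_lag = alt_lag.dropna()
alt_lag.head()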
feature_columns = [x for x in lag_df.columns[6:]]
feature_columns
lag_df.info()
lag_df = lag_df.sort_values(by=["date", "playerId"]).reset_index(drop=True)
lag_df.head()
# # Create Descriptive Statistics Based on Lag Features
for x in range(4):
columns = [f"target{x+1}_{i+1}" for i in range(lag)]
lag_df[f"target{x+1}_median"] = lag_df[columns].median(axis=1).astype(np.float32)
lag_df[f"target{x+1}_mean"] = lag_df[columns].mean(axis=1).astype(np.float32)
lag_df[f"target{x+1}_max"] = lag_df[columns].max(axis=1).astype(np.float32)
lag_df[f"target{x+1}_min"] = lag_df[columns].min(axis=1).astype(np.float32)
lag_df[f"target{x+1}_lower_quartile"] = (
lag_df[columns].quantile(0.25, axis=1).astype(np.float32)
)
lag_df[f"target{x+1}_upper_quartile"] = (
lag_df[columns].quantile(0.75, axis=1).astype(np.float32)
)
lag_df[f"target{x+1}_skewness"] = lag_df[columns].skew(axis=1).astype(np.float32)
lag_df = lag_df.drop(columns=columns)
lag_df.head()
lag_df.shape
target_columns = [x for x in lag_df.columns[2:6]]
target_columns
feature_columns = [x for x in lag_df.columns[6:]]
feature_columns
# # Train LightGBM Model
train_index = lag_df.loc[
lag_df["date"] < datetime(2021, 4, 1), feature_columns
].index.to_numpy()
val_index = lag_df.loc[
lag_df["date"] >= datetime(2021, 4, 1), feature_columns
].index.to_numpy()
X_train = lag_df.loc[train_index, feature_columns].to_numpy()
y_train = lag_df.loc[train_index, target_columns]
X_val = lag_df.loc[val_index, feature_columns].to_numpy()
y_val = lag_df.loc[val_index, target_columns]
def lgbm_fit(X_train, y_train, X_val, y_val, params):
model = lgbm.LGBMRegressor(**params)
model.fit(
X_train,
y_train,
eval_set=[(X_val, y_val)],
early_stopping_rounds=100,
verbose=100,
)
pred = model.predict(X_val)
score = mean_absolute_error(pred, y_val)
return model, score
# The parameter values below are copied from cell 14 of this [notebook](https://www.kaggle.com/lhagiimn/lightgbm-catboost-ann-2505f2) by [lhagiimn](https://www.kaggle.com/lhagiimn)
params = {
"boosting_type": "gbrt",
"objective": "mae",
"subsample": 0.5,
"subsample_freq": 1,
"learning_rate": 0.03,
"num_leaves": 2**11 - 1,
"min_data_in_leaf": 2**12 - 1,
"feature_fraction": 0.5,
"max_bin": 100,
"n_estimators": 2500,
"boost_from_average": False,
"random_seed": 42,
}
lgbm_model1, score1 = lgbm_fit(
X_train, y_train["target1"], X_val, y_val["target1"], params
)
lgbm_model2, score2 = lgbm_fit(
X_train, y_train["target2"], X_val, y_val["target2"], params
)
lgbm_model3, score3 = lgbm_fit(
X_train, y_train["target3"], X_val, y_val["target3"], params
)
lgbm_model4, score4 = lgbm_fit(
X_train, y_train["target4"], X_val, y_val["target4"], params
)
score = (score1 + score2 + score3 + score4) / 4
print(f"Overall MAE Score:{score}")
# # Target Inference
def prediction(df):
df = df.reset_index()
df["date"] = pd.to_datetime(df["date"], format="%Y%m%d")
df["playerId"] = df["date_playerId"].apply(lambda x: x.split("_")[1]).astype(int)
for x in range(lag):
df["date"] = df["date"] - timedelta(days=1)
df = df.merge(
player_engagement,
how="left",
on=["date", "playerId"],
suffixes=["", f"_{x+1}"],
)
df = df.fillna(0.0)
for x in range(4):
columns = [f"target{x+1}_{i+1}" for i in range(lag)]
df[f"target{x+1}_median"] = df[columns].median(axis=1)
df[f"target{x+1}_mean"] = df[columns].mean(axis=1)
df[f"target{x+1}_max"] = df[columns].max(axis=1)
df[f"target{x+1}_min"] = df[columns].min(axis=1)
df[f"target{x+1}_lower_quartile"] = df[columns].quantile(0.25, axis=1)
df[f"target{x+1}_upper_quartile"] = df[columns].quantile(0.75, axis=1)
df[f"target{x+1}_skewness"] = df[columns].skew(axis=1)
df = df.drop(columns=columns)
target1_pred = lgbm_model1.predict(df[feature_columns].to_numpy())
target2_pred = lgbm_model2.predict(df[feature_columns].to_numpy())
target3_pred = lgbm_model3.predict(df[feature_columns].to_numpy())
target4_pred = lgbm_model4.predict(df[feature_columns].to_numpy())
return target1_pred, target2_pred, target3_pred, target4_pred
player_engagement = player_engagement.drop(columns=["engagementMetricsDate"])
import mlb
env = mlb.make_env() # initialize the environment
iter_test = env.iter_test() # iterator which loops over each date in test set
for test_df, sample_prediction_df in iter_test:
target1, target2, target3, target4 = prediction(sample_prediction_df)
sample_prediction_df["target1"] = np.clip(target1, 0, 100)
sample_prediction_df["target2"] = np.clip(target2, 0, 100)
sample_prediction_df["target3"] = np.clip(target3, 0, 100)
sample_prediction_df["target4"] = np.clip(target4, 0, 100)
env.predict(sample_prediction_df)
import numpy as np  # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import plotly.express as px
pd.set_option("display.max_rows", None, "display.max_columns", None)
from sklearn.model_selection import train_test_split
import imblearn # Major library - Please ensure this is installed
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# -------------------------------------------------------------------
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings("ignore")
# -------------------------------------------------------------------
random.seed(100)
# ## Loading Data
# In this notebook, we use the credit card fraud detection dataset. Since fraud occurs rarely, the target variable is severely imbalanced, making it a perfect case for the different sampling methods demonstrated below. The link to and detailed description of the original data can be found here: https://www.kaggle.com/mlg-ulb/creditcardfraud
dataset = pd.read_csv(r"../input/creditcardfraud/creditcard.csv")
# ------------------------------------------------------------------------------------------------
# Summary
print("Total Shape :", dataset.shape)
dataset.head()
dataset.describe()
# ## About the dataset:
# 1. The dataset consists of 28 principal components (V1-V28) already extracted in the source data, plus the raw Time and Amount columns. The component names have been anonymized for confidentiality purposes
# 2. The Time column only serves as a level/sequence indicator, and the Class column is the target variable we aim to predict (binary classification: 0 for non-fraud and 1 for fraud)
# 3. Since the PCA components are already scaled, no further scaling is applied here. In real-life problems, however, the features must be scaled first, because all of the sampling techniques below rely internally on (primarily Euclidean) distances (see the sketch after this list)
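# A minimal illustration of that last point (not needed for the PCA features here, which are already
# on comparable scales): in a real project the raw features would be standardised before any
# distance-based resampling, e.g. with scikit-learn's StandardScaler. 'Amount' is one of the two raw
# (non-PCA) columns in this dataset, so it serves as a small example.
from sklearn.preprocessing import StandardScaler
amount_scaled = StandardScaler().fit_transform(dataset[["Amount"]])
print("Amount mean/std before scaling:", dataset["Amount"].mean(), dataset["Amount"].std())
print("Amount mean/std after scaling :", amount_scaled.mean(), amount_scaled.std())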
# ### Null Check
pd.DataFrame(dataset.isnull().sum()).T
# ### Minority Class contribution in the dataset
print(
"Total fraud(Class = 1) and not-fraud(Class = 0) :\n",
dataset["Class"].value_counts(),
)
print(
"Percentage of minority samples over total Data :",
100 * dataset[dataset["Class"] == 1].shape[0] / dataset.shape[0],
"%",
)
# ## Insight:
# 1. The contribution of Class 1 (fraud) is abysmally low (~0.17%), so a naively trained model will not learn the patterns of fraud properly and its predictions for the minority class will be poor.
# 2. To remedy this, we have an array of sampling techniques at our disposal that help us overcome the imbalanced-classification problem
# # Splitting the data to get a baseline performance before sampling
# Test Train Split for modelling purpose
X = dataset.loc[
:,
[cols for cols in dataset.columns if ("Class" not in cols) & ("Time" not in cols)],
] # Removing Time since it's a level column
y = dataset.loc[:, [cols for cols in dataset.columns if "Class" in cols]]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=100
)
# ----------------------------------------------------------------------------------------------------
print("Total Shape of Train X:", X_train.shape)
print("Total Shape of Train Y:", y_train.shape)
# ## UDF for 3-d plotting of the data
def plot_3d(df, col1, col2, col3, hue_elem, name):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection="3d")
ax.scatter(df[col1], df[col2], df[col3], c=df[hue_elem], marker="o")
title = "Scatter plot for :" + name
ax.set_title(title)
ax.set_xlabel(col1 + " Label")
ax.set_ylabel(col2 + " Label")
ax.set_zlabel(col3 + " Label")
plt.show()
# # Unsampled data in 3-d axis (3 Principal Components)
train = X_train.join(y_train)
print(
"Percentage of minority samples over Training Data :",
100 * train[train["Class"] == 1].shape[0] / train.shape[0],
"%",
)
# -----------------------------------------------------------------------------------------------------------------------
plot_3d(train, "V3", "V1", "V2", "Class", "Un-Sampled")
# # Baseline - Logistic Regression Model on imbalanced data (~0.17%)
lr_clf = LogisticRegression(solver="sag", random_state=100)
lr_clf.fit(X_train, y_train)
pred = lr_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, pred))
print("3. Classification Report -\n", classification_report(y_test, pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, pred))
# # Baseline - XGB Model on imbalanced data (~0.17%)
import xgboost as xgb
xgb_clf = xgb.XGBClassifier(random_state=100)
xgb_clf.fit(X_train, y_train)
xgb_pred = xgb_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, xgb_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, xgb_pred))
print("3. Classification Report -\n", classification_report(y_test, xgb_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, xgb_pred))
# # Baseline - Random Forest Model on imbalanced data (~0.17%)
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(random_state=100, n_jobs=-1)
rf_clf.fit(X_train, y_train)
rf_pred = rf_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, rf_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, rf_pred))
print("3. Classification Report -\n", classification_report(y_test, rf_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, rf_pred))
# ## Insights -
# 1. Though the accuracy is >99%, the recall of class 1 (fraud) is low. This is a typical observation in imbalanced classification problems.
# 2. Accuracy is therefore not a reliable metric here; ROC AUC makes more sense for comparison (see the AUPRC sketch below as well).
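# The dataset's authors recommend measuring performance with the area under the precision-recall
# curve (AUPRC), which is even more informative at this level of imbalance. A minimal sketch for the
# random-forest baseline, using predicted probabilities rather than hard labels:
from sklearn.metrics import average_precision_score
rf_proba = rf_clf.predict_proba(X_test)[:, 1]
print(
    "Baseline RF average precision (AUPRC): %.3f"
    % average_precision_score(y_test.values.ravel(), rf_proba)
)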
# # Sampling Techniques :
# 1. Over-Sampling Techniques
# 1. SMOTE
# 2. Borderline SMOTE
# 3. ADASYN
#
# 2. Under-Sampling Techniques
# 1. Random Under-sampling
# 2. Near Miss 1,2,3
# 3. Condensed Nearest Neighbors
# 4. Tomek Links Removal
# 5. One Sided Selection
#
# All of the above techniques are available in a single package, 'imblearn'. The official documentation is here:
# https://imbalanced-learn.org/stable/references/index.html
# ## SMOTE (Synthetic Minority Oversampling Technique)
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.SMOTE.html
from imblearn.over_sampling import SMOTE
# ------------------------------------------------------------------------------------------
oversample = SMOTE(sampling_strategy=0.10, k_neighbors=3, random_state=100, n_jobs=-1)
X_train_smote, y_train_smote = oversample.fit_resample(X_train, y_train)
# ------------------------------------------------------------------------------------------
train_smote = X_train_smote.join(y_train_smote)
print(
"Percentage of minority samples over Training Data :",
100 * train_smote[train_smote["Class"] == 1].shape[0] / train_smote.shape[0],
"%",
)
# ------------------------------------------------------------------------------------------
plot_3d(train_smote, "V3", "V1", "V2", "Class", "SMOTE")
# ## Borderline SMOTE:
# Reference Links- https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.BorderlineSMOTE.html
from imblearn.over_sampling import BorderlineSMOTE
# --------------------------------------------------------------------------------------
bsmote = BorderlineSMOTE(
sampling_strategy=0.10, k_neighbors=3, m_neighbors=5, random_state=100, n_jobs=-1
)
X_train_bsmote, y_train_bsmote = bsmote.fit_resample(X_train, y_train)
# --------------------------------------------------------------------------------------
train_bsmote = X_train_bsmote.join(y_train_bsmote)
print(
"Percentage of minority samples over Training Data :",
100 * train_bsmote[train_bsmote["Class"] == 1].shape[0] / train_bsmote.shape[0],
"%",
)
# --------------------------------------------------------------------------------------
plot_3d(train_bsmote, "V3", "V1", "V2", "Class", "Borderline-SMOTE")
# ## ADASYN (Adaptive Synthetic Minority Sampling)
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.ADASYN.html
# transform the dataset
from imblearn.over_sampling import ADASYN
adasyn = ADASYN(sampling_strategy=0.10, n_neighbors=5, random_state=100, n_jobs=-1)
X_train_adasyn, y_train_adasyn = adasyn.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_adasyn = X_train_adasyn.join(y_train_adasyn)
print("Total datapoints :", train_adasyn.shape)
print(
"Percentage of minority samples over Training Data :",
100 * train_adasyn[train_adasyn["Class"] == 1].shape[0] / train_adasyn.shape[0],
"%",
)
# --------------------------------------------------------------------------------------
plot_3d(train_adasyn, "V3", "V1", "V2", "Class", "ADASYN")
# # Under-Sampling Techniques
# ## Random Undersampling
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.RandomUnderSampler.html
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(sampling_strategy=0.10, replacement=False, random_state=100)
X_train_rus, y_train_rus = rus.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_rus = X_train_rus.join(y_train_rus)
print(
"Percentage of minority samples over Training Data :",
100 * train_rus[train_rus["Class"] == 1].shape[0] / train_rus.shape[0],
"%",
)
# -----------------------------------------------------------------------------------
plot_3d(train_rus, "V3", "V1", "V2", "Class", "Random Undersampling")
# ## Near-Miss Undersampling
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.NearMiss.html
# ## Near-Miss 1
from imblearn.under_sampling import NearMiss
nm = NearMiss(sampling_strategy=0.10, n_neighbors=1, version=1, n_jobs=-1)
X_train_nm, y_train_nm = nm.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_nm = X_train_nm.join(y_train_nm)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Percentage of minority samples over Training Data :",
100 * train_nm[train_nm["Class"] == 1].shape[0] / train_nm.shape[0],
"%",
)
# -----------------------------------------------------------------------------------
plot_3d(train_nm, "V3", "V1", "V2", "Class", "Near-Miss 1")
# ## Near-Miss 2
from imblearn.under_sampling import NearMiss
nm = NearMiss(sampling_strategy=0.10, n_neighbors=1, version=2, n_jobs=-1)
X_train_nm, y_train_nm = nm.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_nm = X_train_nm.join(y_train_nm)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Percentage of minority samples over Training Data :",
100 * train_nm[train_nm["Class"] == 1].shape[0] / train_nm.shape[0],
"%",
)
# -----------------------------------------------------------------------------------
plot_3d(train_nm, "V3", "V1", "V2", "Class", "Near-Miss 2")
# ## Near-Miss 3
from imblearn.under_sampling import NearMiss
nm = NearMiss(
sampling_strategy=0.10, n_neighbors_ver3=20, version=3, n_jobs=-1
) # n_neighbors_ver3 value arrived at by trial and error
X_train_nm, y_train_nm = nm.fit_resample(X_train, y_train)
# ------------------------------------------------------------------------------------------------------------
train_nm = X_train_nm.join(y_train_nm)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Note : For Near-Miss-3, the desired ratio parameter doesnt work due to the nature of the algorithm"
)
print(
"Percentage of minority samples over Training Data :",
100 * train_nm[train_nm["Class"] == 1].shape[0] / train_nm.shape[0],
"%",
)
# ------------------------------------------------------------------------------------------------------------
plot_3d(train_nm, "V3", "V1", "V2", "Class", "Near-Miss 3")
# ## Note -
# For CNN, Tomek Links and OSS, the run-time is very high because they operate element-wise. Hence, for the scope of this notebook, we reduce the number of training rows passed to the slowest of them (a stratified alternative is sketched below).
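# A stratified subsample is a reasonable alternative to a plain positional slice, since it keeps the
# fraud ratio intact while cutting runtime. Minimal sketch (illustration only; the cells below keep
# the original [0:50000] slice):
X_small, _, y_small, _ = train_test_split(
    X_train, y_train, train_size=50000, stratify=y_train, random_state=100
)
print("Fraud rate in stratified subsample:", 100 * y_small["Class"].mean(), "%")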
# ## CNN (Condensed Nearest Neighbors)
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.CondensedNearestNeighbour.html
from imblearn.under_sampling import CondensedNearestNeighbour
CNN = CondensedNearestNeighbour(
sampling_strategy="auto", n_seeds_S=100, n_neighbors=1, random_state=100, n_jobs=-1
)
X_train_cnn, y_train_cnn = CNN.fit_resample(
X_train[0:50000], y_train[0:50000]
) # Reduced data for the purpose of low run-time. In realtime, use the full data
# -----------------------------------------------------------------------------------------------------------------
train_cnn = X_train_cnn.join(y_train_cnn)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Percentage of minority samples over Training Data :",
100 * train_cnn[train_cnn["Class"] == 1].shape[0] / train_cnn.shape[0],
"%",
)
# -----------------------------------------------------------------------------------------------------------------
plot_3d(train_cnn, "V3", "V1", "V2", "Class", "CNN (Condensed Nearest Neighbor)")
# ## Tomek Links Removal
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.TomekLinks.html
from imblearn.under_sampling import TomekLinks
tomek = TomekLinks(sampling_strategy="auto", n_jobs=-1)
X_train_tomek, y_train_tomek = tomek.fit_resample(
X_train[0:50000], y_train[0:50000]
) # Reduced data for the purpose of low run-time. In realtime, use the full data
# -----------------------------------------------------------------------------------
train_tomek = X_train_tomek.join(y_train_tomek)
print(
"Percentage of minority samples over Training Data :",
100 * train_tomek[train_tomek["Class"] == 1].shape[0] / train_tomek.shape[0],
"%",
)
# -----------------------------------------------------------------------------------------------------------------
plot_3d(train_tomek, "V3", "V1", "V2", "Class", "Tomek Link Removal")
# ## One Sided Selection (OSS)
# - This doesn't return records as per the desired ratio
# - Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.OneSidedSelection.html
from imblearn.under_sampling import OneSidedSelection
OSS = OneSidedSelection(
sampling_strategy="auto", n_neighbors=9, n_seeds_S=100, random_state=100, n_jobs=-1
)
X_train_oss, y_train_oss = OSS.fit_resample(X_train, y_train)
# ------------------------------------------------------------------------------------------------------------
train_oss = X_train_oss.join(y_train_oss)
print(
"Percentage of minority samples over Training Data :",
100 * train_oss[train_oss["Class"] == 1].shape[0] / train_oss.shape[0],
"%",
)
# ------------------------------------------------------------------------------------------------------------
plot_3d(train_oss, "V3", "V1", "V2", "Class", "One Sided Selection (OSS)")
print(X_train_oss.shape)
X_train_oss.head(1)
# # Passing re-sampled data into the model for training
## Final X-Y pair of training to pass (Pick any technique to experiment on)
X_train_final = X_train_adasyn.copy()
y_train_final = y_train_adasyn.copy()
# -----------------------------------------------------------------------------
train_final = X_train_final.join(y_train_final)
print(
"Percentage of minority samples over Final Training Data :",
100 * train_final[train_final["Class"] == 1].shape[0] / train_final.shape[0],
"%",
)
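# To make the "pick any technique" step less arbitrary, a quick loop over a few of the resampled
# training sets compares them on the untouched test split. Logistic regression is used here only
# because it is fast; this is an illustrative sketch, not a full benchmark.
resampled_sets = {
    "SMOTE": (X_train_smote, y_train_smote),
    "Borderline-SMOTE": (X_train_bsmote, y_train_bsmote),
    "ADASYN": (X_train_adasyn, y_train_adasyn),
    "Random undersampling": (X_train_rus, y_train_rus),
}
for name, (X_s, y_s) in resampled_sets.items():
    clf = LogisticRegression(solver="sag", random_state=100)
    clf.fit(X_s, np.ravel(y_s))
    print(name, "- ROC AUC: %.3f" % roc_auc_score(y_test, clf.predict(X_test)))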
# # Final Model - Logistic Regression on re-sampled data (~9% minority)
lr_clf = LogisticRegression(solver="sag", random_state=100)
lr_clf.fit(X_train_final, y_train_final)
pred = lr_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, pred))
print("3. Classification Report -\n", classification_report(y_test, pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, pred))
# # Final Model - XGB Classifier on re-sampled data (~9% minority)
import xgboost as xgb
xgb_clf = xgb.XGBClassifier(random_state=100, n_jobs=-1)
xgb_clf.fit(X_train_final, y_train_final)
xgb_pred = xgb_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, xgb_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, xgb_pred))
print("3. Classification Report -\n", classification_report(y_test, xgb_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, xgb_pred))
# # Final Model - Random Forest on re-sampled data (~9% minority)
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(random_state=100, n_jobs=-1)
rf_clf.fit(X_train_final, y_train_final)
rf_pred = rf_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, rf_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, rf_pred))
print("3. Classification Report -\n", classification_report(y_test, rf_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, rf_pred))
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import plotly.express as px
pd.set_option("display.max_rows", None, "display.max_columns", None)
from sklearn.model_selection import train_test_split
import imblearn # Major library - Please ensure this is installed
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# -------------------------------------------------------------------
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings("ignore")
# -------------------------------------------------------------------
random.seed(100)
# ## Loading Data
# In this notebook, we are using the credit card fraud detection dataset. Since a fraud occurs rarely, the target variable is severely imbalanced, making it a perfect case to solve through different sampling methods as prescribed below. The link and detailed description to the original data can be found here : https://www.kaggle.com/mlg-ulb/creditcardfraud
dataset = pd.read_csv(r"../input/creditcardfraud/creditcard.csv")
# ------------------------------------------------------------------------------------------------
# Summary
print("Total Shape :", dataset.shape)
dataset.head()
dataset.describe()
# ## About the dataset:
# 1. The dataset consists of 29 principal components already extracted in the source dataset. The column names have been anonymized for business confidentiality purpose
# 2. The time column is for the purpose of level and the Class column is the target variable we aim to predict (Binary Classification : 0 for Non-fraud and 1 for fraud
# 3. Since the dataset comprises of principle components of the raw features/attributes, they are already scaled. But in real life problems apart form this notebook, the features need to be scaled as all of the sampling techniques employ distance (Euclidean primarily) for their internal functioning
# ### Null Check
pd.DataFrame(dataset.isnull().sum()).T
# ### Minority Class contribution in the dataset
print(
"Total fraud(Class = 1) and not-fraud(Class = 0) :\n",
dataset["Class"].value_counts(),
)
print(
"Percentage of minority samples over total Data :",
100 * dataset[dataset["Class"] == 1].shape[0] / dataset.shape[0],
"%",
)
# ## Insight:
# 1. The %contribution of Class 1 i.e fraud is abysmally low (~0.17%), hence the model will not be able to learn properly on the patterns of a fraud and hence the prediction quality will be poor.
# 2. To remediate the above case, we have an array of sampling techniques at our disposal which lead us to overcome the problem of imbalance classification
# # Splitting the data to get a baseline performance before sampling
# Test Train Split for modelling purpose
X = dataset.loc[
:,
[cols for cols in dataset.columns if ("Class" not in cols) & ("Time" not in cols)],
] # Removing time since its a level column
y = dataset.loc[:, [cols for cols in dataset.columns if "Class" in cols]]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=100
)
# ----------------------------------------------------------------------------------------------------
print("Total Shape of Train X:", X_train.shape)
print("Total Shape of Train Y:", y_train.shape)
# ## UDF for 3-d plotting of the data
def plot_3d(df, col1, col2, col3, hue_elem, name):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection="3d")
ax.scatter(df[col1], df[col2], df[col3], c=df[hue_elem], marker="o")
title = "Scatter plot for :" + name
ax.set_title(title)
ax.set_xlabel(col1 + " Label")
ax.set_ylabel(col2 + " Label")
ax.set_zlabel(col3 + " Label")
plt.show()
# # Unsampled data in 3-d axis (3 Principal Components)
train = X_train.join(y_train)
print(
"Percentage of minority samples over Training Data :",
100 * train[train["Class"] == 1].shape[0] / train.shape[0],
"%",
)
# -----------------------------------------------------------------------------------------------------------------------
plot_3d(train, "V3", "V1", "V2", "Class", "Un-Sampled")
# # Baseline - Logistic Regression Model on imbalanced data (~0.17%)
lr_clf = LogisticRegression(solver="sag", random_state=100)
lr_clf.fit(X_train, y_train)
pred = lr_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, pred))
print("3. Classification Report -\n", classification_report(y_test, pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, pred))
# # Baseline - XGB Model on imbalanced data (~0.17%)
import xgboost as xgb
xgb_clf = xgb.XGBClassifier(random_state=100)
xgb_clf.fit(X_train, y_train)
xgb_pred = xgb_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, xgb_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, xgb_pred))
print("3. Classification Report -\n", classification_report(y_test, xgb_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, xgb_pred))
# # Baseline - Random Forest Model on imbalanced data (~0.17%)
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(random_state=100, n_jobs=-1)
rf_clf.fit(X_train, y_train)
rf_pred = rf_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, rf_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, rf_pred))
print("3. Classification Report -\n", classification_report(y_test, rf_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, rf_pred))
# ## Insights -
# 1. Though the accuracy is >99%, the recall of the class 1 (fraud) is low. This is a typical observation in any imbalanced classification problem.
# 2. The accuracy is not a reliable metric. Here, ROC AUC makes more sense to compare.
# # Sampling Techniques :
# 1. Over-Sampling Techniques
# 1. SMOTE
# 2. Borderline SMOTE
# 3. ADASYN
#
# 2. Under-Sampling Techniques
# 1. Random Under-sampling
# 2. Near Miss 1,2,3
# 3. Condensed Nearest Neighors
# 4. Tomek Links Removal
# 5. One Sided Selection
#
# All the above techniques can be found in one package 'imblearn'. The link for the official documentation is following:
# https://imbalanced-learn.org/stable/references/index.html
# ## SMOTE (Synthetic Minority Oversampling Technique)
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.SMOTE.html
from imblearn.over_sampling import SMOTE
# ------------------------------------------------------------------------------------------
oversample = SMOTE(sampling_strategy=0.10, k_neighbors=3, random_state=100, n_jobs=-1)
X_train_smote, y_train_smote = oversample.fit_resample(X_train, y_train)
# ------------------------------------------------------------------------------------------
train_smote = X_train_smote.join(y_train_smote)
print(
"Percentage of minority samples over Training Data :",
100 * train_smote[train_smote["Class"] == 1].shape[0] / train_smote.shape[0],
"%",
)
# ------------------------------------------------------------------------------------------
plot_3d(train_smote, "V3", "V1", "V2", "Class", "SMOTE")
# ## Borderline SMOTE:
# Reference Links- https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.BorderlineSMOTE.html
from imblearn.over_sampling import BorderlineSMOTE
# --------------------------------------------------------------------------------------
bsmote = BorderlineSMOTE(
sampling_strategy=0.10, k_neighbors=3, m_neighbors=5, random_state=100, n_jobs=-1
)
X_train_bsmote, y_train_bsmote = bsmote.fit_resample(X_train, y_train)
# --------------------------------------------------------------------------------------
train_bsmote = X_train_bsmote.join(y_train_bsmote)
print(
"Percentage of minority samples over Training Data :",
100 * train_bsmote[train_bsmote["Class"] == 1].shape[0] / train_bsmote.shape[0],
"%",
)
# --------------------------------------------------------------------------------------
plot_3d(train_bsmote, "V3", "V1", "V2", "Class", "Borderline-SMOTE")
# ## ADASYN (Adaptive Synthetic Minority Sampling)
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.ADASYN.html
# transform the dataset
from imblearn.over_sampling import ADASYN
adasyn = ADASYN(sampling_strategy=0.10, n_neighbors=5, random_state=100, n_jobs=-1)
X_train_adasyn, y_train_adasyn = adasyn.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_adasyn = X_train_adasyn.join(y_train_adasyn)
print("Total datapoints :", train_adasyn.shape)
print(
"Percentage of minority samples over Training Data :",
100 * train_adasyn[train_adasyn["Class"] == 1].shape[0] / train_adasyn.shape[0],
"%",
)
# --------------------------------------------------------------------------------------
plot_3d(train_adasyn, "V3", "V1", "V2", "Class", "ADASYN")
# # Under-Sampling Techniques
# ## Random Undersampling
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.RandomUnderSampler.html
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(sampling_strategy=0.10, replacement=False, random_state=100)
X_train_rus, y_train_rus = rus.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_rus = X_train_rus.join(y_train_rus)
print(
"Percentage of minority samples over Training Data :",
100 * train_rus[train_rus["Class"] == 1].shape[0] / train_rus.shape[0],
"%",
)
# -----------------------------------------------------------------------------------
plot_3d(train_rus, "V3", "V1", "V2", "Class", "Random Undersampling")
# ## Near-Miss Undersampling
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.NearMiss.html
# ## Near-Miss 1
from imblearn.under_sampling import NearMiss
nm = NearMiss(sampling_strategy=0.10, n_neighbors=1, version=1, n_jobs=-1)
X_train_nm, y_train_nm = nm.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_nm = X_train_nm.join(y_train_nm)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Percentage of minority samples over Training Data :",
100 * train_nm[train_nm["Class"] == 1].shape[0] / train_nm.shape[0],
"%",
)
# -----------------------------------------------------------------------------------
plot_3d(train_nm, "V3", "V1", "V2", "Class", "Near-Miss 1")
# ## Near-Miss 2
from imblearn.under_sampling import NearMiss
nm = NearMiss(sampling_strategy=0.10, n_neighbors=1, version=2, n_jobs=-1)
X_train_nm, y_train_nm = nm.fit_resample(X_train, y_train)
# -----------------------------------------------------------------------------------
train_nm = X_train_nm.join(y_train_nm)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Percentage of minority samples over Training Data :",
100 * train_nm[train_nm["Class"] == 1].shape[0] / train_nm.shape[0],
"%",
)
# -----------------------------------------------------------------------------------
plot_3d(train_nm, "V3", "V1", "V2", "Class", "Near-Miss 2")
# ## Near-Miss 3
from imblearn.under_sampling import NearMiss
nm = NearMiss(
sampling_strategy=0.10, n_neighbors_ver3=20, version=3, n_jobs=-1
) # n_neighbors_value arrived at w/ hit and trial
X_train_nm, y_train_nm = nm.fit_resample(X_train, y_train)
# ------------------------------------------------------------------------------------------------------------
train_nm = X_train_nm.join(y_train_nm)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Note : For Near-Miss-3, the desired ratio parameter doesnt work due to the nature of the algorithm"
)
print(
"Percentage of minority samples over Training Data :",
100 * train_nm[train_nm["Class"] == 1].shape[0] / train_nm.shape[0],
"%",
)
# ------------------------------------------------------------------------------------------------------------
plot_3d(train_nm, "V3", "V1", "V2", "Class", "Near-Miss 3")
# ## Note -
# For CNN, Tomek Links and OSS the run-time is very high because they rely on sample-by-sample nearest-neighbour comparisons. Hence, for the scope of this notebook, we reduce the number of rows passed to these undersamplers.
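# A hedged sketch of an alternative way to shrink the data (not from the original run): instead of
# slicing the first 50,000 rows, keep every minority sample and subsample only the majority class.
# Assumes X_train / y_train are a pandas DataFrame / Series and the majority class has >= 50,000 rows.
minority_idx = y_train[y_train == 1].index
majority_idx = y_train[y_train == 0].sample(n=50000, random_state=100).index
subset_idx = minority_idx.union(majority_idx)
X_train_small, y_train_small = X_train.loc[subset_idx], y_train.loc[subset_idx]
print("Reduced shape :", X_train_small.shape)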
# ## CNN (Condensed Nearest Neighbors)
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.CondensedNearestNeighbour.html
from imblearn.under_sampling import CondensedNearestNeighbour
CNN = CondensedNearestNeighbour(
sampling_strategy="auto", n_seeds_S=100, n_neighbors=1, random_state=100, n_jobs=-1
)
X_train_cnn, y_train_cnn = CNN.fit_resample(
X_train[0:50000], y_train[0:50000]
) # Reduced data for the purpose of low run-time. In realtime, use the full data
# -----------------------------------------------------------------------------------------------------------------
train_cnn = X_train_cnn.join(y_train_cnn)
print("Value Counts :\n", train_nm["Class"].value_counts())
print(
"Percentage of minority samples over Training Data :",
100 * train_cnn[train_cnn["Class"] == 1].shape[0] / train_cnn.shape[0],
"%",
)
# -----------------------------------------------------------------------------------------------------------------
plot_3d(train_cnn, "V3", "V1", "V2", "Class", "CNN (Condensed Nearest Neighbor)")
# ## Tomek Links Removal
# Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.TomekLinks.html
from imblearn.under_sampling import TomekLinks
tomek = TomekLinks(sampling_strategy="auto", n_jobs=-1)
X_train_tomek, y_train_tomek = tomek.fit_resample(
X_train[0:50000], y_train[0:50000]
) # Reduced data for the purpose of low run-time. In realtime, use the full data
# -----------------------------------------------------------------------------------
train_tomek = X_train_tomek.join(y_train_tomek)
print(
"Percentage of minority samples over Training Data :",
100 * train_tomek[train_tomek["Class"] == 1].shape[0] / train_tomek.shape[0],
"%",
)
# -----------------------------------------------------------------------------------------------------------------
plot_3d(train_tomek, "V3", "V1", "V2", "Class", "Tomek Link Removal")
# ## One Sided Selection (OSS)
# - This doesn't return records as per the desired ratio
# - Reference Links - https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.OneSidedSelection.html
from imblearn.under_sampling import OneSidedSelection
OSS = OneSidedSelection(
sampling_strategy="auto", n_neighbors=9, n_seeds_S=100, random_state=100, n_jobs=-1
)
X_train_oss, y_train_oss = OSS.fit_resample(X_train, y_train)
# ------------------------------------------------------------------------------------------------------------
train_oss = X_train_oss.join(y_train_oss)
print(
"Percentage of minority samples over Training Data :",
100 * train_oss[train_oss["Class"] == 1].shape[0] / train_oss.shape[0],
"%",
)
# ------------------------------------------------------------------------------------------------------------
plot_3d(train_oss, "V3", "V1", "V2", "Class", "One Sided Sampling (OSS)")
print(X_train_oss.shape)
X_train_oss.head(1)
# # Passing resampled data into the model for training
## Final X-y training pair to pass to the model (pick any resampling technique above to experiment with)
X_train_final = X_train_adasyn.copy()
y_train_final = y_train_adasyn.copy()
# -----------------------------------------------------------------------------
train_final = X_train_final.join(y_train_final)
print(
"Percentage of minority samples over Final Training Data :",
100 * train_final[train_final["Class"] == 1].shape[0] / train_final.shape[0],
"%",
)
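# A hedged sketch (not part of the original notebook): before committing to one technique, the
# resampled training sets built above can be compared with the same simple baseline on the held-out
# test split. Assumes X_test / y_test already exist, as they are used by the final models below.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

candidates = {
    "ADASYN": (X_train_adasyn, y_train_adasyn),
    "Random Undersampling": (X_train_rus, y_train_rus),
    "Near-Miss 3 (last fitted)": (X_train_nm, y_train_nm),
    "One Sided Selection": (X_train_oss, y_train_oss),
}
for name, (X_res, y_res) in candidates.items():
    clf = LogisticRegression(solver="sag", max_iter=1000, random_state=100)
    clf.fit(X_res, y_res)
    proba = clf.predict_proba(X_test)[:, 1]
    print("%-26s ROC AUC : %.3f" % (name, roc_auc_score(y_test, proba)))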
# # Final Model - Logistic Regression on the resampled data (~9% minority class)
lr_clf = LogisticRegression(solver="sag", random_state=100)
lr_clf.fit(X_train_final, y_train_final)
pred = lr_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, pred))
print("3. Classification Report -\n", classification_report(y_test, pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, pred))
# # Final Model - XGB Classifier on the resampled data (~9% minority class)
import xgboost as xgb
xgb_clf = xgb.XGBClassifier(random_state=100, n_jobs=-1)
xgb_clf.fit(X_train_final, y_train_final)
xgb_pred = xgb_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, xgb_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, xgb_pred))
print("3. Classification Report -\n", classification_report(y_test, xgb_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, xgb_pred))
# # Final Model - Random Forest on the resampled data (~9% minority class)
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(random_state=100, n_jobs=-1)
rf_clf.fit(X_train_final, y_train_final)
rf_pred = rf_clf.predict(X_test)
# -----------------------------------------------
score = roc_auc_score(y_test, rf_pred)
print("1. ROC AUC: %.3f" % score)
print("2. Accuracy :", accuracy_score(y_test, rf_pred))
print("3. Classification Report -\n", classification_report(y_test, rf_pred))
print("4. Confusion Matrix - \n", confusion_matrix(y_test, rf_pred))
# # Import Libraries
import pandas as pd
import os
import io
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import datetime
from datetime import datetime as dt
# ALL ABOUT DATA
def Data_Details(DF, dataname):
print(
"################################################\n"
+ dataname
+ "DATA FRAME SHAPE:"
)
print(DF.shape)
print(
"################################################\n"
+ dataname
+ "DATA FRAME HEAD:"
)
print(DF.head(10))
print(
"################################################\n"
+ dataname
+ "DATA FRAME DESCRIBTION:"
)
print(DF.describe())
print(
"################################################\n"
+ dataname
+ "DATA FRAME INFO:"
)
print(DF.info())
print(
"################################################\n"
+ dataname
+ "DATA FRAME NULLS:"
)
print(DF.isna().sum())
print("-------------------------------------------------\n")
# # Reading Data
# Define the path of the dataset
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
# Reading the data
dataframe = pd.read_csv(os.path.join(dataset_path, "train.csv"))
df1 = dataframe.copy()
dataframe2 = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
df2 = dataframe2.copy()
dataframe3 = pd.read_csv(os.path.join(dataset_path, "test.csv"))
df3 = dataframe3.copy()
import xml.etree.ElementTree as ET
xml_data = open(os.path.join(dataset_path, "holidays.xml"), "r").read() # Read file
root = ET.XML(xml_data) # Parse XML
holidaydata = []
holidaycols = ["date", "description"]
for i, child in enumerate(root):
holidaydata.append([subchild.text for subchild in child])
df_holiday = pd.DataFrame(holidaydata)  # Write the parsed holiday rows into a DataFrame
df_holiday.columns = holidaycols # Update column names
df_holiday.head()
df4 = df_holiday.copy()
# # Exploring Data
# Exploring Dataset
Data_Details(df1, "Train")
Data_Details(df2, "Weather")
Data_Details(df3, "Test")
Data_Details(df4, "Holiday")
# # Merging Data
# Removing Duplicates Values from the weather dataset
df2.drop_duplicates(["Year", "Month", "Day", "Hour"], inplace=True)
# Extracting Date information
df1["timestamp"] = pd.to_datetime(df1["timestamp"])
df1["Year"] = df1["timestamp"].dt.year
df1["Month"] = df1["timestamp"].dt.month
df1["Day"] = df1["timestamp"].dt.day
df1["Hour"] = df1["timestamp"].dt.hour
df1["Weekday"] = df1["timestamp"].dt.strftime("%a")
df3["timestamp"] = pd.to_datetime(df3["timestamp"])
df3["Year"] = df3["timestamp"].dt.year
df3["Month"] = df3["timestamp"].dt.month
df3["Day"] = df3["timestamp"].dt.day
df3["Hour"] = df3["timestamp"].dt.hour
df3["Weekday"] = df3["timestamp"].dt.strftime("%a")
df4["Year"] = pd.DatetimeIndex(df4["date"]).year
df4["Month"] = pd.DatetimeIndex(df4["date"]).month
df4["Day"] = pd.DatetimeIndex(df4["date"]).day
# Merging the weather and holiday dataset with the train
df_merged0 = pd.merge(df1, df2, how="left", on=["Year", "Month", "Day", "Hour"])
df_merged = pd.merge(df_merged0, df4, how="left", on=["Year", "Month", "Day"])
# Merging the weather and holiday dataset with the test dataset
df_merged_test0 = pd.merge(df3, df2, how="left", on=["Year", "Month", "Day", "Hour"])
df_merged_test = pd.merge(df_merged_test0, df4, how="left", on=["Year", "Month", "Day"])
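# A small added sanity check: count how many crash records found no matching weather observation
# in the left joins above (those rows carry NaN in the weather columns).
print("Train rows without a weather match :", df_merged["Weather_Condition"].isna().sum())
print("Test rows without a weather match  :", df_merged_test["Weather_Condition"].isna().sum())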
# Exploring the Merged Dataset
Data_Details(df_merged, "Merged train data set ")
Data_Details(df_merged_test, "Merged test data set ")
# # Cleaning Data for training
# Make a copy from the merged train dataset
df = df_merged.copy()
# Dropping the columns that have too many null values or are redundant
df.drop(
columns=["Wind_Chill(F)", "Precipitation(in)", "timestamp", "date"],
axis=1,
inplace=True,
)
# Dropping the rows with missing values in the key weather columns
df.dropna(
axis=0,
subset=["Weather_Condition", "Temperature(F)", "Humidity(%)", "Visibility(mi)"],
inplace=True,
)
# Filling the missing data of wind_speed with the mean
df["Wind_Speed(mph)"].fillna(df["Wind_Speed(mph)"].mean(), inplace=True)
# Get the number of unique values for each column
counts = df.nunique()
# Record columns to delete
to_del = [i for i, v in enumerate(counts) if v == 1]
# drop useless columns
df.drop(df.columns[to_del], axis=1, inplace=True)
# Count the values of the columns to determine which columns to keep
print("Wind_Speed(mph)\n", df["Wind_Speed(mph)"].value_counts())
print("Crossing\n", df["Crossing"].value_counts())
print("Give_Way\n", df["Give_Way"].value_counts())
print("Junction\n", df["Junction"].value_counts())
print("No_Exit\n", df["No_Exit"].value_counts())
print("Railway\n", df["Railway"].value_counts())
print("Stop\n", df["Stop"].value_counts())
print("Amenity\n", df["Amenity"].value_counts())
print("Side\n", df["Side"].value_counts())
print("Selected\n", df["Selected"].value_counts())
# Dropping the biased columns
df.drop(columns=["Give_Way", "No_Exit", "Selected"], axis=1, inplace=True)
# calculate duplicates
dups = df.duplicated()
# report if there are any duplicates
print(dups.any())
# drop duplicate rows
df.drop_duplicates(inplace=True)
print(df.shape)
# # Cleaning Data for test
# Make a copy from the merged test dataset
df_test = df_merged_test.copy()
# Dropping the columns that have too many null values or are redundant
df_test.drop(
columns=["Wind_Chill(F)", "Precipitation(in)", "timestamp", "date"],
axis=1,
inplace=True,
)
# Dropping the rows with missing values in the key weather columns
df_test.dropna(
axis=0,
subset=["Weather_Condition", "Temperature(F)", "Humidity(%)", "Visibility(mi)"],
inplace=True,
)
# Filling the missing data of wind_speed with the mean
df_test["Wind_Speed(mph)"].fillna(df["Wind_Speed(mph)"].mean(), inplace=True)
# Get number of unique values for each column
counts2 = df_test.nunique()
# Record columns to delete
to_del = [i for i, v in enumerate(counts2) if v == 1]
# Drop useless columns
df_test.drop(df_test.columns[to_del], axis=1, inplace=True)
# Dropping the biased columns
df_test.drop(columns=["Give_Way"], axis=1, inplace=True)
# calculate duplicates
dups = df_test.duplicated()
# report if there are any duplicates
print(dups.any())
# drop duplicate rows
df_test.drop_duplicates(inplace=True)
print(df_test.shape)
# # Graphs
# Plotting the class distribution of the data
df["Severity"].value_counts().plot(kind="bar")
plt.xlabel("Classes")
plt.ylabel("Count")
plt.show()
# the graph shows that the data is imbalanced, so a model trained on it may be biased toward the majority class
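# A hedged sketch of the same plot with normalised shares, so the y-axis really is a percentage.
(df["Severity"].value_counts(normalize=True) * 100).plot(kind="bar")
plt.xlabel("Classes")
plt.ylabel("Percentage of records")
plt.show()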
# Plotting the Correlation
f, ax = plt.subplots(figsize=(32, 26))
corr = df.corr()
mp = sns.heatmap(
corr,
    mask=np.zeros_like(corr, dtype=bool),
cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True,
ax=ax,
annot=True,
)
mp.set_title(label="dataset correlation", fontsize=20)
# # Encoding Categorical Columns
# Encoding for the train set
# Grouping the values of the weather conditions to 4 main categories
encoded_cons = []
for con in df["Weather_Condition"].values:
if "Cloudy" in con.split(" "):
encoded_cons.append("Cloudy")
elif "Fair" in con.split(" "):
encoded_cons.append("Fair")
elif "Rain" in con.split(" "):
encoded_cons.append("Rain")
else:
encoded_cons.append("other")
df["Encoded_Weather"] = encoded_cons
del df["Weather_Condition"]
# Grouping the values of the Hours to 2 main categories
encoded_hours = []
for hour in df["Hour"].values:
if 8 > hour >= 0:
encoded_hours.append("hour_1")
else:
encoded_hours.append("hour_2")
df["Encoded_Hour_effect"] = encoded_hours
del df["Hour"]
# Filling the null values of holiday and adding sat and sun to the holiday
df["description"].fillna("not_holiday", inplace=True)
for con in df["Weekday"].values:
if not 5 or 6:
df["description"] = "holiday"
# Encoding for the test set
# Grouping the values of the weather conditions to 4 main categories for the test set
encoded_cons_test = []
for con in df_test["Weather_Condition"].values:
if "Cloudy" in con.split(" "):
encoded_cons_test.append("Cloudy")
elif "Fair" in con.split(" "):
encoded_cons_test.append("Fair")
elif "Rain" in con.split(" "):
encoded_cons_test.append("Rain")
elif "Overcast" in con.split(" "):
encoded_cons.append("Rain")
else:
encoded_cons_test.append("other")
df_test["Encoded_Weather"] = encoded_cons_test
del df_test["Weather_Condition"]
# Grouping the values of the Hours to 2 main categories
encoded_hours_test = []
for hour in df_test["Hour"].values:
if 8 > hour >= 0:
encoded_hours_test.append("hour_1")
else:
encoded_hours_test.append("hour_2")
df_test["Encoded_Hour_effect"] = encoded_hours_test
del df_test["Hour"]
# Filling the null values of holiday and adding sat and sun to the holiday
df_test["description"].fillna("not_holiday", inplace=True)
for con in df_test["Weekday"].values:
if not 5 or 6:
df_test["description"] = "holiday"
# Grouping the value of holiday to 2 main categories for train
encoded_hol = []
for con in df["description"].values:
if not "not_holiday":
encoded_hol.append("holiday")
else:
encoded_hol.append("not_holiday")
df["description"] = encoded_hol
# Grouping the value of holiday to 2 main categories for test
encoded_hol_test = []
for con in df_test["description"].values:
if not "not_holiday":
encoded_hol_test.append("holiday")
else:
encoded_hol_test.append("not_holiday")
df_test["description"] = encoded_hol_test
# One Hot Encoding for categorical data in both the train and test datasets
cols_names = df.columns
not_categorical = [
"ID",
"Lat",
"Lng",
"Distance(mi)",
"Crossing",
"Junction",
"Railway",
"Stop",
"Amenity",
"Severity",
"Year",
"Month",
"Day",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"Visibility(mi)",
]
target = "Severity"
categorical = [
item for item in cols_names if item not in not_categorical and item != target
]
print(categorical)
data_onehot = pd.get_dummies(df, columns=categorical)
data_onehot_test = pd.get_dummies(df_test, columns=categorical)
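# A hedged sketch (not part of the original pipeline): pd.get_dummies can produce different columns
# for train and test when a category appears in only one split. DataFrame.align pads the missing
# dummy columns with zeros so both frames share the same column set (the Severity column would
# still need to be dropped from the aligned test frame before predicting).
data_onehot_aligned, data_onehot_test_aligned = data_onehot.align(
    data_onehot_test, join="left", axis=1, fill_value=0
)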
# # Data Splitting
data_onehot.columns
# Splitting the data into train and validation
from sklearn.model_selection import train_test_split
# Train 80 percent , Validation 20 percent
train_df, val_df = train_test_split(
data_onehot, test_size=0.2, random_state=42
) # Try adding `stratify` here
X_train = train_df.drop(columns=["ID", "Severity", "Day"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["ID", "Severity", "Day"])
y_val = val_df["Severity"]
# # Model
# Training the model
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
# Train the classifier
classifier = classifier.fit(X_train, y_train)
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
# # Prediction
data_onehot_test.columns
# Using the test set to predict the output
X_test = data_onehot_test[
[
"Lat",
"Lng",
"Distance(mi)",
"Crossing",
"Junction",
"Railway",
"Stop",
"Amenity",
"Year",
"Month",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"Visibility(mi)",
"Side_L",
"Side_R",
"Weekday_Fri",
"Weekday_Mon",
"Weekday_Sat",
"Weekday_Sun",
"Weekday_Thu",
"Weekday_Tue",
"Weekday_Wed",
"description_not_holiday",
"Encoded_Weather_Cloudy",
"Encoded_Weather_Fair",
"Encoded_Weather_Rain",
"Encoded_Weather_other",
"Encoded_Hour_effect_hour_1",
"Encoded_Hour_effect_hour_2",
]
]
y_test_predicted = classifier.predict(X_test)
data_onehot_test["Severity"] = y_test_predicted
data_onehot_test.head()
# # Save the output to CSV file
# Save the output in a CSV file
data_onehot_test[["ID", "Severity"]].to_csv("./submission.csv", index=False)
# # Trials and analysis of different techniques
###### Normalization of the Data
# Scaling the data so that the normalized values fall in the range between 0 and 1.
# Normalizing the data using the equation >> x_normalized = (x - x_minimum) / (x_maximum - x_minimum)
# data = df
# df["Distance(mi)"]=((data["Distance(mi)"]-data["Distance(mi)"].min())/(data["Distance(mi)"].max()-data["Distance(mi)"].min()))*10
# df["Wind_Chill(F)"]=((data["Wind_Chill(F)"]-data["Wind_Chill(F)"].min())/(data["Wind_Chill(F)"].max()-data["Wind_Chill(F)"].min()))*1
# df["Precipitation(in)"]=((data["Precipitation(in)"]-data["Precipitation(in)"].min())/(data["Precipitation(in)"].max()-data["Precipitation(in)"].min()))*1
# df["Temperature(F)"]=((data["Temperature(F)"]-data["Temperature(F)"].min())/(data["Temperature(F)"].max()-data["Temperature(F)"].min()))*1
# df["Humidity(%)"]=((data["Humidity(%)"]-data["Humidity(%)"].min())/(data["Humidity(%)"].max()-data["Humidity(%)"].min()))*1
# df["Wind_Speed(mph)"]=((data["Wind_Speed(mph)"]-data["Wind_Speed(mph)"].min())/(data["Wind_Speed(mph)"].max()-data["Wind_Speed(mph)"].min()))*1
# df["Visibility(mi)"]=((data["Visibility(mi)"]-data["Visibility(mi)"].min())/(data["Visibility(mi)"].max()-data["Visibility(mi)"].min()))*1
####### Imbalanced data
# In this data we found that the Severity column distribution is not uniform among the 4 classes; most samples belong to just two of them.
# Since this makes the data imbalanced, resampling should be applied.
# The degree of imbalance can be graded by a threshold into three levels (mild - moderate - extreme).
# The four-class distribution is as follows:
# Class=1, n=4346 (67.853%)
# Class=2, n=1853 (28.931%)
# Class=3, n=77 (1.202%)
# Class=0, n=129 (2.014%)
# In this section, three resampling techniques were tried (oversampling using SMOTE, oversampling using ADASYN, and a combination of random undersampling and oversampling using a pipeline).
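# A small added sketch reproducing the class shares quoted above from the prepared data:
severity_counts = data_onehot["Severity"].value_counts()
for cls, n in severity_counts.items():
    print("Class=%d, n=%d (%.3f%%)" % (cls, n, 100 * n / len(data_onehot)))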
############## 1 Oversampling using SMOTE
# “Synthetic Minority Oversampling Technique” (SMOTE) is the resampling technique utilized here.
# from pandas import read_csv
# from imblearn.over_sampling import SMOTE
# from collections import Counter
# from matplotlib import pyplot
# from sklearn.preprocessing import LabelEncoder
# import pandas as pd
## split into input and output elements
# X = data_onehot.drop('Severity', axis=1)
# y = data_onehot['Severity']
## label encode the target variable
# y = LabelEncoder().fit_transform(y)
## transform the dataset
# X, y = SMOTE().fit_resample(X, y)
## summarize distribution
# counter = Counter(y)
# for k,v in counter.items():
# per = v / len(y) * 100
# print('Class=%d, n=%d (%.3f%%)' % (k, v, per))
## plot the distribution
# plt.bar(counter.keys(), counter.values())
# plt.show()
############## 2 Oversampling using ADASYN
# ADASYN takes ideas from SMOTE and builds on them. In particular, ADASYN selects minority samples S so that “more difficult to classify” minority samples are more likely to be selected. This allows the classifier to have more opportunity to learn tough instances
# from pandas import read_csv
# from imblearn.over_sampling import SMOTE
# from collections import Counter
# from matplotlib import pyplot
# from sklearn.preprocessing import LabelEncoder
# import pandas as pd
# from imblearn.over_sampling import ADASYN
## split into input and output elements
# X = data_onehot.drop('Severity', axis=1)
# y = data_onehot['Severity']
## label encode the target variable
# y = LabelEncoder().fit_transform(y)
## transform the dataset
# X, y = ADASYN().fit_resample(X, y)
## summarize distribution
# counter = Counter(y)
# for k,v in counter.items():
# per = v / len(y) * 100
# print('Class=%d, n=%d (%.3f%%)' % (k, v, per))
## plot the distribution
# plt.bar(counter.keys(), counter.values())
# plt.show()
############## 3 SMOTE oversampling with a train/test split and an SVM baseline
# from collections import Counter
# from imblearn.over_sampling import SMOTE
# from sklearn.model_selection import train_test_split
# import pandas as pd
# import numpy as np
# import warnings
# #warnings.simplefilter(action='ignore', category=FutureWarning)
# # split into input and output elements
# X = data_onehot.drop('Severity', axis=1)
# y = data_onehot['Severity']
# #Split train-test data
# X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.30)
# # summarize class distribution
# print("Before oversampling: ",Counter(y_train))
# # define oversampling strategy
# SMOTE = SMOTE()
# # fit and apply the transform
# X_train_SMOTE, y_train_SMOTE = SMOTE.fit_resample(X_train, y_train)
# # summarize class distribution
# print("After oversampling: ",Counter(y_train_SMOTE))
# #PART 2
# # import SVM libraries
# from sklearn.svm import SVC
# from sklearn.metrics import classification_report, roc_auc_score
# model=SVC()
# clf_SMOTE = model.fit(X_train_SMOTE, y_train_SMOTE)
# pred_SMOTE = clf_SMOTE.predict(X_test)
# print("ROC AUC score for oversampled SMOTE data: ", roc_auc_score(y_test, pred_SMOTE))
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **Importing DATA**
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
# # **Mapping Title and Sex**
train_test_data = [train, test] # combining train and test dataset
for dataset in train_test_data:
dataset["Title"] = dataset["Name"].str.extract(" ([A-Za-z]+)\.", expand=False)
title_mapping = {
"Mr": 0,
"Miss": 1,
"Mrs": 2,
"Master": 3,
"Dr": 3,
"Rev": 3,
"Col": 3,
"Major": 3,
"Mlle": 3,
"Countess": 3,
"Ms": 3,
"Lady": 3,
"Jonkheer": 3,
"Don": 3,
"Dona": 3,
"Mme": 3,
"Capt": 3,
"Sir": 3,
}
for dataset in train_test_data:
dataset["Title"] = dataset["Title"].map(title_mapping)
train.head()
# # **DELETE 'NAME' COLUMN**
train.drop("Name", axis=1, inplace=True)
test.drop("Name", axis=1, inplace=True)
train.head()
test.head()
sex_mapping = {"male": 0, "female": 1}
for dataset in train_test_data:
dataset["Sex"] = dataset["Sex"].map(sex_mapping)
train.head()
test.head()
# # Filling Missing Values for Age by grouping them by Title and Finding the Median
train["Age"].fillna(train.groupby("Title")["Age"].transform("median"), inplace=True)
test["Age"].fillna(test.groupby("Title")["Age"].transform("median"), inplace=True)
train.isnull().sum()
test.isnull().sum()
# # Encodings
for dataset in train_test_data:
dataset["Embarked"] = dataset["Embarked"].fillna("S")
embarked_mapping = {"S": 0, "C": 1, "Q": 2}
for dataset in train_test_data:
dataset["Embarked"] = dataset["Embarked"].map(embarked_mapping)
train["Fare"].fillna(train.groupby("Pclass")["Fare"].transform("median"), inplace=True)
test["Fare"].fillna(test.groupby("Pclass")["Fare"].transform("median"), inplace=True)
train["FamilySize"] = train["SibSp"] + train["Parch"] + 1
test["FamilySize"] = test["SibSp"] + test["Parch"] + 1
family_mapping = {
1: 0,
2: 0.4,
3: 0.8,
4: 1.2,
5: 1.6,
6: 2,
7: 2.4,
8: 2.8,
9: 3.2,
10: 3.6,
11: 4,
}
for dataset in train_test_data:
dataset["FamilySize"] = dataset["FamilySize"].map(family_mapping)
train.head()
test.head()
for dataset in train_test_data:
dataset["Cabin"] = dataset["Cabin"].str[:1]
train.head()
cabin_mapping = {
"A": 0,
"B": 0.4,
"C": 0.8,
"D": 1.2,
"E": 1.6,
"F": 2,
"G": 2.4,
"T": 2.8,
}
for dataset in train_test_data:
dataset["Cabin"] = dataset["Cabin"].map(cabin_mapping)
train["Cabin"].fillna(
train.groupby("Pclass")["Cabin"].transform("median"), inplace=True
)
test["Cabin"].fillna(test.groupby("Pclass")["Cabin"].transform("median"), inplace=True)
from sklearn.preprocessing import LabelEncoder
features2 = ["Age", "Fare"]
for dataset in train_test_data:
for feature in features2:
dataset[feature] = LabelEncoder().fit_transform(dataset[feature])
features_drop = ["Ticket", "SibSp", "Parch", "Cabin"]
train = train.drop(features_drop, axis=1)
test = test.drop(features_drop, axis=1)
train = train.drop(["PassengerId"], axis=1)
train_data = train.drop("Survived", axis=1)
target = train["Survived"]
train_data.shape, target.shape
train_data.head(10)
test.head()
# # **MODEL**
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=13)
model.fit(train_data, target)
test_data = test.drop("PassengerId", axis=1).copy()
prediction = model.predict(test_data)
output = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": prediction})
output.to_csv("my_third_submission.csv", index=False)
print("Your submission was successfully saved!")
# ## Imports and Configurations
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pandas.api.types import CategoricalDtype
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score
from sklearn.feature_selection import mutual_info_classif
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from category_encoders import MEstimateEncoder
# # Data Preprocessing
# ### Clean Data
def clean(df):
# No specific cleaning is required for this dataset
return df
# ### Encode Data
# The numeric features : PassengerId, Age, SibSp, Parch, Fare
# The object dtype features : Name, Ticket
# The nominative (unordered) categorical features
features_nom = ["Sex", "Embarked", "Cabin"]
# The ordinal (ordered) categorical features
ordered_levels = {"Pclass": [1, 2, 3]}
# Add a None level for missing values
ordered_levels = {key: ["None"] + value for key, value in ordered_levels.items()}
def encode(df):
# Nominal categories
for name in features_nom:
df[name] = df[name].astype("category")
# Add a None category for missing values
if "None" not in df[name].cat.categories:
if df[name].isnull().sum() != 0:
df[name].cat.add_categories("None", inplace=True)
# Ordinal categories
for name, levels in ordered_levels.items():
df[name] = df[name].astype(CategoricalDtype(levels, ordered=True))
return df
# ### Handle Missing Values
def replace_null_values_mean_std(train_data, test_data, feature):
train_data_mean = train_data[feature].mean()
test_data_std = test_data[feature].std()
train_data[feature] = train_data[feature].replace(
np.NaN,
np.random.randint(
train_data_mean - test_data_std, train_data_mean + test_data_std
),
)
test_data[feature] = test_data[feature].replace(np.NaN, train_data_mean)
df = pd.concat([train_data, test_data])
return df
def impute(df):
for name in df.select_dtypes("number"):
df[name] = df[name].fillna(0)
for name in df.select_dtypes("category"):
if df[name].isnull().sum() != 0:
df[name] = df[name].fillna("None")
for name in df.select_dtypes("object"):
df[name] = df[name].fillna("None")
return df
# ## Load and apply preprocessing to data
# Load data set
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data = train_data.set_index([pd.Index(range(891))])
test_data = test_data.set_index([pd.Index(range(891, 1309))])
# Merge the splits so we can process them together
df = pd.concat([train_data, test_data])
# Preprocessing
df = clean(df) # cleaning dataset
df = encode(df) # encode dataset dtypes
df = replace_null_values_mean_std(
train_data, test_data, "Fare"
) # replace missing values
df = replace_null_values_mean_std(
train_data, test_data, "Age"
) # replace missing values
df = impute(df) # impute missing values in the dataset
# Reform splits
train_data = df.loc[train_data.index, :]
test_data = df.loc[test_data.index, :]
train_data["Survived"] = train_data.Survived.astype("category")
# Display data
print("\nTraining Data:\n")
display(train_data.head())
print("\nTest Data:\n")
display(test_data.head())
# Display information about dtypes and missing values
print("\nTraining Data Information:\n")
display(train_data.info())
print("\nTest Data Information:\n")
display(test_data.info())
# ## Define the model, functions to score dataset and get predictions
random_forest_model = RandomForestClassifier(
# n_estimators=100, max_depth=5, random_state=1,
criterion="gini",
n_estimators=700,
min_samples_split=10,
min_samples_leaf=1,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
)
# score dataset with one hot encoding without droping first column
def score_dataset(X, y, model=random_forest_model):
# Label encoding for categoricals
for colname in X.select_dtypes(["category", "object"]):
# X[colname], _ = X[colname].factorize() # Label encoding
dummies = pd.get_dummies(
X[colname], prefix=colname, drop_first=False
) # One hot encoding
X = X.join(dummies)
        X = X.drop(columns=colname)
    # Score with RMSLE (Root Mean Squared Log Error), the metric used in the Housing exercise this helper was adapted from
score = cross_val_score(
model,
X,
y,
cv=5,
scoring="neg_mean_squared_log_error",
)
score = -1 * score.mean()
score = np.sqrt(score)
return score
# Function to get predictions
def get_predictions(X, train_data, test_data, y, ids, model=random_forest_model):
# Label encoding for categoricals
for colname in X.select_dtypes(["category", "object"]):
# X[colname], _ = X[colname].factorize()
dummies = pd.get_dummies(
X[colname], prefix=colname, drop_first=False
) # One hot encoding
X = X.join(dummies)
        X = X.drop(columns=colname)
train_data_1 = X.loc[train_data.index, :]
test_data_1 = X.loc[test_data.index, :]
model = model
model.fit(train_data_1, y)
predictions = model.predict(test_data_1)
predictions = predictions.astype(int)
output = pd.DataFrame({"PassengerId": ids, "Survived": predictions})
output.to_csv("my_submission_11.csv", index=False)
print(output.head())
return
# # Feature Engineering
X_train = train_data.copy()
y_train = X_train.pop("Survived")
baseline_score = score_dataset(X_train.copy(), y_train)
print(f"Baseline score before applying feature engineering: {baseline_score:.5f} RMSLE")
# ### Remove high cardinality columns
# High-cardinality columns (columns with many unique values) are removed to get a better score.
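# A quick added check of cardinality before dropping: the columns with many unique values stand out.
print(X_train.nunique().sort_values(ascending=False).head(10))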
# Remove PassengerId, Name, Ticket
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Cabin", "Embarked"]
X_train_new = X_train[features].copy()
baseline_score = score_dataset(X_train_new.copy(), y_train)
print(f"New score after removing high cardinality columns: {baseline_score:.5f} RMSLE")
# ### Mutual Information
# Utility functions from Tutorial
def make_mi_scores(X, y):
X = X.copy()
for colname in X.select_dtypes(["object", "category"]):
X[colname], _ = X[colname].factorize()
# Dummies = pd.get_dummies(X[colname], drop_first=False,prefix = colname)
# X = X.join(Dummies)
# X = X.drop(colname,1)
# All discrete features should now have integer dtypes
discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes]
mi_scores = mutual_info_classif(
X, y, discrete_features=discrete_features, random_state=0
)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
X_all = train_data.copy()
mi_scores = make_mi_scores(X_train, y_train)
print("Mi Scores:\n", mi_scores)
plot_mi_scores(mi_scores.head(20))
sns.relplot(
x="value",
y="Survived",
col="variable",
data=X_all.melt(id_vars="Survived", value_vars=features),
facet_kws=dict(sharex=False),
)
sns.countplot(X_all["Pclass"], hue=X_all["Survived"], palette="Paired")
sns.countplot(X_all["Sex"], hue=X_all["Survived"], palette="rocket")
sns.countplot(X_all["Age"], hue=X_all["Survived"], palette="Paired")
sns.countplot(X_all["SibSp"], hue=X_all["Survived"], palette="flare")
# ## Create Features
# ### Breaking-Down Features
# **Breaking Down Name to Title**
def breakDownCabinToFirstLetter(X):
X_new = X.copy()
X_new["CabinLetter"] = np.where(
X_new["Cabin"] != "None", X_new["Cabin"].astype(str).str[0], "None"
)
X_new["CabinLetter"] = X_new["CabinLetter"].astype("category")
return X_new
def breakDownName(X, NameCol):
X1 = X.copy()
X_new = pd.DataFrame()
X_new["Title"] = NameCol.str.split(", ", n=1, expand=True)[1]
X1["Title"] = X_new["Title"].copy().str.split(".", n=1, expand=True)[0]
X1["Title"] = X1["Title"].astype("category")
return X1
def getTicketLetter(X, TicketCol):
X1 = X.copy()
# X1["TicketLength"] = TicketCol.apply(lambda x: len(x))
X1["TicketLetter"] = TicketCol.apply(lambda x: str(x)[0])
X1["TicketLetter"] = X1["TicketLetter"].astype("category")
return X1
def getNameLength(X, NameCol):
X_new = X.copy()
X_new["NameLength"] = NameCol.str.len()
return X_new
# ## Binning
def binningFare(X):
X_new = X.copy()
cut_list = list(range(0, 100, 10))
cut_list.extend(list(range(100, 700, 100)))
X_new["FareBin"] = pd.cut(X_new["Fare"], cut_list, labels=False, right=False)
X_new["FareBin"] = X_new["FareBin"].astype("category")
return X_new
def binningAge(X):
X_new = X.copy()
cut_list = [0, 4, 8, 12, 18, 30, 40, 55, 65, 80]
X_new["AgeBin"] = pd.cut(X_new["Age"], cut_list, labels=False)
X_new["AgeBin"] = X_new["AgeBin"].astype("category")
return X_new
# ### Group Transformation
def getMedianFareWithEmbarked(X):
X_new = X.copy()
X_new["MedianFareWithEmbarked"] = X_new.groupby("Embarked")["Fare"].transform(
"median"
)
return X_new
# ## Clustering With K-Means
def cluster_labels(df, features, n_clusters=20):
X = df.copy()
# Standardize
X_scaled = X.loc[:, features]
X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0)
# Fit the KMeans model to X_scaled and create the cluster labels
kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0)
X["Cluster"] = kmeans.fit_predict(X_scaled)
X["Cluster"] = X["Cluster"].astype("category")
return X, kmeans
def cluster_distance(df, features, n_clusters=20):
X = df.copy()
X_scaled = X.loc[:, features]
X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0)
kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0)
X_cd = kmeans.fit_transform(X_scaled)
# Label features and join to dataset
X_cd = pd.DataFrame(X_cd, columns=[f"Centroid_{i}" for i in range(X_cd.shape[1])])
return X_cd
# **Cluster-Distance Features**
def addClusterDistanceFeatures(X):
X_new = X.join(cluster_distance(X, cluster_features, 10))
return X_new
# ## Principal Component Analysis
def apply_pca(X, standardize=True):
# Standardize
if standardize:
X = (X - X.mean(axis=0)) / X.std(axis=0)
# Create principal components
pca = PCA()
X_pca = pca.fit_transform(X)
# Convert to dataframe
component_names = [f"PC{i+1}" for i in range(X_pca.shape[1])]
X_pca = pd.DataFrame(X_pca, columns=component_names)
# Create loadings
loadings = pd.DataFrame(
pca.components_.T, # transpose the matrix of loadings
columns=component_names, # so the columns are the principal components
index=X.columns, # and the rows are the original features
)
return pca, X_pca, loadings
def plot_variance(pca, width=8, dpi=100):
# Create figure
fig, axs = plt.subplots(1, 2)
n = pca.n_components_
grid = np.arange(1, n + 1)
# Explained variance
evr = pca.explained_variance_ratio_
axs[0].bar(grid, evr)
axs[0].set(xlabel="Component", title="% Explained Variance", ylim=(0.0, 1.0))
# Cumulative Variance
cv = np.cumsum(evr)
axs[1].plot(np.r_[0, grid], np.r_[0, cv], "o-")
axs[1].set(xlabel="Component", title="% Cumulative Variance", ylim=(0.0, 1.0))
# Set up figure
fig.set(figwidth=8, dpi=100)
return axs
# Get the corresponding correlations
pca_features = ["Age", "Fare", "SibSp", "Parch"]
def getCorrelationsForFeatures():
print("Correlation with Survived:\n")
print(train_data[pca_features].corrwith(train_data.Survived))
return
def getPcaFeatures(X):
X_new = X.loc[:, pca_features]
# `apply_pca`, defined above, reproduces the code from the tutorial
pca, X_pca, loadings = apply_pca(X_new)
print(loadings)
plot_variance(pca)
return X_pca
# **Create New Features using PCA**
def addPcaFeatures(X, X_pca):
X_with_pca_features = X.copy()
X_with_pca_features = X_with_pca_features.join(X_pca)
return X_with_pca_features
def createNewFeatureFromPCA(X):
X_with_new_features = X.copy()
X_with_new_features["TotSibSpParch"] = X["SibSp"] + X["Parch"]
# X_with_new_features["FareToAgeRatio"] = X["Fare"]/X["Age"]
return X_with_new_features
def groupFeature(X):
X_new = X.copy()
Family_category_map = {
0: "Single",
1: "Small",
2: "Small",
3: "Small",
4: "Medium",
5: "Medium",
6: "Medium",
7: "Large",
10: "Large",
}
X_new["FamilyCategory"] = X_new["TotSibSpParch"].map(Family_category_map)
X_new["FamilyCategory"] = X_new["FamilyCategory"].astype("category")
return X_new
X_train_test = pd.concat([train_data, test_data])
X = X_train_test[features]
X1 = breakDownName(X, X_train_test["Name"])
print(
f"New score after breaking-down Names: {score_dataset(X1.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X2 = getNameLength(X1, X_train_test["Name"])
print(
f"New score after getting Name Length feature: {score_dataset(X2.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X3 = getTicketLetter(X2, X_train_test["Ticket"])
print(
f"New score after getting Ticket Letter feature: {score_dataset(X3.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X4 = breakDownCabinToFirstLetter(X3)
print(
f"New score after breaking down Cabin to Cabin's first Letter: {score_dataset(X4.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X5 = binningFare(X4)
print(
f"New score after binning fare: {score_dataset(X5.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X6 = binningAge(X5)
print(
f"New score after binning age: {score_dataset(X6.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X7 = getMedianFareWithEmbarked(X6)
print(
f"New score after getting median of fare with embarked: {score_dataset(X7.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
cluster_features = ["Age", "Fare"]
X8, kmeans_model = cluster_labels(X7.copy(), cluster_features, 10)
print(
f"New score after creating cluster feature: {score_dataset(X8.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X8.head()
X_clustered = X8.copy()
sns.relplot(
x="Age",
y="Fare",
hue="Cluster",
data=X_clustered,
height=6,
)
X_clustered["Cluster"] = X_clustered.Cluster.astype("category")
X_clustered["Survived"] = y_train
sns.relplot(
x="value",
y="Survived",
hue="Cluster",
col="variable",
height=4,
aspect=1,
facet_kws={"sharex": False},
col_wrap=3,
data=X_clustered.melt(
value_vars=cluster_features,
id_vars=["Survived", "Cluster"],
),
)
# X9 = addClusterDistanceFeatures (X8)
# print(f"New score after adding cluster distance features: {score_dataset(X9.loc[train_data.index, :].copy(), y):.5f} RMSLE")
X_pca = getPcaFeatures(X8)
# X10 = addPcaFeatures(X8, X_pca)
# print(f"New score after adding pca features: {score_dataset(X10.loc[train_data.index, :].copy(), y_train):.5f} RMSLE")
X11 = createNewFeatureFromPCA(X8)
print(
f"New score after creating a new feature from comparing pca loadings: {score_dataset(X11.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
X_plot = X11.copy()
X_plot["Survived"] = y_train
sns.countplot(x="TotSibSpParch", hue="Survived", data=X_plot)
plt.title("Count of Survival in Family Feature", size=15)
X12 = groupFeature(X11)
print(
f"New score after grouping the last feature: {score_dataset(X12.loc[train_data.index, :].copy(), y_train):.5f} RMSLE"
)
sns.countplot(
X12.loc[train_data.index, :]["FamilyCategory"], hue=y_train, palette="flare"
)
# ## Target Encoding
print("Get how many categories, each categorical feature in the dataset has:\n")
display(X12.select_dtypes(["category"]).nunique())
print("Get how many times a category occurs in the dataset:\n")
display(X12["Title"].value_counts())
# # Encoding split
# X_copy = X_with_new_features.copy()
# X_copy["Ticket"] = train_data["Ticket"]
# X_copy["Survived"] = y.copy()
# X_encode = X_copy.sample(frac=0.20, random_state=0)
# y_encode = X_encode.pop("Survived")
# # Training split
# X_pretrain = X_copy.drop(X_encode.index)
# y_train = X_pretrain.pop("Survived")
# # print(X_copy.head(900))
# # print(y_encode.head(900))
# # print(X_pretrain.head(900))
# # print(y_train.head(900))
# # Choose a set of features to encode and a value for m
# encoder = MEstimateEncoder(cols=["Ticket"], m=0.5)
# # Fit the encoder on the encoding split
# encoder.fit(X_encode, y_encode)
# # Encode the training split
# X_train = encoder.transform(X_pretrain, y_train)
# encoder_feature = ["Ticket"]
# plt.figure(dpi=90)
# ax = sns.distplot(y_train, kde=True, hist=False)
# ax = sns.distplot(X_train[encoder_feature], color='r', ax=ax, hist=True, kde=False, norm_hist=True)
# ax.set_xlabel("Survived");
# ## Finalize the features
print("Final dataset Information: \n")
display(X12.info())
mi_scores = make_mi_scores(X12.loc[train_data.index, :], y_train)
print("Mi Scores:\n", mi_scores)
plot_mi_scores(mi_scores.head(25))
sns.countplot(X12.loc[train_data.index, :]["AgeBin"], hue=y_train, palette="winter")
sns.countplot(X12.loc[train_data.index, :]["FareBin"], hue=y_train, palette="Set1")
# The MedianFareWithEmbarked, Parch, AgeBin, and SibSp features have been removed as they have low mutual information with respect to the target Survived, and the information they carry is already given to the model through other features. NameLength and TotSibSpParch are also removed to get a better score.
XFinal = X12.copy()
XFinal = XFinal[
[
"Pclass",
"Sex",
"Age",
"Fare",
"Cabin",
"Embarked",
"TicketLetter",
"Title",
"CabinLetter",
"FareBin",
"Cluster",
"FamilyCategory",
]
]
score_dataset(XFinal.loc[train_data.index, :].copy(), y_train)
# ### Get Predictions
get_predictions(XFinal, train_data, test_data, y_train, test_data["PassengerId"])
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173023.ipynb
| null | null |
[{"Id": 69173023, "ScriptId": 18803165, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890029, "CreationDate": "07/27/2021 16:44:23", "VersionNumber": 8.0, "Title": "Feature_Engineering_Titanic", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 535.0, "LinesInsertedFromPrevious": 15.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 520.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 6,326 | 0 | 6,326 | 6,326 |
||
69173489
|
# ## You're here!
# Welcome to your first competition in the [ITI's AI Pro training program](https://ai.iti.gov.eg/epita/ai-engineer/)! We hope you enjoy and learn as much as we did preparing this competition.
# ## Introduction
# In the competition, it's required to predict the `Severity` of a car crash given info about the crash, e.g., location.
# This is the getting started notebook. Things are kept simple so that it's easier to understand the steps and modify it.
# Feel free to `Fork` this notebook and share it with your modifications **OR** use it to create your submissions.
# ### Prerequisites
# You should know how to use python and a little bit of Machine Learning. You can apply the techniques you learned in the training program and submit the new solutions!
# ### Checklist
# You can participate in this competition the way you prefer. However, I recommend following these steps if this is your first time joining a competition on Kaggle.
# * Fork this notebook and run the cells in order.
# * Submit this solution.
# * Make changes to the data processing step as you see fit.
# * Submit the new solutions.
# *You can submit up to 5 submissions per day. You can select only one of the submission you make to be considered in the final ranking.*
# Don't hesitate to leave a comment or contact me if you have any question!
# ## Import the libraries
# We'll use `pandas` to load and manipulate the data. Other libraries will be imported in the relevant sections.
# import librsries
import pandas as pd
import os
from datetime import datetime
import xml.etree.ElementTree as ET
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.impute import KNNImputer
# ## Exploratory Data Analysis
# In this step, one should load the data and analyze it. However, I'll load the data and do minimal analysis. You are encouraged to do thorough analysis!
# Let's load the data using `pandas` and have a look at the generated `DataFrame`.
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
df = pd.read_csv(os.path.join(dataset_path, "train.csv"))
print("The shape of the dataset is {}.\n\n".format(df.shape))
df.head()
df2 = df.copy()
# test data
test_df = pd.read_csv(os.path.join(dataset_path, "test.csv"))
test_df.head()
# no missing values
df.isnull().sum()
for i in df.columns:
print(f"column name {i}")
print(df[i].value_counts())
print("-----------------------------------------")
# we find the Bump column is redundant: all rows share the same value
# Give_Way is similar (its 3 positive values all occur at Severity 2, so it is not very impactful), and the same goes for No_Exit; see the quick programmatic check below
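# A minimal sketch (not in the original notebook) of how such constant or near-constant
# columns can be spotted programmatically instead of scanning every value_counts printout:
constant_cols = [col for col in df.columns if df[col].nunique(dropna=False) <= 1]
print("Columns with a single unique value:", constant_cols)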
# import XML and parsing to read XML file
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
tree = ET.parse(os.path.join(dataset_path, "holidays.xml"))
root = tree.getroot()
for child in root:
for c in child:
print(c.text)
break
dates = []
for date in root.iter("date"):
dates.append(date.text)
holidays = []
for holiday in root.iter("description"):
holidays.append(holiday.text)
# holidays
holiday_df = pd.DataFrame({"day_temp": dates, "holidays": holidays})
holiday_df.shape
# add holiday column to dataframe
df["day"] = df["timestamp"].apply(lambda x: x.split(" ")[0])
test_df["day"] = test_df["timestamp"].apply(lambda x: x.split(" ")[0])
df.head()
holiday_df.info()
df = pd.merge(
left=df, right=holiday_df, how="left", left_on=["day"], right_on=["day_temp"]
)
df.info()
test_df = pd.merge(
left=test_df, right=holiday_df, how="left", left_on=["day"], right_on=["day_temp"]
)
# We've got 6407 examples in the dataset with 14 features, 1 ID, and the `Severity` of the crash.
# By looking at the features and a sample from the data, the features look to be of numerical and categorical types. What about some descriptive statistics?
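# A quick look at the descriptive statistics mentioned above (a minimal sketch;
# `df` is the merged training DataFrame at this point):
print(df.describe())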
df = df.drop(["day_temp"], axis=1)
test_df = test_df.drop(["day_temp"], axis=1)
# load weather data set and merge with df
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
weather = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
weather = weather.astype({"Year": "str", "Month": "str", "Day": "str", "Hour": "str"})
weather.info()
weather.describe()
for i in weather.columns:
print(f"column name {i}")
print(weather[i].value_counts())
print("------------------------------------")
# from this we find some interesting details
# the Selected column is redundant as all entries are the same
# the Visibility column mostly contains the value 10 for over 85 percent of the data
weather["date"] = (
weather["Year"]
+ "-"
+ weather["Month"]
+ "-"
+ weather["Day"]
+ " "
+ weather["Hour"]
+ ":00"
)
weather.drop(columns=["Year", "Month", "Day", "Hour"], inplace=True)
weather["date"] = weather["date"].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M")
)
# drop Precipitation(in) and Wind_Chill(F), which have many null values or are correlated with other features, along with the redundant Selected column
weather.drop(
columns=["Precipitation(in)", "Wind_Chill(F)", "Selected"], axis=1, inplace=True
)
weather["Temperature(F)"] = weather["Temperature(F)"].fillna(
weather["Temperature(F)"].median()
)
weather["Humidity(%)"] = weather["Humidity(%)"].fillna(weather["Humidity(%)"].median())
weather["Wind_Speed(mph)"] = weather["Wind_Speed(mph)"].fillna(
weather["Wind_Speed(mph)"].median()
)
weather["Visibility(mi)"] = weather["Visibility(mi)"].fillna(
weather["Visibility(mi)"].median()
)
weather["Weather_Condition"] = weather["Weather_Condition"].fillna(
weather["Weather_Condition"].mode()[0]
)
# there are duplicated dates with different weather readings for the same timestamp, so we keep the first entry and drop the duplicated date rows
# weather[weather['date'].duplicated()].sort_values('date')
weather.drop_duplicates(subset=["date"], inplace=True, ignore_index=True)
# slicing timestamp datetime format
df["timestamp1"] = df["timestamp"].apply(lambda x: x[:19])
test_df["timestamp1"] = test_df["timestamp"].apply(lambda x: x[:19])
df["timestamp1"] = df["timestamp1"].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
)
test_df["timestamp1"] = test_df["timestamp1"].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
)
df["timestamp1"] = df["timestamp1"].dt.floor("H")
test_df["timestamp1"] = test_df["timestamp1"].dt.floor("H")
# merge weather with train df
df = pd.merge(
left=df, right=weather, how="left", left_on=["timestamp1"], right_on=["date"]
)
test_df = pd.merge(
left=test_df, right=weather, how="left", left_on=["timestamp1"], right_on=["date"]
)
# df=pd.get_dummies(data=df,columns=['Weather_Condition'])
# df.drop_duplicates(subset='ID',inplace=True)
len(df["Weather_Condition"].unique())
dict1 = {
"Weather_Condition": {
"Scattered Clouds": 23,
"Mostly Cloudy / Windy": 25,
"Clear": 0,
"Fair": 1,
"Partly Cloudy": 20,
"Mostly Cloudy": 24,
"Overcast": 19,
"Fair / Windy": 2,
"Light Rain": 11,
"Cloudy": 22,
"Partly Cloudy / Windy": 21,
"Rain / Windy": 4,
"Rain": 8,
"Fog": 7,
"Heavy Rain": 18,
"Haze": 17,
"Cloudy / Windy": 16,
"Smoke": 10,
"Light Rain / Windy": 3,
"Shallow Fog": 15,
"Mist": 14,
"Light Thunderstorms and Rain": 6,
"Fog / Windy": 5,
"Patches of Fog": 9,
"Light Drizzle": 13,
"Squalls": 12,
}
}
df = df.replace(dict1)
test_df = test_df.replace(dict1)
columns_convert = [
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Stop",
"Amenity",
]
for i in columns_convert:
print(i)
print(df[i].sum())
# we find columns that have little to no impact on the target label, so we drop them
columns_convert1 = ["Crossing", "Junction", "Railway", "Stop", "Amenity"]
df[columns_convert1] = df[columns_convert1].astype(int)
test_df[columns_convert1] = test_df[columns_convert1].astype(int)
# create columns for each part of the date
df["year"] = df["timestamp1"].dt.year
df["month"] = df["timestamp1"].dt.month
df["day"] = df["timestamp1"].dt.day
df["hour"] = df["timestamp1"].dt.hour
test_df["year"] = test_df["timestamp1"].dt.year
test_df["month"] = test_df["timestamp1"].dt.month
test_df["day"] = test_df["timestamp1"].dt.day
test_df["hour"] = test_df["timestamp1"].dt.hour
x = df.groupby(["hour"])["Severity"].count()
sns.barplot(x.index, x.values)
# bin the hours by accident volume: hours 0-9 and 18-23 share label 0, 10-14 get label 1,
# and 15-17 get label 2; the duplicate labels require ordered=False in pd.cut
bins2 = [-0.001, 9, 14, 17, 25]
x = [0, 1, 2, 0]
df["hour"] = pd.cut(df["timestamp1"].dt.hour, bins=bins2, labels=x, ordered=False)
test_df["hour"] = pd.cut(
test_df["timestamp1"].dt.hour, bins=bins2, labels=x, ordered=False
)
# let's drop unwanted columns to avoid cluttering the view and causing confusion
ls_to_drop = ["Bump", "Give_Way", "No_Exit", "Roundabout"]
df = df.drop(labels=ls_to_drop, axis=1)
test_df = test_df.drop(labels=ls_to_drop, axis=1)
holidays_unique = df["holidays"].unique()[1:].tolist()
df["holidays"].unique()
# convert the holidays column to a numerical column
df["holidays"] = df["holidays"].replace(holidays_unique, 1)
df["holidays"] = df["holidays"].fillna(value=0)
test_df["holidays"] = test_df["holidays"].replace(holidays_unique, 1)
test_df["holidays"] = test_df["holidays"].fillna(value=0)
# to check if everything is working okay
print((df["holidays"] == 1).sum())
print(df["holidays"].unique())
# #maybe check correlations for dataset without null values
# # df_non_null=df.dropna(axis=0)
# df_non_null.info()
# shows relationship between severity and holidays
df.groupby(["holidays", "Severity"])["ID"].count()
# sns.swarmplot(x=df['Severity'],y=df['hour'])
# plt.figure(figsize=[20,20])
# g = sns.PairGrid(data = df,vars=['Humidity(%)','Visibility(mi)','Severity'])
# # g.map_diag(plt.boxplot)
# g.map(sns.scatterplot)
# check correlation between Severity and weather factors
# df.corrwith(df['Severity'])
# correlation matrix
df.corr()
x = df.groupby("Severity").mean()["Distance(mi)"].sort_values(ascending=False)
sns.barplot(x.values, x.index, order=x.index, orient="h")
# correlation between Severity and year
df.corrwith(df["date"].dt.year)
# correlation between Severity and month
df.corrwith(df["date"].dt.month)
# correlation between Severity and day
df.corrwith(df["date"].dt.day)
# correlation between Severity and hour
df.corrwith(df["date"].dt.hour)
# fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
# ax1.boxplot(df['date'].dt.hour)
# plt.show()
df["Side"] = pd.get_dummies(df["Side"])["L"]
test_df["Side"] = pd.get_dummies(test_df["Side"])["L"]
df.drop(columns="ID").describe()
x = dict(df.groupby(df["timestamp1"].dt.month).count()["ID"] / df.shape[0])
d = {"month": x}
d
x_test = dict(
test_df.groupby(test_df["timestamp1"].dt.month).count()["ID"] / test_df.shape[0]
)
d_test = {"month": x_test}
df["accidents per month"] = df.replace(d).month
test_df["accidents per month"] = test_df.replace(d_test).month
# df.drop(labels='accidents per moth',axis=1,inplace=True)
# The output shows descriptive statistics for the numerical features, `Lat`, `Lng`, `Distance(mi)`, and `Severity`. I'll use the numerical features to demonstrate how to train the model and make submissions. **However, you shouldn't use the numerical features only to make the final submission if you want to make it to the top of the leaderboard.**
# ## Data Splitting
# Now it's time to split the dataset for the training step. Typically the dataset is split into 3 subsets, namely, the training, validation and test sets. In our case, the test set is already predefined. So we'll split the "training" set into training and validation sets with 0.8:0.2 ratio.
# *Note: a good way to generate reproducible results is to set the seed for the algorithms that depend on randomization. This is done with the argument `random_state` in the following command*
from sklearn.model_selection import train_test_split
# train_df, val_df = train_test_split(df_non_null, test_size=0.2, random_state=42) # Try adding `stratify` here
# X_train = train_df.drop(columns=['ID', 'Severity'])
# y_train = train_df['Severity']
# X_val = val_df.drop(columns=['ID', 'Severity'])
# y_val = val_df['Severity']
train_df, val_df = train_test_split(
df, test_size=0.2, random_state=42
) # Try adding `stratify` here
X_train = train_df.drop(columns=["ID", "Severity"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["ID", "Severity"])
y_val = val_df["Severity"]
# As pointed out earlier, I'll use the numerical features to train the classifier. **However, you shouldn't use the numerical features only to make the final submission if you want to make it to the top of the leaderboard.**
# This cell is used to select the numerical features. IT SHOULD BE REMOVED AS YOU DO YOUR WORK.
# for df after dropping non nulls
X_train = X_train[
[
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"holidays",
"hour",
"year",
"accidents per month",
"month",
"day",
]
]
X_val = X_val[
[
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"holidays",
"hour",
"year",
"accidents per month",
"month",
"day",
]
]
# X_train = X_train[['Lat', 'Distance(mi)','Crossing','Junction','Railway','Stop','Amenity','Side',
# 'Temperature(F)', 'Wind_Speed(mph)', 'Visibility(mi)','holidays','Weather_Condition','accidents per month','hour']]
# X_val = X_val[['Lat', 'Distance(mi)','Crossing','Junction','Railway','Stop','Amenity','Side',
# 'Temperature(F)', 'Wind_Speed(mph)', 'Visibility(mi)','holidays','Weather_Condition','accidents per month','hour']]
# ## Model Training
# Let's train a model with the data! We'll train a Random Forest Classifier to demonstrate the process of making submissions.
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
# Train the classifier
classifier = classifier.fit(X_train, y_train)
# Now let's test our classifier on the validation dataset and see the accuracy.
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
# df.drop('holidays', inplace=True, axis=1)
# df.dropna(inplace=True)
# Well. That's a good start, right? A classifier that predicts all examples' `Severity` as 2 will get around 0.63. You should get a better score as you add more features and do better data preprocessing.
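# A minimal sketch (not part of the original notebook) of where that ~0.63 figure comes
# from: the accuracy of always predicting the majority class on the validation set.
majority_class = y_train.mode()[0]
print("Majority-class baseline accuracy:", (y_val == majority_class).mean())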
# ## Submission File Generation
# We have built a model and we'd like to submit our predictions on the test set! In order to do that, we'll load the test set, predict the class and save the submission file.
# First, we'll load the data.
# test_df = pd.read_csv(os.path.join(dataset_path, 'test.csv'))
# test_df.head()
# Note that the test set has the same features and doesn't have the `Severity` column.
# At this stage one must **NOT** forget to apply the same processing done on the training set on the features of the test set.
# Now we'll add `Severity` column to the test `DataFrame` and add the values of the predicted class to it.
# **I'll select the numerical features here as I did in the training set. DO NOT forget to change this step as you change the preprocessing of the training data.**
# # add holiday column to dataframe
# test_df['day']=test_df['timestamp'].apply(lambda x:x.split(' ')[0])
# test_df=pd.merge(left=test_df,right=holiday_df,how='left',left_on=['day'],right_on=['day_temp'])
# test_df['timestamp1']=test_df['timestamp'].apply(lambda x:x[:19])
# test_df['timestamp1']=test_df['timestamp1'].apply(lambda x:datetime.strptime(x,'%Y-%m-%d %H:%M:%S'))
# test_df['timestamp1']=test_df['timestamp1'].dt.round('H')
# #merge weather with test df
# test_df=pd.merge(left=test_df,right=weather,how='left',left_on=['timestamp1'],right_on=['date'])
# test_df.drop_duplicates(subset='ID',inplace=True)
# columns_convert1=['Crossing','Junction','Railway','Stop','Amenity']
# df[columns_convert1] = df[columns_convert1].astype(int)
test_df.columns
X_test = test_df.drop(columns=["ID"])
# You should update/remove the next line once you change the features used for training
X_test = X_test[
[
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"holidays",
"hour",
"year",
"accidents per month",
"month",
"day",
]
]
y_test_predicted = classifier.predict(X_test)
test_df["Severity"] = y_test_predicted
test_df.head()
# Now we're ready to generate the submission file. The submission file needs the columns `ID` and `Severity` only.
test_df[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173489.ipynb
| null | null |
[{"Id": 69173489, "ScriptId": 18878811, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4397292, "CreationDate": "07/27/2021 16:49:47", "VersionNumber": 1.0, "Title": "getting-started-car-crashes-severity-prediction ve", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 479.0, "LinesInsertedFromPrevious": 479.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ## You're here!
# Welcome to your first competition in the [ITI's AI Pro training program](https://ai.iti.gov.eg/epita/ai-engineer/)! We hope you enjoy and learn as much as we did prepairing this competition.
# ## Introduction
# In the competition, it's required to predict the `Severity` of a car crash given info about the crash, e.g., location.
# This is the getting started notebook. Things are kept simple so that it's easier to understand the steps and modify it.
# Feel free to `Fork` this notebook and share it with your modifications **OR** use it to create your submissions.
# ### Prerequisites
# You should know how to use python and a little bit of Machine Learning. You can apply the techniques you learned in the training program and submit the new solutions!
# ### Checklist
# You can participate in this competition the way you perefer. However, I recommend following these steps if this is your first time joining a competition on Kaggle.
# * Fork this notebook and run the cells in order.
# * Submit this solution.
# * Make changes to the data processing step as you see fit.
# * Submit the new solutions.
# *You can submit up to 5 submissions per day. You can select only one of the submission you make to be considered in the final ranking.*
# Don't hesitate to leave a comment or contact me if you have any question!
# ## Import the libraries
# We'll use `pandas` to load and manipulate the data. Other libraries will be imported in the relevant sections.
# import librsries
import pandas as pd
import os
from datetime import datetime
import xml.etree.ElementTree as ET
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.impute import KNNImputer
# ## Exploratory Data Analysis
# In this step, one should load the data and analyze it. However, I'll load the data and do minimal analysis. You are encouraged to do thorough analysis!
# Let's load the data using `pandas` and have a look at the generated `DataFrame`.
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
df = pd.read_csv(os.path.join(dataset_path, "train.csv"))
print("The shape of the dataset is {}.\n\n".format(df.shape))
df.head()
df2 = df.copy()
# test data
test_df = pd.read_csv(os.path.join(dataset_path, "test.csv"))
test_df.head()
# no missing values
df.isnull().sum()
for i in df.columns:
print(f"column name {i}")
print(df[i].value_counts())
print("-----------------------------------------")
# we find column bump is redundant all have the same value
# same with column give_way and the 3 values are at severity 2 so not very impactful and column No_Exit
# import XML and parsing to read XML file
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
tree = ET.parse(os.path.join(dataset_path, "holidays.xml"))
root = tree.getroot()
for child in root:
for c in child:
print(c.text)
break
dates = []
for date in root.iter("date"):
dates.append(date.text)
holidays = []
for holiday in root.iter("description"):
holidays.append(holiday.text)
# holidays
holiday_df = pd.DataFrame({"day_temp": dates, "holidays": holidays})
holiday_df.shape
# add holiday column to dataframe
df["day"] = df["timestamp"].apply(lambda x: x.split(" ")[0])
test_df["day"] = test_df["timestamp"].apply(lambda x: x.split(" ")[0])
df.head()
holiday_df.info()
df = pd.merge(
left=df, right=holiday_df, how="left", left_on=["day"], right_on=["day_temp"]
)
df.info()
test_df = pd.merge(
left=test_df, right=holiday_df, how="left", left_on=["day"], right_on=["day_temp"]
)
# We've got 6407 examples in the dataset with 14 featues, 1 ID, and the `Severity` of the crash.
# By looking at the features and a sample from the data, the features look of numerical and catogerical types. What about some descriptive statistics?
df = df.drop(["day_temp"], axis=1)
test_df = test_df.drop(["day_temp"], axis=1)
# load weather data set and merge with df
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
weather = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
weather = weather.astype({"Year": "str", "Month": "str", "Day": "str", "Hour": "str"})
weather.info()
weather.describe()
for i in weather.columns:
print(f"column name {i}")
print(weather[i].value_counts())
print("------------------------------------")
# from this we find some interesting details
# selected column is redundant as all entries are the same
# visibilty column mostly conatain the value 10 for above 85 percent of the data
weather["date"] = (
weather["Year"]
+ "-"
+ weather["Month"]
+ "-"
+ weather["Day"]
+ " "
+ weather["Hour"]
+ ":00"
)
weather.drop(columns=["Year", "Month", "Day", "Hour"], inplace=True)
weather["date"] = weather["date"].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M")
)
# drop Humidity(%) , Wind_Chill(F) as there are a lot of null values and correlated with other
weather.drop(
columns=["Precipitation(in)", "Wind_Chill(F)", "Selected"], axis=1, inplace=True
)
weather["Temperature(F)"] = weather["Temperature(F)"].fillna(
weather["Temperature(F)"].median()
)
weather["Humidity(%)"] = weather["Humidity(%)"].fillna(weather["Humidity(%)"].median())
weather["Wind_Speed(mph)"] = weather["Wind_Speed(mph)"].fillna(
weather["Wind_Speed(mph)"].median()
)
weather["Visibility(mi)"] = weather["Visibility(mi)"].fillna(
weather["Visibility(mi)"].median()
)
weather["Weather_Condition"] = weather["Weather_Condition"].fillna(
weather["Weather_Condition"].mode()[0]
)
# there is duplicated dates with different weathers at the same data so we choose the first entry and drop the duplicated date row
# weather[weather['date'].duplicated()].sort_values('date')
weather.drop_duplicates(subset=["date"], inplace=True, ignore_index=True)
# slicing timestamp datetime format
df["timestamp1"] = df["timestamp"].apply(lambda x: x[:19])
test_df["timestamp1"] = test_df["timestamp"].apply(lambda x: x[:19])
df["timestamp1"] = df["timestamp1"].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
)
test_df["timestamp1"] = test_df["timestamp1"].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
)
df["timestamp1"] = df["timestamp1"].dt.floor("H")
test_df["timestamp1"] = test_df["timestamp1"].dt.floor("H")
# merge weather with train df
df = pd.merge(
left=df, right=weather, how="left", left_on=["timestamp1"], right_on=["date"]
)
test_df = pd.merge(
left=test_df, right=weather, how="left", left_on=["timestamp1"], right_on=["date"]
)
# df=pd.get_dummies(data=df,columns=['Weather_Condition'])
# df.drop_duplicates(subset='ID',inplace=True)
len(df["Weather_Condition"].unique())
dict1 = {
"Weather_Condition": {
"Scattered Clouds": 23,
"Mostly Cloudy / Windy": 25,
"Clear": 0,
"Fair": 1,
"Partly Cloudy": 20,
"Mostly Cloudy": 24,
"Overcast": 19,
"Fair / Windy": 2,
"Light Rain": 11,
"Cloudy": 22,
"Partly Cloudy / Windy": 21,
"Rain / Windy": 4,
"Rain": 8,
"Fog": 7,
"Heavy Rain": 18,
"Haze": 17,
"Cloudy / Windy": 16,
"Smoke": 10,
"Light Rain / Windy": 3,
"Shallow Fog": 15,
"Mist": 14,
"Light Thunderstorms and Rain": 6,
"Fog / Windy": 5,
"Patches of Fog": 9,
"Light Drizzle": 13,
"Squalls": 12,
}
}
df = df.replace(dict1)
test_df = test_df.replace(dict1)
columns_convert = [
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Stop",
"Amenity",
]
for i in columns_convert:
print(i)
print(df[i].sum())
# we find columns that have little to no imapct on target label so we drop them
columns_convert1 = ["Crossing", "Junction", "Railway", "Stop", "Amenity"]
df[columns_convert1] = df[columns_convert1].astype(int)
test_df[columns_convert1] = test_df[columns_convert1].astype(int)
# create columns for each part of the date
df["year"] = df["timestamp1"].dt.year
df["month"] = df["timestamp1"].dt.month
df["day"] = df["timestamp1"].dt.day
df["hour"] = df["timestamp1"].dt.hour
test_df["year"] = test_df["timestamp1"].dt.year
test_df["month"] = test_df["timestamp1"].dt.month
test_df["day"] = test_df["timestamp1"].dt.day
test_df["hour"] = test_df["timestamp1"].dt.hour
x = df.groupby(["hour"])["Severity"].count()
sns.barplot(x.index, x.values)
# bin the hours as per number of accidents
bins2 = [-0.001, 9, 14, 17, 25]
x = [0, 1, 2, 0]
df["hour"] = pd.cut(df["timestamp1"].dt.hour, bins=bins2, labels=x, ordered=False)
test_df["hour"] = pd.cut(
test_df["timestamp1"].dt.hour, bins=bins2, labels=x, ordered=False
)
# lets drop unwanted columns to avoid cluttering view and confusion
ls_to_drop = ["Bump", "Give_Way", "No_Exit", "Roundabout"]
df = df.drop(labels=ls_to_drop, axis=1)
test_df = test_df.drop(labels=ls_to_drop, axis=1)
holidays_unique = df["holidays"].unique()[1:].tolist()
df["holidays"].unique()
# convert the hnuniqueays cloumn to numerical column
df["holidays"] = df["holidays"].replace(holidays_unique, 1)
df["holidays"] = df["holidays"].fillna(value=0)
test_df["holidays"] = test_df["holidays"].replace(holidays_unique, 1)
test_df["holidays"] = test_df["holidays"].fillna(value=0)
# to check if everything is working okay
print((df["holidays"] == 1).sum())
print(df["holidays"].unique())
# #maybe check correlations for dataset without null values
# # df_non_null=df.dropna(axis=0)
# df_non_null.info()
# shows relationship between severity and holidays
df.groupby(["holidays", "Severity"])["ID"].count()
# sns.swarmplot(x=df['Severity'],y=df['hour'])
# plt.figure(figsize=[20,20])
# g = sns.PairGrid(data = df,vars=['Humidity(%)','Visibility(mi)','Severity'])
# # g.map_diag(plt.boxplot)
# g.map(sns.scatterplot)
# check correlation between Severity and weather factors
# df.corrwith(df['Severity'])
# #corrleation matrix
df.corr()
x = df.groupby("Severity").mean()["Distance(mi)"].sort_values(ascending=False)
sns.barplot(x.values, x.index, order=x.index, orient="h")
# corrleation between Severity and year
df.corrwith(df["date"].dt.year)
# corrleation between Severity and month
df.corrwith(df["date"].dt.month)
# corrleation between Severity and day
df.corrwith(df["date"].dt.year)
# corrleation between Severity and hour
df.corrwith(df["date"].dt.hour)
# fig1, ax1 = plt.subplots()
# ax1.set_title('Basic Plot')
# ax1.boxplot(df['date'].dt.hour)
# plt.show()
df["Side"] = pd.get_dummies(df["Side"])["L"]
test_df["Side"] = pd.get_dummies(test_df["Side"])["L"]
df.drop(columns="ID").describe()
x = dict(df.groupby(df["timestamp1"].dt.month).count()["ID"] / df.shape[0])
d = {"month": x}
d
x_test = dict(
test_df.groupby(test_df["timestamp1"].dt.month).count()["ID"] / test_df.shape[0]
)
d_test = {"month": x_test}
df["accidents per month"] = df.replace(d).month
test_df["accidents per month"] = test_df.replace(d_test).month
# df.drop(labels='accidents per moth',axis=1,inplace=True)
# The output shows desciptive statistics for the numerical features, `Lat`, `Lng`, `Distance(mi)`, and `Severity`. I'll use the numerical features to demonstrate how to train the model and make submissions. **However you shouldn't use the numerical features only to make the final submission if you want to make it to the top of the leaderboard.**
# ## Data Splitting
# Now it's time to split the dataset for the training step. Typically the dataset is split into 3 subsets, namely, the training, validation and test sets. In our case, the test set is already predefined. So we'll split the "training" set into training and validation sets with 0.8:0.2 ratio.
# *Note: a good way to generate reproducible results is to set the seed to the algorithms that depends on randomization. This is done with the argument `random_state` in the following command*
from sklearn.model_selection import train_test_split
# train_df, val_df = train_test_split(df_non_null, test_size=0.2, random_state=42) # Try adding `stratify` here
# X_train = train_df.drop(columns=['ID', 'Severity'])
# y_train = train_df['Severity']
# X_val = val_df.drop(columns=['ID', 'Severity'])
# y_val = val_df['Severity']
train_df, val_df = train_test_split(
df, test_size=0.2, random_state=42
) # Try adding `stratify` here
X_train = train_df.drop(columns=["ID", "Severity"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["ID", "Severity"])
y_val = val_df["Severity"]
# As pointed out eariler, I'll use the numerical features to train the classifier. **However, you shouldn't use the numerical features only to make the final submission if you want to make it to the top of the leaderboard.**
# This cell is used to select the numerical features. IT SHOULD BE REMOVED AS YOU DO YOUR WORK.
# for df after dropping non nulls
X_train = X_train[
[
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"holidays",
"hour",
"year",
"accidents per month",
"month",
"day",
]
]
X_val = X_val[
[
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"holidays",
"hour",
"year",
"accidents per month",
"month",
"day",
]
]
# X_train = X_train[['Lat', 'Distance(mi)','Crossing','Junction','Railway','Stop','Amenity','Side',
# 'Temperature(F)', 'Wind_Speed(mph)', 'Visibility(mi)','holidays','Weather_Condition','accidents per month','hour']]
# X_val = X_val[['Lat', 'Distance(mi)','Crossing','Junction','Railway','Stop','Amenity','Side',
# 'Temperature(F)', 'Wind_Speed(mph)', 'Visibility(mi)','holidays','Weather_Condition','accidents per month','hour']]
# ## Model Training
# Let's train a model with the data! We'll train a Random Forest Classifier to demonstrate the process of making submissions.
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
# Train the classifier
classifier = classifier.fit(X_train, y_train)
# Now let's test our classifier on the validation dataset and see the accuracy.
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
# df.drop('holidays', inplace=True, axis=1)
# df.dropna(inplace=True)
# Well. That's a good start, right? A classifier that predicts all examples' `Severity` as 2 will get around 0.63. You should get better score as you add more features and do better data preprocessing.
# ## Submission File Generation
# We have built a model and we'd like to submit our predictions on the test set! In order to do that, we'll load the test set, predict the class and save the submission file.
# First, we'll load the data.
# test_df = pd.read_csv(os.path.join(dataset_path, 'test.csv'))
# test_df.head()
# Note that the test set has the same features and doesn't have the `Severity` column.
# At this stage one must **NOT** forget to apply the same processing that was done on the training set to the features of the test set (one way to keep the two in sync is sketched after the commented-out block below).
# Now we'll add a `Severity` column to the test `DataFrame` and fill it with the predicted class.
# **I'll select the numerical features here as I did for the training set. DO NOT forget to change this step as you change the preprocessing of the training data.**
# # add holiday column to dataframe
# test_df['day']=test_df['timestamp'].apply(lambda x:x.split(' ')[0])
# test_df=pd.merge(left=test_df,right=holiday_df,how='left',left_on=['day'],right_on=['day_temp'])
# test_df['timestamp1']=test_df['timestamp'].apply(lambda x:x[:19])
# test_df['timestamp1']=test_df['timestamp1'].apply(lambda x:datetime.strptime(x,'%Y-%m-%d %H:%M:%S'))
# test_df['timestamp1']=test_df['timestamp1'].dt.round('H')
# #merge weather with test df
# test_df=pd.merge(left=test_df,right=weather,how='left',left_on=['timestamp1'],right_on=['date'])
# test_df.drop_duplicates(subset='ID',inplace=True)
# columns_convert1=['Crossing','Junction','Railway','Stop','Amenity']
# df[columns_convert1] = df[columns_convert1].astype(int)
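# A minimal sketch of one way to keep train/test preprocessing in sync: wrap the shared feature
# engineering in a helper and call it on both frames. This only covers the time-based columns and
# assumes a raw string `timestamp` column (as used above) and that pandas is imported as `pd`;
# merge-based features such as weather, holidays and accidents per month still need their own shared steps.
def add_time_features(frame):
    out = frame.copy()
    ts = pd.to_datetime(out["timestamp"].str[:19])
    out["hour"] = ts.dt.hour
    out["day"] = ts.dt.day  # adjust if `day` was defined differently upstream (e.g. a date string for merges)
    out["month"] = ts.dt.month
    out["year"] = ts.dt.year
    return out

# Example usage (the same call for both frames):
# df = add_time_features(df)
# test_df = add_time_features(test_df)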
test_df.columns
X_test = test_df.drop(columns=["ID"])
# You should update/remove the next line once you change the features used for training
X_test = X_test[
[
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"holidays",
"hour",
"year",
"accidents per month",
"month",
"day",
]
]
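# Defensive check (a small sketch): the test feature matrix must line up exactly with what the model was trained on.
assert list(X_test.columns) == list(X_train.columns), "Train/test feature mismatch"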
y_test_predicted = classifier.predict(X_test)
test_df["Severity"] = y_test_predicted
test_df.head()
# Now we're ready to generate the submission file. The submission file needs the columns `ID` and `Severity` only.
test_df[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
| false | 0 | 5,180 | 0 | 5,180 | 5,180 |
||
69173089
|
<jupyter_start><jupyter_text>Water Quality
# Context
`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`
# Content
The water_potability.csv file contains water quality metrics for 3276 different water bodies.
### 1. pH value:
```PH is an important parameter in evaluating the acid–base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52–6.83 which are in the range of WHO standards. ```
### 2. Hardness:
```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.
Hardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```
### 3. Solids (Total dissolved solids - TDS):
```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates, etc. These minerals can produce an unwanted taste and a diluted color in the appearance of water. This is an important parameter for the use of water. A high TDS value indicates that the water is highly mineralized. The desirable limit for TDS is 500 mg/l and the maximum limit is 1000 mg/l, which is prescribed for drinking purposes. ```
### 4. Chloramines:
```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```
### 5. Sulfate:
```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```
### 6. Conductivity:
```Pure water is not a good conductor of electric current; rather, it is a good insulator. An increase in ion concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, the EC value should not exceed 400 μS/cm. ```
### 7. Organic_carbon:
```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to the US EPA, TOC should be < 2 mg/L in treated/drinking water, and < 4 mg/L in source water that is used for treatment.```
### 8. Trihalomethanes:
```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```
### 9. Turbidity:
```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```
### 10. Potability:
```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```
Kaggle dataset identifier: water-potability
<jupyter_code>import pandas as pd
df = pd.read_csv('water-potability/water_potability.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<jupyter_text>Examples:
{
"ph": NaN,
"Hardness": 204.8904554713,
"Solids": 20791.318980747,
"Chloramines": 7.3002118732,
"Sulfate": 368.5164413498,
"Conductivity": 564.3086541722,
"Organic_carbon": 10.379783078100001,
"Trihalomethanes": 86.9909704615,
"Turbidity": 2.9631353806,
"Potability": 0.0
}
{
"ph": 3.7160800754,
"Hardness": 129.4229205149,
"Solids": 18630.0578579703,
"Chloramines": 6.6352458839,
"Sulfate": NaN,
"Conductivity": 592.8853591349,
"Organic_carbon": 15.1800131164,
"Trihalomethanes": 56.3290762845,
"Turbidity": 4.5006562749,
"Potability": 0.0
}
{
"ph": 8.0991241893,
"Hardness": 224.2362593936,
"Solids": 19909.5417322924,
"Chloramines": 9.2758836027,
"Sulfate": NaN,
"Conductivity": 418.6062130645,
"Organic_carbon": 16.8686369296,
"Trihalomethanes": 66.4200925118,
"Turbidity": 3.0559337497,
"Potability": 0.0
}
{
"ph": 8.3167658842,
"Hardness": 214.3733940856,
"Solids": 22018.4174407753,
"Chloramines": 8.0593323774,
"Sulfate": 356.8861356431,
"Conductivity": 363.2665161642,
"Organic_carbon": 18.4365244955,
"Trihalomethanes": 100.3416743651,
"Turbidity": 4.6287705368,
"Potability": 0.0
}
<jupyter_script>import pandas as pd
water = pd.read_csv("../input/water-potability/water_potability.csv")
water.head()
water.info()
water.describe()
Potability_0 = water[water.Potability == 0].copy()  # .copy() so the fillna below doesn't trigger a SettingWithCopyWarning
Potability_0.head()
round(Potability_0.isnull().sum() * 100 / len(Potability_0), 2).sort_values(
ascending=False
)
Potability_0.describe()
# #### Treating Missing value for Potability_0 records
Potability_0.fillna(Potability_0.median(), inplace=True)
Potability_0.describe()
Potability_1 = water[water.Potability == 1].copy()  # .copy() so the fillna below doesn't trigger a SettingWithCopyWarning
Potability_1.head()
round(Potability_1.isnull().sum() * 100 / len(Potability_1), 2).sort_values(
ascending=False
)
# #### Treating Missing value for Potability_1 records
Potability_1.fillna(Potability_1.median(), inplace=True)
Potability_1.describe()
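# An equivalent one-pass alternative (a sketch; the result isn't used below): fill each column
# with the median of its own Potability group via groupby/transform.
value_cols = water.columns.drop("Potability")
water_alt = water.copy()
water_alt[value_cols] = water.groupby("Potability")[value_cols].transform(
    lambda s: s.fillna(s.median())
)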
import numpy as np
water = pd.concat([Potability_1, Potability_0], axis=0)
water = water.iloc[np.random.permutation(len(water))]
water = water.reset_index(drop=True)
water.head()
water.nunique()
round(water.Potability.value_counts() * 100 / len(water), 2)
# This is based on paper named "XBNet : An Extremely Boosted Neural Network"
# Link : https://arxiv.org/abs/2106.05239
import torch
import numpy as np
from sklearn.model_selection import train_test_split
from XBNet.training_utils import training, predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
y = water[["Potability"]]
y.head()
x = water.loc[:, :"Turbidity"]
x.head()
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.20, random_state=1, stratify=y  # was random_state=True, which is interpreted as the integer seed 1; made explicit
)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=42)
x_train_smote, y_train_smote = sm.fit_resample(x_train, y_train)
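# Sanity check (a sketch): SMOTE should leave both classes with equal counts (`np` is imported above).
print("Class counts before SMOTE:", np.bincount(np.ravel(y_train)))
print("Class counts after SMOTE: ", np.bincount(np.ravel(y_train_smote)))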
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_smote = scaler.fit_transform(x_train_smote)
x_test = scaler.transform(x_test)
y_test = y_test.to_numpy()
y_test[:5]
y_train_smote = y_train_smote.to_numpy()
y_train_smote[:5]
y_train_smote = y_train_smote.reshape((-1))
y_train_smote.shape
y_test = y_test.reshape((-1))
y_test.shape
model = XBNETClassifier(x_train_smote, y_train_smote, num_layers=2)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
m, acc, lo, val_ac, val_lo = run_XBNET(
x_train_smote,
x_test,
y_train_smote,
y_test,
model,
criterion,
optimizer,
epochs=100,
batch_size=256,
)
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.plot(acc, label="training accuracy")
plt.plot(val_ac, label="validation accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.legend()
plt.grid()
plt.subplot(1, 2, 2)
plt.plot(lo, label="training loss")
plt.plot(val_lo, label="validation loss")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend()
plt.grid()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173089.ipynb
|
water-potability
|
adityakadiwal
|
[{"Id": 69173089, "ScriptId": 18816986, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4760409, "CreationDate": "07/27/2021 16:45:05", "VersionNumber": 1.0, "Title": "XBNet on Water Quality Prediction", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 107.0, "LinesInsertedFromPrevious": 61.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 46.0, "LinesInsertedFromFork": 61.0, "LinesDeletedFromFork": 21.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 46.0, "TotalVotes": 1}]
|
[{"Id": 92021276, "KernelVersionId": 69173089, "SourceDatasetVersionId": 2157486}]
|
[{"Id": 2157486, "DatasetId": 1292407, "DatasourceVersionId": 2198621, "CreatorUserId": 5454565, "LicenseName": "CC0: Public Domain", "CreationDate": "04/25/2021 10:27:44", "VersionNumber": 3.0, "Title": "Water Quality", "Slug": "water-potability", "Subtitle": "Drinking water potability", "Description": "# Context\n\n`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`\n\n\n# Content\n\n\nThe water_potability.csv file contains water quality metrics for 3276 different water bodies. \n### 1. pH value:\n```PH is an important parameter in evaluating the acid\u2013base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52\u20136.83 which are in the range of WHO standards. ```\n\n### 2. Hardness:\n```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.\nHardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```\n\n### 3. Solids (Total dissolved solids - TDS): \n```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ```\n\n### 4. Chloramines: \n```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```\n\n### 5. Sulfate: \n```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```\n\n### 6. Conductivity: \n```Pure water is not a good conductor of electric current rather\u2019s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 \u03bcS/cm. ```\n\n### 7. 
Organic_carbon: \n ```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA < 2 mg/L as TOC in treated / drinking water, and < 4 mg/Lit in source water which is use for treatment.```\n\n### 8. Trihalomethanes: \n```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```\n\n### 9. Turbidity: \n```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```\n\n### 10. Potability: \n```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```", "VersionNotes": "Removed garbage column", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1292407, "CreatorUserId": 5454565, "OwnerUserId": 5454565.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2157486.0, "CurrentDatasourceVersionId": 2198621.0, "ForumId": 1311077, "Type": 2, "CreationDate": "04/24/2021 07:18:57", "LastActivityDate": "04/24/2021", "TotalViews": 422520, "TotalDownloads": 61531, "TotalVotes": 1262, "TotalKernels": 437}]
|
[{"Id": 5454565, "UserName": "adityakadiwal", "DisplayName": "Aditya Kadiwal", "RegisterDate": "07/12/2020", "PerformanceTier": 2}]
|
[{"water-potability/water_potability.csv": {"column_names": "[\"ph\", \"Hardness\", \"Solids\", \"Chloramines\", \"Sulfate\", \"Conductivity\", \"Organic_carbon\", \"Trihalomethanes\", \"Turbidity\", \"Potability\"]", "column_data_types": "{\"ph\": \"float64\", \"Hardness\": \"float64\", \"Solids\": \"float64\", \"Chloramines\": \"float64\", \"Sulfate\": \"float64\", \"Conductivity\": \"float64\", \"Organic_carbon\": \"float64\", \"Trihalomethanes\": \"float64\", \"Turbidity\": \"float64\", \"Potability\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3276 entries, 0 to 3275\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ph 2785 non-null float64\n 1 Hardness 3276 non-null float64\n 2 Solids 3276 non-null float64\n 3 Chloramines 3276 non-null float64\n 4 Sulfate 2495 non-null float64\n 5 Conductivity 3276 non-null float64\n 6 Organic_carbon 3276 non-null float64\n 7 Trihalomethanes 3114 non-null float64\n 8 Turbidity 3276 non-null float64\n 9 Potability 3276 non-null int64 \ndtypes: float64(9), int64(1)\nmemory usage: 256.1 KB\n", "summary": "{\"ph\": {\"count\": 2785.0, \"mean\": 7.080794504276835, \"std\": 1.5943195187088104, \"min\": 0.0, \"25%\": 6.09309191422186, \"50%\": 7.036752103833548, \"75%\": 8.06206612314847, \"max\": 13.999999999999998}, \"Hardness\": {\"count\": 3276.0, \"mean\": 196.36949601730151, \"std\": 32.879761476294156, \"min\": 47.432, \"25%\": 176.85053787752437, \"50%\": 196.96762686363076, \"75%\": 216.66745621487073, \"max\": 323.124}, \"Solids\": {\"count\": 3276.0, \"mean\": 22014.092526077104, \"std\": 8768.570827785927, \"min\": 320.942611274359, \"25%\": 15666.69029696465, \"50%\": 20927.833606520187, \"75%\": 27332.762127438615, \"max\": 61227.19600771213}, \"Chloramines\": {\"count\": 3276.0, \"mean\": 7.122276793425786, \"std\": 1.5830848890397096, \"min\": 0.3520000000000003, \"25%\": 6.1274207554913, \"50%\": 7.130298973883081, \"75%\": 8.114887032109028, \"max\": 13.127000000000002}, \"Sulfate\": {\"count\": 2495.0, \"mean\": 333.7757766108135, \"std\": 41.416840461672706, \"min\": 129.00000000000003, \"25%\": 307.69949783471964, \"50%\": 333.073545745888, \"75%\": 359.9501703847443, \"max\": 481.0306423059972}, \"Conductivity\": {\"count\": 3276.0, \"mean\": 426.20511068255325, \"std\": 80.8240640511118, \"min\": 181.483753985146, \"25%\": 365.7344141184627, \"50%\": 421.8849682800544, \"75%\": 481.7923044877282, \"max\": 753.3426195583046}, \"Organic_carbon\": {\"count\": 3276.0, \"mean\": 14.284970247677318, \"std\": 3.308161999126874, \"min\": 2.1999999999999886, \"25%\": 12.065801333613067, \"50%\": 14.218337937208588, \"75%\": 16.557651543843434, \"max\": 28.30000000000001}, \"Trihalomethanes\": {\"count\": 3114.0, \"mean\": 66.39629294676803, \"std\": 16.175008422218657, \"min\": 0.7379999999999995, \"25%\": 55.844535620979954, \"50%\": 66.62248509808484, \"75%\": 77.33747290873062, \"max\": 124.0}, \"Turbidity\": {\"count\": 3276.0, \"mean\": 3.966786169791058, \"std\": 0.7803824084854124, \"min\": 1.45, \"25%\": 3.439710869612912, \"50%\": 3.955027562993039, \"75%\": 4.50031978728511, \"max\": 6.739}, \"Potability\": {\"count\": 3276.0, \"mean\": 0.3901098901098901, \"std\": 0.48784916967025516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"ph\":{\"0\":null,\"1\":3.7160800754,\"2\":8.0991241893,\"3\":8.3167658842},\"Hardness\":{\"0\":204.8904554713,\"1\":129.4229205149,\"2\":224.2362593936,\"3\":214.3733940856},\"Solids\":{\"0\":20791.318980747,\"1\":18630.0578579703,\"2\":19909.5417322924,\"3\":22018.4174407753},\"Chloramines\":{\"0\":7.3002118732,\"1\":6.6352458839,\"2\":9.2758836027,\"3\":8.0593323774},\"Sulfate\":{\"0\":368.5164413498,\"1\":null,\"2\":null,\"3\":356.8861356431},\"Conductivity\":{\"0\":564.3086541722,\"1\":592.8853591349,\"2\":418.6062130645,\"3\":363.2665161642},\"Organic_carbon\":{\"0\":10.3797830781,\"1\":15.1800131164,\"2\":16.8686369296,\"3\":18.4365244955},\"Trihalomethanes\":{\"0\":86.9909704615,\"1\":56.3290762845,\"2\":66.4200925118,\"3\":100.3416743651},\"Turbidity\":{\"0\":2.9631353806,\"1\":4.5006562749,\"2\":3.0559337497,\"3\":4.6287705368},\"Potability\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
| true | 1 |
<start_data_description><data_path>water-potability/water_potability.csv:
<column_names>
['ph', 'Hardness', 'Solids', 'Chloramines', 'Sulfate', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity', 'Potability']
<column_types>
{'ph': 'float64', 'Hardness': 'float64', 'Solids': 'float64', 'Chloramines': 'float64', 'Sulfate': 'float64', 'Conductivity': 'float64', 'Organic_carbon': 'float64', 'Trihalomethanes': 'float64', 'Turbidity': 'float64', 'Potability': 'int64'}
<dataframe_Summary>
{'ph': {'count': 2785.0, 'mean': 7.080794504276835, 'std': 1.5943195187088104, 'min': 0.0, '25%': 6.09309191422186, '50%': 7.036752103833548, '75%': 8.06206612314847, 'max': 13.999999999999998}, 'Hardness': {'count': 3276.0, 'mean': 196.36949601730151, 'std': 32.879761476294156, 'min': 47.432, '25%': 176.85053787752437, '50%': 196.96762686363076, '75%': 216.66745621487073, 'max': 323.124}, 'Solids': {'count': 3276.0, 'mean': 22014.092526077104, 'std': 8768.570827785927, 'min': 320.942611274359, '25%': 15666.69029696465, '50%': 20927.833606520187, '75%': 27332.762127438615, 'max': 61227.19600771213}, 'Chloramines': {'count': 3276.0, 'mean': 7.122276793425786, 'std': 1.5830848890397096, 'min': 0.3520000000000003, '25%': 6.1274207554913, '50%': 7.130298973883081, '75%': 8.114887032109028, 'max': 13.127000000000002}, 'Sulfate': {'count': 2495.0, 'mean': 333.7757766108135, 'std': 41.416840461672706, 'min': 129.00000000000003, '25%': 307.69949783471964, '50%': 333.073545745888, '75%': 359.9501703847443, 'max': 481.0306423059972}, 'Conductivity': {'count': 3276.0, 'mean': 426.20511068255325, 'std': 80.8240640511118, 'min': 181.483753985146, '25%': 365.7344141184627, '50%': 421.8849682800544, '75%': 481.7923044877282, 'max': 753.3426195583046}, 'Organic_carbon': {'count': 3276.0, 'mean': 14.284970247677318, 'std': 3.308161999126874, 'min': 2.1999999999999886, '25%': 12.065801333613067, '50%': 14.218337937208588, '75%': 16.557651543843434, 'max': 28.30000000000001}, 'Trihalomethanes': {'count': 3114.0, 'mean': 66.39629294676803, 'std': 16.175008422218657, 'min': 0.7379999999999995, '25%': 55.844535620979954, '50%': 66.62248509808484, '75%': 77.33747290873062, 'max': 124.0}, 'Turbidity': {'count': 3276.0, 'mean': 3.966786169791058, 'std': 0.7803824084854124, 'min': 1.45, '25%': 3.439710869612912, '50%': 3.955027562993039, '75%': 4.50031978728511, 'max': 6.739}, 'Potability': {'count': 3276.0, 'mean': 0.3901098901098901, 'std': 0.48784916967025516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<some_examples>
{'ph': {'0': None, '1': 3.7160800754, '2': 8.0991241893, '3': 8.3167658842}, 'Hardness': {'0': 204.8904554713, '1': 129.4229205149, '2': 224.2362593936, '3': 214.3733940856}, 'Solids': {'0': 20791.318980747, '1': 18630.0578579703, '2': 19909.5417322924, '3': 22018.4174407753}, 'Chloramines': {'0': 7.3002118732, '1': 6.6352458839, '2': 9.2758836027, '3': 8.0593323774}, 'Sulfate': {'0': 368.5164413498, '1': None, '2': None, '3': 356.8861356431}, 'Conductivity': {'0': 564.3086541722, '1': 592.8853591349, '2': 418.6062130645, '3': 363.2665161642}, 'Organic_carbon': {'0': 10.3797830781, '1': 15.1800131164, '2': 16.8686369296, '3': 18.4365244955}, 'Trihalomethanes': {'0': 86.9909704615, '1': 56.3290762845, '2': 66.4200925118, '3': 100.3416743651}, 'Turbidity': {'0': 2.9631353806, '1': 4.5006562749, '2': 3.0559337497, '3': 4.6287705368}, 'Potability': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 1,040 | 1 | 3,317 | 1,040 |
69173586
|
<jupyter_start><jupyter_text>Pneumothorax Binary Classification task
### Context
Pneumothorax small dataset (2027 images) for binary classification task (Pneumothorax or not)
### Content
Medical images of lungs taken by radiologists during chest x-rays of patients
Kaggle dataset identifier: pneumothorax-binary-classification-task
<jupyter_code>import pandas as pd
df = pd.read_csv('pneumothorax-binary-classification-task/train_data.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 2027 entries, 0 to 2026
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0.1 2027 non-null int64
1 Unnamed: 0 2027 non-null int64
2 file_name 2027 non-null object
3 target 2027 non-null int64
dtypes: int64(3), object(1)
memory usage: 63.5+ KB
<jupyter_text>Examples:
{
"Unnamed: 0.1": 6193,
"Unnamed: 0": 6193,
"file_name": "1.2.276.0.7230010.3.1.4.8323329.491.1517875163.187701.png",
"target": 1
}
{
"Unnamed: 0.1": 377,
"Unnamed: 0": 377,
"file_name": "1.2.276.0.7230010.3.1.4.8323329.14299.1517875250.643953.png",
"target": 1
}
{
"Unnamed: 0.1": 9874,
"Unnamed: 0": 9874,
"file_name": "1.2.276.0.7230010.3.1.4.8323329.1002.1517875165.878183.png",
"target": 1
}
{
"Unnamed: 0.1": 4966,
"Unnamed: 0": 4966,
"file_name": "1.2.276.0.7230010.3.1.4.8323329.4475.1517875183.64202.png",
"target": 1
}
<jupyter_script>#
# What is Pneumothorax?
# * A pneumothorax can be defined as air in the pleural cavity. This occurs when there is a breach of the lung surface or chest wall which allows air to enter the pleural cavity and consequently cause the lung to collapse.
# * Pneumothorax can be caused by a blunt chest injury, damage from underlying lung disease, or most horrifying—it may occur for no obvious reason at all. On some occasions, a collapsed lung can be a life-threatening event.
# * Pneumothorax is usually diagnosed by a radiologist on a chest x-ray, and can sometimes be very difficult to confirm. An accurate AI algorithm to detect pneumothorax would be useful in a lot of clinical scenarios. AI could be used to triage chest radiographs for priority interpretation, or to provide a more confident diagnosis for non-radiologists.
# Importing Libraries
#
import numpy as np
import pandas as pd
from pathlib import Path
import os.path
import matplotlib.pyplot as plt
import seaborn as sns
import os
import cv2
from keras.preprocessing.image import load_img
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import (
BatchNormalization,
Dense,
GlobalAveragePooling2D,
Lambda,
Dropout,
InputLayer,
Input,
)
from tensorflow import keras
from keras.applications import Xception
from keras.applications.xception import preprocess_input
from keras.callbacks import EarlyStopping
from keras.models import Sequential
#
# Importing The Dataset
#
train_img_path = "../input/pneumothorax-binary-classification-task/small_train_data_set/small_train_data_set"
labels = pd.read_csv(r"../input/pneumothorax-binary-classification-task/train_data.csv")
# The Pneumothorax small dataset contains 2,027 medical images of lungs taken by radiologists during chest x-rays of patients.
labels.head()
# drop unnecessary columns
labels.drop(["Unnamed: 0", "Unnamed: 0.1"], axis=1, inplace=True)
print(f"Number of pictures in the training dataset: {labels.shape[0]}\n")
print(f"Number of different labels: {len(labels.target.unique())}\n")
print(f"Labels: {labels.target.unique()}")
#
# Data Visualization
#
plt.figure(figsize=(20, 40))
i = 1
for idx, s in labels.head(6).iterrows():
img_path = os.path.join(train_img_path, s["file_name"])
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
fig = plt.subplot(6, 2, i)
fig.imshow(img)
fig.set_title(s["target"])
i += 1
# Extracting different classes
classes = sorted(labels["target"].unique())
n_classes = len(classes)
print(f"number of class: {n_classes}")
classes_to_num = dict(zip(classes, range(n_classes)))
#
# Converting Images to Array
#
# Function to load and convert images to array
def images_to_array(data_dir, df, image_size):
image_names = df["file_name"]
image_labels = df["target"]
data_size = len(image_names)
X = np.zeros(
[data_size, image_size[0], image_size[1], image_size[2]], dtype=np.uint8
)
y = np.zeros([data_size, 1], dtype=np.uint8)
for i in range(data_size):
img_name = image_names[i]
img_dir = os.path.join(data_dir, img_name)
img_pixels = load_img(img_dir, target_size=image_size)
X[i] = img_pixels
y[i] = classes_to_num[image_labels[i]]
y = to_categorical(y)
ind = np.random.permutation(data_size)
X = X[ind]
y = y[ind]
print("Ouptut Data Size: ", X.shape)
print("Ouptut Label Size: ", y.shape)
return X, y
# Selecting image size according to pretrained models
img_size = (299, 299, 3)
X, y = images_to_array(train_img_path, labels, img_size)
#
# Extracting features using Xception
#
def get_features(model_name, data_preprocessor, weight, input_size, data):
# Prepare pipeline.
input_layer = Input(input_size)
preprocessor = Lambda(data_preprocessor)(input_layer)
base_model = model_name(weights=weight, include_top=False, input_shape=input_size)(
preprocessor
)
avg = GlobalAveragePooling2D()(base_model)
feature_extractor = Model(inputs=input_layer, outputs=avg)
# Extract feature.
feature_maps = feature_extractor.predict(data, batch_size=128, verbose=1)
print("Feature maps shape: ", feature_maps.shape)
return feature_maps
# Extracting features using Xception
Xception_preprocessor = preprocess_input
Xception_features = get_features(
Xception,
Xception_preprocessor,
"../input/keras-pretrained-models/Xception_NoTop_ImageNet.h5",
img_size,
X,
)
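# Optional (a sketch): cache the extracted features so later runs can skip the slow forward pass.
np.save("xception_features.npy", Xception_features)  # reload later with np.load("xception_features.npy")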
#
# Model Building
#
# Callbacks
EarlyStop_callback = EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
my_callback = [EarlyStop_callback]
# Adding the final layers on top of the extracted features; the actual classification happens in this small dense head
# Building Model
model = Sequential()
model.add(InputLayer(Xception_features.shape[1:]))
model.add(Dropout(0.3))
model.add(Dense(2, activation="sigmoid"))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["AUC"])
model.summary()
# Training the classification head on the extracted features and evaluating it on the validation split
history = model.fit(
Xception_features,
y,
validation_split=0.20,
callbacks=my_callback,
epochs=50,
batch_size=128,
)
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
# summarize history for AUC
plt.plot(history.history["auc"])
plt.plot(history.history["val_auc"])
plt.title("model AUC")
plt.ylabel("AUC")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
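# A rough per-class check (a sketch). Note that Keras' validation_split holds out the *last* 20% of
# rows, so this manual split only approximates the validation set used during training.
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

_, feats_val, _, y_val_oh = train_test_split(
    Xception_features, y, test_size=0.20, random_state=0
)
val_pred = model.predict(feats_val).argmax(axis=1)
print(classification_report(y_val_oh.argmax(axis=1), val_pred))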
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173586.ipynb
|
pneumothorax-binary-classification-task
|
volodymyrgavrysh
|
[{"Id": 69173586, "ScriptId": 18843398, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4811132, "CreationDate": "07/27/2021 16:51:16", "VersionNumber": 3.0, "Title": "[Pneumothorax Binary Classification] Xception", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 180.0, "LinesInsertedFromPrevious": 69.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 111.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 6}]
|
[{"Id": 92022107, "KernelVersionId": 69173586, "SourceDatasetVersionId": 2461792}, {"Id": 92022106, "KernelVersionId": 69173586, "SourceDatasetVersionId": 1493925}]
|
[{"Id": 2461792, "DatasetId": 1490083, "DatasourceVersionId": 2504216, "CreatorUserId": 2226962, "LicenseName": "CC0: Public Domain", "CreationDate": "07/25/2021 14:47:08", "VersionNumber": 2.0, "Title": "Pneumothorax Binary Classification task", "Slug": "pneumothorax-binary-classification-task", "Subtitle": "2027 medical images (chest x-ray) with and without pneumothorax", "Description": "### Context\n\nPneumothorax small dataset (2027 images) for binary classification task (Pneumothorax or not)\n\n### Content\n\nMedical images of lungs done by radiologist during chest x-ray of the patients\n\n### Acknowledgements\n\nSIIM Machine Learning Committee Co-Chairs, Steven G. Langer, PhD, CIIP and George Shih, MD, MS for tirelessly leading this effort and making the challenge possible in such a short period of time.\nSIIM Machine Learning Committee Members for their dedication in annotating the dataset, helping to define the most useful metrics and running tests to prepare the challenge for launch.\nSIIM Hackathon Committee, especially Mohannad Hussain, for their crucial technical support with data conversion.\n\n### Inspiration\nOriginal data [https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/overview https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/overview ](https://www.kaggle.com/c/siim-acr-pneumothorax-segmentation/overview )", "VersionNotes": "Data Update 2021/07/25", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1490083, "CreatorUserId": 2226962, "OwnerUserId": 2226962.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2461792.0, "CurrentDatasourceVersionId": 2504216.0, "ForumId": 1509784, "Type": 2, "CreationDate": "07/25/2021 14:29:30", "LastActivityDate": "07/25/2021", "TotalViews": 14585, "TotalDownloads": 1042, "TotalVotes": 30, "TotalKernels": 4}]
|
[{"Id": 2226962, "UserName": "volodymyrgavrysh", "DisplayName": "VolodymyrGavrysh", "RegisterDate": "09/09/2018", "PerformanceTier": 2}]
|
[{"pneumothorax-binary-classification-task/train_data.csv": {"column_names": "[\"Unnamed: 0.1\", \"Unnamed: 0\", \"file_name\", \"target\"]", "column_data_types": "{\"Unnamed: 0.1\": \"int64\", \"Unnamed: 0\": \"int64\", \"file_name\": \"object\", \"target\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2027 entries, 0 to 2026\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0.1 2027 non-null int64 \n 1 Unnamed: 0 2027 non-null int64 \n 2 file_name 2027 non-null object\n 3 target 2027 non-null int64 \ndtypes: int64(3), object(1)\nmemory usage: 63.5+ KB\n", "summary": "{\"Unnamed: 0.1\": {\"count\": 2027.0, \"mean\": 5027.7893438579185, \"std\": 2931.1827541334287, \"min\": 6.0, \"25%\": 2489.5, \"50%\": 4968.0, \"75%\": 7591.0, \"max\": 10135.0}, \"Unnamed: 0\": {\"count\": 2027.0, \"mean\": 5027.7893438579185, \"std\": 2931.1827541334287, \"min\": 6.0, \"25%\": 2489.5, \"50%\": 4968.0, \"75%\": 7591.0, \"max\": 10135.0}, \"target\": {\"count\": 2027.0, \"mean\": 0.7878638381845091, \"std\": 0.4089216372839214, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": "{\"Unnamed: 0.1\":{\"0\":6193,\"1\":377,\"2\":9874,\"3\":4966},\"Unnamed: 0\":{\"0\":6193,\"1\":377,\"2\":9874,\"3\":4966},\"file_name\":{\"0\":\"1.2.276.0.7230010.3.1.4.8323329.491.1517875163.187701.png\",\"1\":\"1.2.276.0.7230010.3.1.4.8323329.14299.1517875250.643953.png\",\"2\":\"1.2.276.0.7230010.3.1.4.8323329.1002.1517875165.878183.png\",\"3\":\"1.2.276.0.7230010.3.1.4.8323329.4475.1517875183.64202.png\"},\"target\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1}}"}}]
| true | 1 |
<start_data_description><data_path>pneumothorax-binary-classification-task/train_data.csv:
<column_names>
['Unnamed: 0.1', 'Unnamed: 0', 'file_name', 'target']
<column_types>
{'Unnamed: 0.1': 'int64', 'Unnamed: 0': 'int64', 'file_name': 'object', 'target': 'int64'}
<dataframe_Summary>
{'Unnamed: 0.1': {'count': 2027.0, 'mean': 5027.7893438579185, 'std': 2931.1827541334287, 'min': 6.0, '25%': 2489.5, '50%': 4968.0, '75%': 7591.0, 'max': 10135.0}, 'Unnamed: 0': {'count': 2027.0, 'mean': 5027.7893438579185, 'std': 2931.1827541334287, 'min': 6.0, '25%': 2489.5, '50%': 4968.0, '75%': 7591.0, 'max': 10135.0}, 'target': {'count': 2027.0, 'mean': 0.7878638381845091, 'std': 0.4089216372839214, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 2027 entries, 0 to 2026
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0.1 2027 non-null int64
1 Unnamed: 0 2027 non-null int64
2 file_name 2027 non-null object
3 target 2027 non-null int64
dtypes: int64(3), object(1)
memory usage: 63.5+ KB
<some_examples>
{'Unnamed: 0.1': {'0': 6193, '1': 377, '2': 9874, '3': 4966}, 'Unnamed: 0': {'0': 6193, '1': 377, '2': 9874, '3': 4966}, 'file_name': {'0': '1.2.276.0.7230010.3.1.4.8323329.491.1517875163.187701.png', '1': '1.2.276.0.7230010.3.1.4.8323329.14299.1517875250.643953.png', '2': '1.2.276.0.7230010.3.1.4.8323329.1002.1517875165.878183.png', '3': '1.2.276.0.7230010.3.1.4.8323329.4475.1517875183.64202.png'}, 'target': {'0': 1, '1': 1, '2': 1, '3': 1}}
<end_description>
| 1,739 | 6 | 2,442 | 1,739 |
69173733
|
<jupyter_start><jupyter_text>Uber Pickups in New York City
### Uber TLC FOIL Response
This directory contains data on over 4.5 million Uber pickups in New York City from April to September 2014, and 14.3 million more Uber pickups from January to June 2015. Trip-level data on 10 other for-hire vehicle (FHV) companies, as well as aggregated data for 329 FHV companies, is also included. All the files are as they were received on August 3, Sept. 15 and Sept. 22, 2015.
FiveThirtyEight obtained the data from the [NYC Taxi & Limousine Commission (TLC)](http://www.nyc.gov/html/tlc/html/home/home.shtml) by submitting a Freedom of Information Law request on July 20, 2015. The TLC has sent us the data in batches as it continues to review trip data Uber and other HFV companies have submitted to it. The TLC's correspondence with FiveThirtyEight is included in the files `TLC_letter.pdf`, `TLC_letter2.pdf` and `TLC_letter3.pdf`. TLC records requests can be made [here](http://www.nyc.gov/html/tlc/html/passenger/records.shtml).
This data was used for four FiveThirtyEight stories: [Uber Is Serving New York’s Outer Boroughs More Than Taxis Are](http://fivethirtyeight.com/features/uber-is-serving-new-yorks-outer-boroughs-more-than-taxis-are/), [Public Transit Should Be Uber’s New Best Friend](http://fivethirtyeight.com/features/public-transit-should-be-ubers-new-best-friend/), [Uber Is Taking Millions Of Manhattan Rides Away From Taxis](http://fivethirtyeight.com/features/uber-is-taking-millions-of-manhattan-rides-away-from-taxis/), and [Is Uber Making NYC Rush-Hour Traffic Worse?](http://fivethirtyeight.com/features/is-uber-making-nyc-rush-hour-traffic-worse/).
## The Data
The dataset contains, roughly, four groups of files:
- Uber trip data from 2014 (April - September), separated by month, with detailed location information
- Uber trip data from 2015 (January - June), with less fine-grained location information
- non-Uber FHV (For-Hire Vehicle) trips. The trip information varies by company, but can include day of trip, time of trip, pickup location, driver's for-hire license number, and vehicle's for-hire license number.
- aggregate ride and vehicle statistics for all FHV companies (and, occasionally, for taxi companies)
### Uber trip data from 2014
There are six files of raw data on Uber pickups in New York City from April to September 2014. The files are separated by month and each has the following columns:
- `Date/Time` : The date and time of the Uber pickup
- `Lat` : The latitude of the Uber pickup
- `Lon` : The longitude of the Uber pickup
- `Base` : The [TLC base company](http://www.nyc.gov/html/tlc/html/industry/base_and_business.shtml) code affiliated with the Uber pickup
These files are named:
- ```uber-raw-data-apr14.csv```
- ```uber-raw-data-aug14.csv```
- ```uber-raw-data-jul14.csv```
- ```uber-raw-data-jun14.csv```
- ```uber-raw-data-may14.csv```
- ```uber-raw-data-sep14.csv```
### Uber trip data from 2015
Also included is the file `uber-raw-data-janjune-15.csv` This file has the following columns:
- `Dispatching_base_num` : The [TLC base company](http://www.nyc.gov/html/tlc/html/industry/base_and_business.shtml) code of the base that dispatched the Uber
- `Pickup_date` : The date and time of the Uber pickup
- `Affiliated_base_num` : The [TLC base company](http://www.nyc.gov/html/tlc/html/industry/base_and_business.shtml) code affiliated with the Uber pickup
- `locationID` : The pickup location ID affiliated with the Uber pickup
The `Base` codes are for the following Uber bases:
B02512 : Unter
B02598 : Hinter
B02617 : Weiter
B02682 : Schmecken
B02764 : Danach-NY
B02765 : Grun
B02835 : Dreist
B02836 : Drinnen
For coarse-grained location information from these pickups, the file `taxi-zone-lookup.csv` shows the taxi `Zone` (essentially, neighborhood) and `Borough` for each `locationID`.
### Non-Uber FLV trips
The dataset also contains 10 files of raw data on pickups from 10 for-hire vehicle (FHV) companies. The trip information varies by company, but can include day of trip, time of trip, pickup location, driver's for-hire license number, and vehicle's for-hire license number.
These files are named:
- ```American_B01362.csv```
- ```Diplo_B01196.csv```
- ```Highclass_B01717.csv```
- ```Skyline_B00111.csv```
- ```Carmel_B00256.csv```
- ```Federal_02216.csv```
- ```Lyft_B02510.csv```
- ```Dial7_B00887.csv```
- ```Firstclass_B01536.csv```
- ```Prestige_B01338.csv```
### Aggregate Statistics
There is also a file `other-FHV-data-jan-aug-2015.csv` containing daily pickup data for 329 FHV companies from January 2015 through August 2015.
The file `Uber-Jan-Feb-FOIL.csv` contains aggregated daily Uber trip statistics in January and February 2015.
Kaggle dataset identifier: uber-pickups-in-new-york-city
<jupyter_script># ### Importing Libraries
# Data Processing
import pandas as pd
import numpy as np
# Data Visualisation
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import os
# # Listing down the required files
files = [
filename
for filename in os.listdir(r"../input/uber-pickups-in-new-york-city")
if filename.startswith("uber-")
]
files
files.remove("uber-raw-data-janjune-15.csv")
# # Concatenate the data
path = r"../input/uber-pickups-in-new-york-city"
Data = pd.DataFrame()
for file in files:
df = pd.read_csv(path + "/" + file, encoding="utf-8")
Data = pd.concat([df, Data])
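# Sanity check (a sketch): confirm how many monthly files were combined and the resulting row count.
print(f"Loaded {len(files)} files, {len(Data):,} rows in total")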
Data.sample(frac=0.5)
# # Checking data attributes
Data.shape
data = Data.copy()
data.dtypes
# # Data Preprocessing
data["Date/Time"] = pd.to_datetime(data["Date/Time"], format="%m/%d/%Y %H:%M:%S")
data.dtypes
data["month"] = data["Date/Time"].dt.month
data["weekday"] = data["Date/Time"].dt.day_name()
data["day"] = data["Date/Time"].dt.day
data["hour"] = data["Date/Time"].dt.hour
data["minute"] = data["Date/Time"].dt.minute
data.head()
data.dtypes
# 01. Which weekday sees the highest number of uber trips?
weekday = pd.DataFrame(data["weekday"].value_counts()).reset_index()
weekday.columns = ["Weekday", "Count"]
px.bar(
weekday,
x="Weekday",
y="Count",
template="plotly_dark",
title="Uber trip by Weekdays",
labels={"Count": "Number of Trips"},
width=800,
height=400,
)
# Uber records the highest number of cab rides on Thursdays
# 02. Which is the busiest hour in the day for uber cabs?
hour = pd.DataFrame(data["hour"].value_counts()).reset_index()
hour.columns = ["Hour", "Count"]
hour = hour.sort_values(by="Hour")
px.bar(
hour,
x="Hour",
y="Count",
template="plotly_dark",
title="Uber Rides by hour",
labels={"Count": "Number of Trips"},
width=1100,
)
# The maximum number of rides is taken between 4 and 7 PM
plt.style.available
plt.figure(figsize=(20, 20))
plt.style.use("seaborn-dark-palette")
colors = ["#636EFA"]
for i, month in enumerate(data["month"].unique()):
plt.subplot(3, 2, i + 1)
data[data["month"] == month]["hour"].hist(color=colors)
Monthly = pd.DataFrame(data["month"].value_counts())
px.bar(
Monthly,
x=Monthly.index,
y="month",
height=400,
width=600,
labels={"index": "Months", "month": "Number of Trips"},
template="plotly_dark",
title="Uber rides by Month",
)
# The maximum number of rides was taken in September
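# A compact way to see the weekday and hour patterns together (a sketch using the columns created above):
weekday_order = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
pivot = data.groupby(["weekday", "hour"]).size().unstack(fill_value=0).reindex(weekday_order)
plt.figure(figsize=(12, 5))
sns.heatmap(pivot, cmap="viridis")
plt.title("Uber pickups by weekday and hour")
plt.show()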
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173733.ipynb
|
uber-pickups-in-new-york-city
| null |
[{"Id": 69173733, "ScriptId": 18848982, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6341134, "CreationDate": "07/27/2021 16:53:22", "VersionNumber": 2.0, "Title": "Exploratory Analysis on Uber Pickups\ud83d\ude95", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 128.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 117.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92022374, "KernelVersionId": 69173733, "SourceDatasetVersionId": 793057}]
|
[{"Id": 793057, "DatasetId": 360, "DatasourceVersionId": 814847, "CreatorUserId": 998023, "LicenseName": "CC0: Public Domain", "CreationDate": "11/13/2019 19:52:18", "VersionNumber": 2.0, "Title": "Uber Pickups in New York City", "Slug": "uber-pickups-in-new-york-city", "Subtitle": "Trip data for over 20 million Uber (and other for-hire vehicle) trips in NYC", "Description": "### Uber TLC FOIL Response\n\nThis directory contains data on over 4.5 million Uber pickups in New York City from April to September 2014, and 14.3 million more Uber pickups from January to June 2015. Trip-level data on 10 other for-hire vehicle (FHV) companies, as well as aggregated data for 329 FHV companies, is also included. All the files are as they were received on August 3, Sept. 15 and Sept. 22, 2015. \n\nFiveThirtyEight obtained the data from the [NYC Taxi & Limousine Commission (TLC)](http://www.nyc.gov/html/tlc/html/home/home.shtml) by submitting a Freedom of Information Law request on July 20, 2015. The TLC has sent us the data in batches as it continues to review trip data Uber and other HFV companies have submitted to it. The TLC's correspondence with FiveThirtyEight is included in the files `TLC_letter.pdf`, `TLC_letter2.pdf` and `TLC_letter3.pdf`. TLC records requests can be made [here](http://www.nyc.gov/html/tlc/html/passenger/records.shtml).\n\nThis data was used for four FiveThirtyEight stories: [Uber Is Serving New York\u2019s Outer Boroughs More Than Taxis Are](http://fivethirtyeight.com/features/uber-is-serving-new-yorks-outer-boroughs-more-than-taxis-are/), [Public Transit Should Be Uber\u2019s New Best Friend](http://fivethirtyeight.com/features/public-transit-should-be-ubers-new-best-friend/), [Uber Is Taking Millions Of Manhattan Rides Away From Taxis](http://fivethirtyeight.com/features/uber-is-taking-millions-of-manhattan-rides-away-from-taxis/), and [Is Uber Making NYC Rush-Hour Traffic Worse?](http://fivethirtyeight.com/features/is-uber-making-nyc-rush-hour-traffic-worse/).\n\n## The Data\n\nThe dataset contains, roughly, four groups of files:\n\n- Uber trip data from 2014 (April - September), separated by month, with detailed location information\n- Uber trip data from 2015 (January - June), with less fine-grained location information\n- non-Uber FHV (For-Hire Vehicle) trips. The trip information varies by company, but can include day of trip, time of trip, pickup location, driver's for-hire license number, and vehicle's for-hire license number.\n- aggregate ride and vehicle statistics for all FHV companies (and, occasionally, for taxi companies)\n\n### Uber trip data from 2014\n\nThere are six files of raw data on Uber pickups in New York City from April to September 2014. 
The files are separated by month and each has the following columns:\n\n- `Date/Time` : The date and time of the Uber pickup\n- `Lat` : The latitude of the Uber pickup\n- `Lon` : The longitude of the Uber pickup\n- `Base` : The [TLC base company](http://www.nyc.gov/html/tlc/html/industry/base_and_business.shtml) code affiliated with the Uber pickup\n\nThese files are named:\n\n- ```uber-raw-data-apr14.csv```\n- ```uber-raw-data-aug14.csv```\n- ```uber-raw-data-jul14.csv```\n- ```uber-raw-data-jun14.csv```\n- ```uber-raw-data-may14.csv```\n- ```uber-raw-data-sep14.csv```\n\n### Uber trip data from 2015\n\nAlso included is the file `uber-raw-data-janjune-15.csv` This file has the following columns:\n\n- `Dispatching_base_num` : The [TLC base company](http://www.nyc.gov/html/tlc/html/industry/base_and_business.shtml) code of the base that dispatched the Uber\n- `Pickup_date` : The date and time of the Uber pickup\n- `Affiliated_base_num` : The [TLC base company](http://www.nyc.gov/html/tlc/html/industry/base_and_business.shtml) code affiliated with the Uber pickup\n- `locationID` : The pickup location ID affiliated with the Uber pickup\n\nThe `Base` codes are for the following Uber bases:\n\nB02512 : Unter\nB02598 : Hinter\nB02617 : Weiter\nB02682 : Schmecken\nB02764 : Danach-NY\nB02765 : Grun\nB02835 : Dreist\nB02836 : Drinnen\n\nFor coarse-grained location information from these pickups, the file `taxi-zone-lookup.csv` shows the taxi `Zone` (essentially, neighborhood) and `Borough` for each `locationID`.\n\n### Non-Uber FLV trips\n\nThe dataset also contains 10 files of raw data on pickups from 10 for-hire vehicle (FHV) companies. The trip information varies by company, but can include day of trip, time of trip, pickup location, driver's for-hire license number, and vehicle's for-hire license number.\n\nThese files are named:\n\n- ```American_B01362.csv```\n- ```Diplo_B01196.csv```\n- ```Highclass_B01717.csv```\n- ```Skyline_B00111.csv```\n- ```Carmel_B00256.csv```\n- ```Federal_02216.csv```\n- ```Lyft_B02510.csv```\n- ```Dial7_B00887.csv```\n- ```Firstclass_B01536.csv```\n- ```Prestige_B01338.csv```\n\n### Aggregate Statistics\n\nThere is also a file `other-FHV-data-jan-aug-2015.csv` containing daily pickup data for 329 FHV companies from January 2015 through August 2015.\n\nThe file `Uber-Jan-Feb-FOIL.csv` contains aggregated daily Uber trip statistics in January and February 2015.", "VersionNotes": "Fix data", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 360, "CreatorUserId": 731385, "OwnerUserId": NaN, "OwnerOrganizationId": 170.0, "CurrentDatasetVersionId": 793057.0, "CurrentDatasourceVersionId": 814847.0, "ForumId": 1925, "Type": 2, "CreationDate": "11/12/2016 23:01:44", "LastActivityDate": "02/06/2018", "TotalViews": 350601, "TotalDownloads": 44360, "TotalVotes": 602, "TotalKernels": 274}]
| null |
| false | 0 | 825 | 0 | 2,476 | 825 |
||
69173207
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
test_data_org = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
y_train = train_data["Survived"]
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
print("\nNull Values in Training \n{}".format(train_data.isnull().sum()))
print("\nNull Values in Testing \n{}".format(test_data.isnull().sum()))
print("\nDuplicated values in train {}".format(train_data.duplicated().sum()))
print("Duplicated values in test {}".format(test_data.duplicated().sum()))
print("Embarkation per ports \n{}".format(train_data["Embarked"].value_counts()))
# since the most common port is Southampton the chances are that the missing one is from there
train_data["Embarked"].fillna(value="S", inplace=True)
test_data["Fare"].fillna(value=test_data.Fare.mean(), inplace=True)
print(
"Embarkation per ports after filling \n{}".format(
train_data["Embarked"].value_counts()
)
)
import matplotlib.pyplot as plt
import seaborn as sns
mean_age_miss = train_data[train_data["Name"].str.contains("Miss.", na=False)][
"Age"
].mean()
mean_age_mrs = train_data[train_data["Name"].str.contains("Mrs.", na=False)][
"Age"
].mean()
mean_age_mr = train_data[train_data["Name"].str.contains("Mr.", na=False)]["Age"].mean()
mean_age_master = train_data[train_data["Name"].str.contains("Master.", na=False)][
"Age"
].mean()
print("Mean age of Miss. title {}".format(mean_age_miss))
print("Mean age of Mrs. title {}".format(mean_age_mrs))
print("Mean age of Mr. title {}".format(mean_age_mr))
print("Mean age of Master. title {}".format(mean_age_master))
def fill_age(name_age):
name = name_age[0]
age = name_age[1]
if pd.isnull(age):
if "Mr." in name:
return mean_age_mr
if "Mrs." in name:
return mean_age_mrs
if "Miss." in name:
return mean_age_miss
if "Master." in name:
return mean_age_master
if "Dr." in name:
return mean_age_master
if "Ms." in name:
return mean_age_miss
else:
return age
train_data["Age"] = train_data[["Name", "Age"]].apply(fill_age, axis=1)
test_data["Age"] = test_data[["Name", "Age"]].apply(fill_age, axis=1)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
ax1.set_title("Missing values in train")
sns.heatmap(train_data.isnull(), cmap="copper", ax=ax1)
ax2.set_title("Missing values in test")
sns.heatmap(test_data.isnull(), cmap="copper", ax=ax2)
plt.tight_layout()
import os
import zipfile
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
sns.set(style="whitegrid", palette="Set2", font_scale=1.2)
train_data["Cabin"] = pd.Series(
["X" if pd.isnull(ii) else ii[0] for ii in train_data["Cabin"]]
)
test_data["Cabin"] = pd.Series(
["X" if pd.isnull(ii) else ii[0] for ii in test_data["Cabin"]]
)
plt.figure(figsize=(12, 5))
plt.title("Box Plot of Temperatures by Modules")
sns.boxplot(x="Cabin", y="Fare", data=train_data, palette="Set2")
plt.tight_layout()
print(
"Mean Fare of Cabin B {}".format(
train_data[train_data["Cabin"] == "B"]["Fare"].mean()
)
)
print(
"Mean Fare of Cabin C {}".format(
train_data[train_data["Cabin"] == "C"]["Fare"].mean()
)
)
print(
"Mean Fare of Cabin D {}".format(
train_data[train_data["Cabin"] == "D"]["Fare"].mean()
)
)
print(
"Mean Fare of Cabin E {}".format(
train_data[train_data["Cabin"] == "E"]["Fare"].mean()
)
)
def reasign_cabin(cabin_fare):
    # Assign a cabin letter to unknown cabins ("X") using the mean fares per cabin computed above
    cabin = cabin_fare[0]
    fare = cabin_fare[1]
    if cabin == "X":
        if fare >= 113.5:
            return "B"
        if (fare < 113.5) and (fare > 100):
            return "C"
        if (fare < 100) and (fare > 57):
            return "D"
        if (fare < 57) and (fare > 46):
            return "E"  # this bucket corresponds to the mean fare of cabin E
        else:
            return "X"
    else:
        return cabin
train_data["Cabin"] = train_data[["Cabin", "Fare"]].apply(reasign_cabin, axis=1)
test_data["Cabin"] = test_data[["Cabin", "Fare"]].apply(reasign_cabin, axis=1)
plt.figure(figsize=(12, 5))
plt.title("Box Plot of Temperatures by Modules")
sns.boxplot(x="Cabin", y="Fare", data=train_data, palette="Set2")
plt.tight_layout()
print("\nNull Values in Training \n{}".format(train_data.isnull().sum()))
print("\nNull Values in Testing \n{}".format(test_data.isnull().sum()))
print("\nDuplicated values in train {}".format(train_data.duplicated().sum()))
print("Duplicated values in test {}".format(test_data.duplicated().sum()))
fig, axx = plt.subplots(1, 3, figsize=(20, 5))
axx[0].set_title("Amounth of Siblins/Spouses")
sns.countplot(x="SibSp", data=train_data, ax=axx[0])
axx[1].set_title("Amounth of parents/children")
sns.countplot(x="Parch", data=train_data, ax=axx[1])
axx[2].set_title("Distribution of Classes")
sns.countplot(x="Pclass", data=train_data, ax=axx[2])
plt.tight_layout()
def create_alone_feature(SibSp_Parch):
if (SibSp_Parch[0] + SibSp_Parch[1]) == 0:
return 1
else:
return 0
train_data["Alone"] = train_data[["SibSp", "Parch"]].apply(create_alone_feature, axis=1)
train_data["Familiars"] = 1 + train_data["SibSp"] + train_data["Parch"]
test_data["Alone"] = test_data[["SibSp", "Parch"]].apply(create_alone_feature, axis=1)
test_data["Familiars"] = 1 + test_data["SibSp"] + test_data["Parch"]
fig, axx = plt.subplots(2, 3, figsize=(20, 10))
axx[0, 0].set_title("Survivors")
sns.countplot(x="Survived", data=train_data, ax=axx[0, 0])
axx[0, 1].set_title("Survivors by Sex")
sns.countplot(x="Survived", hue="Sex", data=train_data, ax=axx[0, 1])
axx[0, 2].set_title("Survivors by Pclass")
sns.countplot(x="Survived", hue="Pclass", data=train_data, ax=axx[0, 2])
axx[1, 0].set_title("Accompanied survivors")
sns.countplot(x="Survived", hue="Alone", data=train_data, ax=axx[1, 0])
axx[1, 1].set_title("Accompanied survivors")
sns.countplot(x="Familiars", hue="Survived", data=train_data, ax=axx[1, 1])
axx[1, 2].set_title("Alone members by Pclass")
sns.countplot(x="Pclass", hue="Alone", data=train_data, ax=axx[1, 2])
plt.tight_layout()
fig, axx = plt.subplots(1, 3, figsize=(20, 5))
axx[0].set_title("Age of Survivors")
sns.boxplot(x="Survived", y="Age", data=train_data, ax=axx[0])
axx[1].set_title("Survivors by Sex")
sns.boxplot(x="Survived", y="Age", hue="Sex", data=train_data, ax=axx[1])
axx[2].set_title("Survivors by Sex")
sns.boxplot(x="Survived", y="Age", hue="Pclass", data=train_data, ax=axx[2])
plt.tight_layout()
fig, axx = plt.subplots(1, 2, figsize=(16, 5))
axx[0].set_title("Distribution of Fare of Dead ones")
sns.distplot(a=train_data[train_data["Survived"] == 0]["Fare"], ax=axx[0], bins=30)
axx[1].set_title("Distribution of Fare of Survived ones")
sns.distplot(a=train_data[train_data["Survived"] == 1]["Fare"], ax=axx[1], bins=30)
plt.tight_layout()
plt.figure(figsize=(12, 8))
sns.heatmap(train_data.corr(), annot=True)
plt.tight_layout()
categories = {"female": 1, "male": 0}
train_data["Sex"] = train_data["Sex"].map(categories)
test_data["Sex"] = test_data["Sex"].map(categories)
categories = {"S": 1, "C": 2, "Q": 3}
train_data["Embarked"] = train_data["Embarked"].map(categories)
test_data["Embarked"] = test_data["Embarked"].map(categories)
categories = train_data.Cabin.unique()
train_data["Cabin"] = train_data.Cabin.astype("category").cat.codes
test_data["Cabin"] = test_data.Cabin.astype("category").cat.codes
plt.figure(figsize=(14, 8))
sns.heatmap(train_data.corr(), annot=True)
plt.tight_layout()
train_data.head()
# dropping columns
train_data = train_data.drop(
["Name", "Ticket", "PassengerId", "Alone", "Parch", "SibSp", "Embarked"], axis=1
)
test_data = test_data.drop(
["Name", "Ticket", "PassengerId", "Alone", "Parch", "SibSp", "Embarked"], axis=1
)
train_data.head()
from sklearn.preprocessing import MinMaxScaler
# Dropping label
LABEL = "Survived"
y = train_data[LABEL]
train_data = train_data.drop(LABEL, axis=1) # Dropping label to normalize
scaler = MinMaxScaler()
scaled_train = scaler.fit_transform(train_data)
scaled_test = scaler.transform(test_data)
scaled_train = pd.DataFrame(
scaled_train, columns=train_data.columns, index=train_data.index
)
scaled_test = pd.DataFrame(
scaled_test, columns=test_data.columns, index=test_data.index
)
scaled_train.head()
X_train = scaled_train
X_test = scaled_test
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
clf = RandomForestClassifier(n_estimators=100)
# Train the model on the scaled training features
clf.fit(X_train, y_train)
# Predict on the scaled test features
predictions = clf.predict(X_test)
output = pd.DataFrame(
{"PassengerId": test_data_org.PassengerId, "Survived": predictions}
)
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173207.ipynb
| null | null |
[{"Id": 69173207, "ScriptId": 18604584, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7845142, "CreationDate": "07/27/2021 16:46:08", "VersionNumber": 14.0, "Title": "Titanic Competition_170294R", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 253.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 249.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 3,484 | 0 | 3,484 | 3,484 |
||
69173093
|
<jupyter_start><jupyter_text>Battlefront 2 Maps - Small
The dataset is divided into three parts: train, validation and test. The purpose of this dataset is to learn the basics of image classification (to identify the map and the stage of the game from a single image!).
The train set has already been processed using data augmentation.
Code for you to get started is available here: https://www.kaggle.com/code/bingliangli/across-the-stars-vit-pre-trained
I retrieved the images from [AnarchYxNinja](https://www.youtube.com/@AnarchYxNinja)'s videos; he is a legend, please check him out!
Kaggle dataset identifier: battlefront-2-maps-small
<jupyter_script>from __future__ import print_function
import glob
from itertools import chain
import os
import random
import zipfile
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from tqdm.notebook import tqdm
import timm
print(f"Torch: {torch.__version__}")
# Training settings
batch_size = 32
epochs = 20
lr = 3e-5
gamma = 0.7
seed = 42
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
seed_everything(seed)
import torchvision
from torchvision.transforms import ToTensor
train_data = torchvision.datasets.ImageFolder(
"../input/battlefront-2-maps-small/train", transform=ToTensor()
)
valid_data = torchvision.datasets.ImageFolder(
"../input/battlefront-2-maps-small/valid", transform=ToTensor()
)
test_data = torchvision.datasets.ImageFolder(
"../input/battlefront-2-maps-small/test", transform=ToTensor()
)
import torch.utils.data as data
from torch.autograd import Variable
import numpy as np
train_loader = data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = data.DataLoader(valid_data, batch_size=batch_size, shuffle=True)
test_loader = data.DataLoader(test_data, batch_size=batch_size, shuffle=True)
print(len(train_data), len(train_loader))
print(len(valid_data), len(valid_loader))
print(len(test_data), len(test_loader))
from pprint import pprint
model_names = timm.list_models(pretrained=True)
pprint(model_names)
# ## Model
# ### EfficientNetV2 (pre-trained, via timm)
device = "cuda"
model = timm.create_model("efficientnetv2_rw_m", pretrained=True, num_classes=10).to(
device
)
# ### Training
# loss function
criterion = nn.CrossEntropyLoss()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=lr)
# scheduler
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
n_epochs_stop = 3
min_val_loss = 10
# Initialize the early-stopping state up front so these names exist even if the first epoch does not improve
epochs_no_improve = 0
early_stoped = False
best_model = copy.deepcopy(model.state_dict())
epoch_l = []
loss_l = []
acc_l = []
v_loss_l = []
v_acc_l = []
for epoch in range(epochs):
epoch_loss = 0
epoch_accuracy = 0
for data, label in tqdm(train_loader):
data = data.to(device)
label = label.to(device)
output = model(data)
loss = criterion(output, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
acc = (output.argmax(dim=1) == label).float().mean()
epoch_accuracy += acc / len(train_loader)
epoch_loss += loss / len(train_loader)
with torch.no_grad():
epoch_val_accuracy = 0
epoch_val_loss = 0
for data, label in valid_loader:
data = data.to(device)
label = label.to(device)
val_output = model(data)
val_loss = criterion(val_output, label)
acc = (val_output.argmax(dim=1) == label).float().mean()
epoch_val_accuracy += acc / len(valid_loader)
epoch_val_loss += val_loss / len(valid_loader)
epoch_l.append(epoch + 1)
loss_l.append(epoch_loss)
acc_l.append(epoch_accuracy)
v_loss_l.append(epoch_val_loss)
v_acc_l.append(epoch_val_accuracy)
print(
f"Epoch : {epoch+1} - loss : {epoch_loss:.4f} - acc: {epoch_accuracy:.4f} - val_loss : {epoch_val_loss:.4f} - val_acc: {epoch_val_accuracy:.4f}\n"
)
if epoch_val_loss < min_val_loss:
# Saving the model
best_model = copy.deepcopy(model.state_dict())
epochs_no_improve = 0
min_val_loss = epoch_val_loss
early_stoped = False
else:
epochs_no_improve += 1
# Check early stopping condition
if epochs_no_improve == n_epochs_stop:
print("Early stopping!")
model.load_state_dict(best_model)
early_stoped = True
break
if early_stoped:
break
torch.save(model, "./vit_model_pretrained.pt")
y_pred_list = []
y_true_list = []
with torch.no_grad():
for x_batch, y_batch in tqdm(test_loader):
x_batch, y_batch = x_batch.to(device), y_batch.to(device)
y_test_pred = model(x_batch)
_, y_pred_tag = torch.max(y_test_pred, dim=1)
y_pred_list.append(y_pred_tag.cpu().numpy())
y_true_list.append(y_batch.cpu().numpy())
def flatten(new: list, target: list):
for li in target:
for value in list(li):
new.append(value)
y_pred = []
y_true = []
flatten(y_pred, y_pred_list)
flatten(y_true, y_true_list)
from sklearn.metrics import accuracy_score, f1_score
print("Overall accuracy:", accuracy_score(y_true, y_pred))
print("Overall F1:", f1_score(y_true, y_pred, average="weighted"))
from sklearn.metrics import precision_recall_fscore_support as score
precision, recall, fscore, support = score(y_true, y_pred)
print("precision: {}".format(precision))
print("recall: {}".format(recall))
print("fscore: {}".format(fscore))
print("support: {}".format(support))
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
import seaborn as sns
def plot_cm(y_true, y_pred, figsize=(10, 9)):
cm = confusion_matrix(y_true, y_pred, labels=np.unique(y_true))
cm_sum = np.sum(cm, axis=1, keepdims=True)
cm_perc = cm / cm_sum.astype(float) * 100
annot = np.empty_like(cm).astype(str)
nrows, ncols = cm.shape
for i in range(nrows):
for j in range(ncols):
c = cm[i, j]
p = cm_perc[i, j]
if i == j:
s = cm_sum[i]
annot[i, j] = "%.1f%%\n%d/%d" % (p, c, s)
elif c == 0:
annot[i, j] = ""
else:
annot[i, j] = "%.1f%%\n%d" % (p, c)
cm = pd.DataFrame(cm, index=np.unique(y_true), columns=np.unique(y_true))
cm.index.name = "Actual"
cm.columns.name = "Predicted"
fig, ax = plt.subplots(figsize=figsize)
sns.heatmap(cm, cmap="YlGnBu", annot=annot, fmt="", ax=ax)
plot_cm(y_true, y_pred)
display()
loss_l_c = []
acc_l_c = []
v_loss_l_c = []
v_acc_l_c = []
for x in loss_l:
x = x.cpu().detach().numpy()
loss_l_c.append(x)
for x in acc_l:
x = x.cpu().detach().numpy()
acc_l_c.append(x)
for x in v_loss_l:
x = x.cpu().detach().numpy()
v_loss_l_c.append(x)
for x in v_acc_l:
x = x.cpu().detach().numpy()
v_acc_l_c.append(x)
import plotly.graph_objects as go
# Create traces
fig = go.Figure()
fig.add_trace(
go.Scatter(x=epoch_l, y=loss_l_c, mode="lines+markers", name="Train loss")
)
fig.add_trace(
go.Scatter(x=epoch_l, y=acc_l_c, mode="lines+markers", name="Train accuracy")
)
fig.add_trace(
go.Scatter(x=epoch_l, y=v_loss_l_c, mode="lines+markers", name="Validation loss")
)
fig.add_trace(
go.Scatter(x=epoch_l, y=v_acc_l_c, mode="lines+markers", name="Validation accuracy")
)
fig.update_layout(
title="ViT(pre-trained)",
autosize=False,
width=1000,
height=600,
)
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173093.ipynb
|
battlefront-2-maps-small
|
bingliangli
|
[{"Id": 69173093, "ScriptId": 18882242, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6514796, "CreationDate": "07/27/2021 16:45:08", "VersionNumber": 1.0, "Title": "Across the Stars - Efficientnetv2", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 268.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 266.0, "LinesInsertedFromFork": 2.0, "LinesDeletedFromFork": 2.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 266.0, "TotalVotes": 0}]
|
[{"Id": 92021281, "KernelVersionId": 69173093, "SourceDatasetVersionId": 2329876}]
|
[{"Id": 2329876, "DatasetId": 1406379, "DatasourceVersionId": 2371414, "CreatorUserId": 6514796, "LicenseName": "CC0: Public Domain", "CreationDate": "06/13/2021 13:31:38", "VersionNumber": 1.0, "Title": "Battlefront 2 Maps - Small", "Slug": "battlefront-2-maps-small", "Subtitle": "A dataset created from the video game Star Wars: Battlefront II.", "Description": "The dataset are devided into three parts: train, validation and test. The purpose of this dataset is to learn basic of image classification(to identify the map and the stage of the game from a single image!).\n\nThe train set are already being processed using data augmentation.\nCode for you to start are available here: https://www.kaggle.com/code/bingliangli/across-the-stars-vit-pre-trained\n\nI retrived the images from [AnarchYxNinja](https://www.youtube.com/@AnarchYxNinja)'s video, he is a legend, please check him out!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1406379, "CreatorUserId": 6514796, "OwnerUserId": 6514796.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2329876.0, "CurrentDatasourceVersionId": 2371414.0, "ForumId": 1425691, "Type": 2, "CreationDate": "06/13/2021 13:31:38", "LastActivityDate": "06/13/2021", "TotalViews": 1864, "TotalDownloads": 14, "TotalVotes": 5, "TotalKernels": 6}]
|
[{"Id": 6514796, "UserName": "bingliangli", "DisplayName": "Bingliang Li", "RegisterDate": "01/13/2021", "PerformanceTier": 2}]
|
| false | 0 | 2,373 | 0 | 2,542 | 2,373 |
||
69173820
|
<jupyter_start><jupyter_text>Latest Covid19 Data - Kerala, India
The data contains the confirmed, recovered and deceased Covid-19 cases in Kerala, India from **31/01/2020** to **24/07/2021**. This dataset can be used for EDA and time series analysis.
Data scraped from https://dashboard.kerala.gov.in/index.php
PLEASE **UP VOTE** THE DATA, IF YOU FIND IT USEFUL
Kaggle dataset identifier: covid19-latest-data-kerala
<jupyter_code>import pandas as pd
df = pd.read_csv('covid19-latest-data-kerala/covid_data_kerala.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 841 entries, 0 to 840
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 841 non-null object
1 Confirmed 841 non-null float64
2 Recovered 785 non-null float64
3 Deceased 841 non-null float64
dtypes: float64(3), object(1)
memory usage: 26.4+ KB
<jupyter_text>Examples:
{
"Date": "2020-01-31 00:00:00",
"Confirmed": 0,
"Recovered": NaN,
"Deceased": 0
}
{
"Date": "2020-02-01 00:00:00",
"Confirmed": 0,
"Recovered": NaN,
"Deceased": 0
}
{
"Date": "2020-02-02 00:00:00",
"Confirmed": 1,
"Recovered": NaN,
"Deceased": 0
}
{
"Date": "2020-02-03 00:00:00",
"Confirmed": 1,
"Recovered": NaN,
"Deceased": 0
}
<jupyter_script># Hi. This is my first time and I'm new to this analysis.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv("../input/covid19-latest-data-kerala/covid_data_kerala.csv")
data.head()
data.info()
data.shape
# cleaning
data.isna().sum()
data = data.fillna(0)  # fillna returns a new DataFrame, so assign the result back
data.info()
data["Date"].dtypes
data["Date"] = pd.to_datetime(data["Date"])
data.dtypes
# Overall Analysis
datax = data.drop(["Date"], axis=1)
datax.plot()
sns.pairplot(data)
plt.figure(figsize=(15, 5))
sns.barplot(x="Date", y="Confirmed", data=data)
plt.show()
plt.figure(figsize=(15, 5))
sns.scatterplot(x="Date", y="Confirmed", data=data)
plt.show()
plt.figure(figsize=(15, 5))
s = sns.barplot(x="Date", y="Recovered", data=data)
plt.show()
plt.figure(figsize=(15, 5))
sns.scatterplot(x="Date", y="Recovered", data=data)
plt.show()
plt.figure(figsize=(15, 5))
sns.barplot(x="Date", y="Deceased", data=data)
plt.show()
plt.figure(figsize=(15, 5))
sns.scatterplot(x="Date", y="Deceased", data=data)
plt.show()
# yearwise analysis
year = data["Date"].dt.year.unique()
data20 = data[data["Date"].dt.year == year[0]]
data21 = data[data["Date"].dt.year == year[1]]
data20.head()
data21.head()
c20 = data20.Confirmed.sum()
c21 = data21.Confirmed.sum()
r20 = data20.Recovered.sum()
r21 = data21.Recovered.sum()
d20 = data20.Deceased.sum()
d21 = data21.Deceased.sum()
print(
"Confirmed(2020,2021) :",
c20,
",",
c21,
"\n Recovered(2020,2021) :",
r20,
",",
r21,
"\n Deceased(2020,2021) :",
d20,
",",
d21,
)
data20 = data20.set_index("Date")
data21 = data21.set_index("Date")
data20.Confirmed.plot()
data20.Recovered.plot()
data20.Deceased.plot()
data21.Confirmed.plot()
data21.Recovered.plot()
data21.Deceased.plot()
data20.plot()
data21.plot()
sns.barplot(data=data20)
sns.barplot(data=data21)
sns.scatterplot(data=data20)
sns.scatterplot(data=data21)
# monthwise analysis
index = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
monthdata = pd.DataFrame(
{
"Month": index,
"Con2020": data20.resample("M").sum().reset_index().Confirmed,
"Rec2020": data20.resample("M").sum().reset_index().Recovered,
"Dec2020": data20.resample("M").sum().reset_index().Deceased,
"Con2021": data21.resample("M").sum().reset_index().Confirmed,
"Rec2021": data21.resample("M").sum().reset_index().Recovered,
"Dec2021": data21.resample("M").sum().reset_index().Deceased,
}
).set_index("Month")
monthdata
monthdata.info()
plt.figure(figsize=(15, 5))
sns.lineplot(data=monthdata, marker="o")
plt.show()
plt.figure(figsize=(15, 10))
sns.heatmap(data=monthdata, cmap="Reds")
plt.show()
sns.barplot(x=monthdata.index, y=monthdata.Con2020)
sns.barplot(x=monthdata.index, y=monthdata.Rec2020)
sns.barplot(x=monthdata.index, y=monthdata.Dec2020)
sns.barplot(x=monthdata.index, y=monthdata.Con2021)
sns.barplot(x=monthdata.index, y=monthdata.Rec2021)
sns.barplot(x=monthdata.index, y=monthdata.Dec2021)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173820.ipynb
|
covid19-latest-data-kerala
|
anandhuh
|
[{"Id": 69173820, "ScriptId": 18824452, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7979428, "CreationDate": "07/27/2021 16:54:25", "VersionNumber": 4.0, "Title": "Kerala Covid", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 129.0, "LinesInsertedFromPrevious": 39.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 90.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92022510, "KernelVersionId": 69173820, "SourceDatasetVersionId": 2458357}]
|
[{"Id": 2458357, "DatasetId": 1381959, "DatasourceVersionId": 2500767, "CreatorUserId": 6096594, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "07/24/2021 14:35:06", "VersionNumber": 7.0, "Title": "Latest Covid19 Data - Kerala, India", "Slug": "covid19-latest-data-kerala", "Subtitle": "Covid-19 Data from January 31, 2020 to May 22, 2022", "Description": "The data contains the confirmed, recovered and deceased cases of Covid-19 cases in Kerala, India from **31/01/2020** to **24/07/2021** . This dataset can be used for EDA and time series analysis\n\nData scraped from https://dashboard.kerala.gov.in/index.php\n\nPLEASE **UP VOTE** THE DATA, IF YOU FIND IT USEFUL", "VersionNotes": "On 24 July, 2021", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1381959, "CreatorUserId": 6096594, "OwnerUserId": 6096594.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3676550.0, "CurrentDatasourceVersionId": 3730677.0, "ForumId": 1401150, "Type": 2, "CreationDate": "06/01/2021 11:32:00", "LastActivityDate": "06/01/2021", "TotalViews": 8047, "TotalDownloads": 904, "TotalVotes": 87, "TotalKernels": 4}]
|
[{"Id": 6096594, "UserName": "anandhuh", "DisplayName": "Anandhu H", "RegisterDate": "11/04/2020", "PerformanceTier": 4}]
|
|
[{"covid19-latest-data-kerala/covid_data_kerala.csv": {"column_names": "[\"Date\", \"Confirmed\", \"Recovered\", \"Deceased\"]", "column_data_types": "{\"Date\": \"object\", \"Confirmed\": \"float64\", \"Recovered\": \"float64\", \"Deceased\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 841 entries, 0 to 840\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 841 non-null object \n 1 Confirmed 841 non-null float64\n 2 Recovered 785 non-null float64\n 3 Deceased 841 non-null float64\ndtypes: float64(3), object(1)\nmemory usage: 26.4+ KB\n", "summary": "{\"Confirmed\": {\"count\": 841.0, \"mean\": 7788.395957193817, \"std\": 10295.657108885549, \"min\": 0.0, \"25%\": 438.0, \"50%\": 4470.0, \"75%\": 9445.0, \"max\": 55475.0}, \"Recovered\": {\"count\": 785.0, \"mean\": 8251.403821656051, \"std\": 10575.230709175155, \"min\": 0.0, \"25%\": 915.0, \"50%\": 4749.0, \"75%\": 11067.0, \"max\": 99651.0}, \"Deceased\": {\"count\": 841.0, \"mean\": 82.6563614744352, \"std\": 126.91145060944261, \"min\": 0.0, \"25%\": 8.0, \"50%\": 25.0, \"75%\": 122.0, \"max\": 1205.0}}", "examples": "{\"Date\":{\"0\":\"2020-01-31\",\"1\":\"2020-02-01\",\"2\":\"2020-02-02\",\"3\":\"2020-02-03\"},\"Confirmed\":{\"0\":0.0,\"1\":0.0,\"2\":1.0,\"3\":1.0},\"Recovered\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"Deceased\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0}}"}}]
| true | 1 |
<start_data_description><data_path>covid19-latest-data-kerala/covid_data_kerala.csv:
<column_names>
['Date', 'Confirmed', 'Recovered', 'Deceased']
<column_types>
{'Date': 'object', 'Confirmed': 'float64', 'Recovered': 'float64', 'Deceased': 'float64'}
<dataframe_Summary>
{'Confirmed': {'count': 841.0, 'mean': 7788.395957193817, 'std': 10295.657108885549, 'min': 0.0, '25%': 438.0, '50%': 4470.0, '75%': 9445.0, 'max': 55475.0}, 'Recovered': {'count': 785.0, 'mean': 8251.403821656051, 'std': 10575.230709175155, 'min': 0.0, '25%': 915.0, '50%': 4749.0, '75%': 11067.0, 'max': 99651.0}, 'Deceased': {'count': 841.0, 'mean': 82.6563614744352, 'std': 126.91145060944261, 'min': 0.0, '25%': 8.0, '50%': 25.0, '75%': 122.0, 'max': 1205.0}}
<dataframe_info>
RangeIndex: 841 entries, 0 to 840
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 841 non-null object
1 Confirmed 841 non-null float64
2 Recovered 785 non-null float64
3 Deceased 841 non-null float64
dtypes: float64(3), object(1)
memory usage: 26.4+ KB
<some_examples>
{'Date': {'0': '2020-01-31', '1': '2020-02-01', '2': '2020-02-02', '3': '2020-02-03'}, 'Confirmed': {'0': 0.0, '1': 0.0, '2': 1.0, '3': 1.0}, 'Recovered': {'0': None, '1': None, '2': None, '3': None}, 'Deceased': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}}
<end_description>
| 1,245 | 0 | 1,783 | 1,245 |
69173875
|
# ## You're here!
# Welcome to your first competition in the [ITI's AI Pro training program](https://ai.iti.gov.eg/epita/ai-engineer/)! We hope you enjoy and learn as much as we did preparing this competition.
# ## Introduction
# In the competition, it's required to predict the `Severity` of a car crash given info about the crash, e.g., location.
# This is the getting started notebook. Things are kept simple so that it's easier to understand the steps and modify it.
# Feel free to `Fork` this notebook and share it with your modifications **OR** use it to create your submissions.
# ### Prerequisites
# You should know how to use python and a little bit of Machine Learning. You can apply the techniques you learned in the training program and submit the new solutions!
# ### Checklist
# You can participate in this competition the way you prefer. However, I recommend following these steps if this is your first time joining a competition on Kaggle.
# * Fork this notebook and run the cells in order.
# * Submit this solution.
# * Make changes to the data processing step as you see fit.
# * Submit the new solutions.
# *You can submit up to 5 submissions per day. You can select only one of the submission you make to be considered in the final ranking.*
# Don't hesitate to leave a comment or contact me if you have any question!
# ## Import the libraries
# We'll use `pandas` to load and manipulate the data. Other libraries will be imported in the relevant sections.
import pandas as pd
import os
import numpy as np
# ## Exploratory Data Analysis
# In this step, one should load the data and analyze it. However, I'll load the data and do minimal analysis. You are encouraged to do thorough analysis!
# Let's load the data using `pandas` and have a look at the generated `DataFrame`.
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
df = pd.read_csv(os.path.join(dataset_path, "train.csv"))
print("The shape of the dataset is {}.\n\n".format(df.shape))
df.head()
# We've got 6407 examples in the dataset with 14 features, 1 ID, and the `Severity` of the crash.
# By looking at the features and a sample from the data, the features look to be of numerical and categorical types. What about some descriptive statistics?
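# A quick sketch of how those statistics can be produced (assuming `df` is the training DataFrame loaded above);
# the numerical summary referenced later in this notebook comes from output like this.
df.describe()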
df["date"] = pd.to_datetime(df["timestamp"]).dt.date
df["Time"] = pd.to_datetime(df["timestamp"]).dt.time
df["Year"] = pd.to_datetime(df["timestamp"]).dt.year
df["Month"] = pd.to_datetime(df["timestamp"]).dt.month
df["Day"] = pd.to_datetime(df["timestamp"]).dt.day
df = df.drop(columns="ID")
df = df.drop(columns="timestamp")
df["Bump"] = [int(d) for d in df["Bump"]]
df["Crossing"] = [int(d) for d in df["Crossing"]]
df["Give_Way"] = [int(d) for d in df["Give_Way"]]
df["Junction"] = [int(d) for d in df["Junction"]]
df["No_Exit"] = [int(d) for d in df["No_Exit"]]
df["Railway"] = [int(d) for d in df["Railway"]]
df["Roundabout"] = [int(d) for d in df["Roundabout"]]
df["Stop"] = [int(d) for d in df["Stop"]]
df["Amenity"] = [int(d) for d in df["Amenity"]]
df["Side"] = [x if x != "L" else 0 for x in df["Side"]]
df["Side"] = [x if x != "R" else 1 for x in df["Side"]]
df1 = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
print("The shape of the dataset is {}.\n\n".format(df1.shape))
df1 = df1.drop_duplicates(["Year", "Month", "Day"], keep="last")
import xml.etree.ElementTree as et
xtree = et.parse(os.path.join(dataset_path, "holidays.xml"))
xroot = xtree.getroot()
df_cols = ["date", "description"]
rows = []
for node in xroot:
s_description = node.find("description").text
s_date = node.find("date").text
rows.append({"description": s_description, "date": s_date})
df2 = pd.DataFrame(rows, columns=df_cols)
df2["date"] = pd.to_datetime(df2["date"]).dt.date
df5 = pd.merge(df, df2, on="date", how="left")
df5["description"] = [0 if x != "" else 1 for x in df5["description"]]
df5 = pd.merge(df5, df1, on=["Year", "Month", "Day"], how="left")
df5["Wind_Chill(F)"] = df5["Wind_Chill(F)"].fillna(np.mean(df5["Wind_Chill(F)"]))
df5["Precipitation(in)"] = df5["Precipitation(in)"].fillna(
np.mean(df5["Precipitation(in)"])
)
df5["Temperature(F)"] = df5["Temperature(F)"].fillna(np.mean(df5["Temperature(F)"]))
df5["Humidity(%)"] = df5["Humidity(%)"].fillna(np.mean(df5["Humidity(%)"]))
df5["Wind_Speed(mph)"] = df5["Wind_Speed(mph)"].fillna(np.mean(df5["Wind_Speed(mph)"]))
df5["Visibility(mi)"] = df5["Visibility(mi)"].fillna(np.mean(df5["Visibility(mi)"]))
df5["Weather_Condition"] = df5["Weather_Condition"].fillna(
df5["Weather_Condition"].value_counts().index[0]
)
WC = pd.get_dummies(df5["Weather_Condition"])
df5 = pd.concat([df5, WC], axis=1)
df5 = df5.drop(columns=["Weather_Condition", "date", "Time", "Selected"])
from sklearn.model_selection import train_test_split
from boruta import BorutaPy
train_df, val_df = train_test_split(
df5, test_size=0.2, random_state=42
) # Try adding `stratify` here
X_train = train_df.drop(columns=["Severity"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["Severity"])
y_val = val_df["Severity"]
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
br_sel = BorutaPy(classifier, n_estimators="auto", verbose=2, random_state=1)
br_sel.fit(np.array(X_train), np.array(y_train))
print(br_sel.support_)
print(br_sel.n_features_)
sel_fe = pd.DataFrame({"Fet": list(X_train.columns), "Ran": br_sel.ranking_})
sel_fe.sort_values(by="Ran")
# The output above shows Boruta's ranking of the features. I'll use a subset of the top-ranked features to demonstrate how to train the model and make submissions. **However, you shouldn't stop at this feature set if you want to make it to the top of the leaderboard.**
# ## Data Splitting
# Now it's time to split the dataset for the training step. Typically the dataset is split into 3 subsets, namely, the training, validation and test sets. In our case, the test set is already predefined. So we'll split the "training" set into training and validation sets with 0.8:0.2 ratio.
# *Note: a good way to generate reproducible results is to set the seed for the algorithms that depend on randomization. This is done with the `random_state` argument in the `train_test_split` call above.*
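# An optional variant of the split done earlier: passing `stratify` keeps the
# Severity class ratios identical in the training and validation subsets
# (the variable names below are illustrative only).
train_df_strat, val_df_strat = train_test_split(
    df5, test_size=0.2, random_state=42, stratify=df5["Severity"]
)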
X_train = X_train[
[
"Lat",
"Lng",
"Partly Cloudy",
"Distance(mi)",
"Crossing",
"Fair",
"Wind_Speed(mph)",
"Humidity(%)",
"Precipitation(in)",
"Stop",
"Amenity",
"Side",
"Year",
"Month",
"Wind_Chill(F)",
]
]
X_val = X_val[
[
"Lat",
"Lng",
"Partly Cloudy",
"Distance(mi)",
"Crossing",
"Fair",
"Wind_Speed(mph)",
"Humidity(%)",
"Precipitation(in)",
"Stop",
"Amenity",
"Side",
"Year",
"Month",
"Wind_Chill(F)",
]
]
# As pointed out earlier, I'll use the selected features to train the classifier. **However, you shouldn't use this feature set alone to make the final submission if you want to make it to the top of the leaderboard.**
# ## Model Training
# Let's train a model with the data! We'll train a Random Forest Classifier to demonstrate the process of making submissions.
classifier = classifier.fit(X_train, y_train)
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
# Now let's test our classifier on the validation dataset and see the accuracy.
# Well. That's a good start, right? A classifier that predicts all examples' `Severity` as 2 will get around 0.63. You should get a better score as you add more features and improve the data preprocessing.
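# A quick check of that baseline on our own validation split: predict Severity == 2
# for every example and measure the accuracy (the exact value depends on the split).
baseline_accuracy = (y_val == 2).mean()
print("Majority-class (Severity = 2) baseline accuracy:", baseline_accuracy)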
# ## Submission File Generation
# We have built a model and we'd like to submit our predictions on the test set! In order to do that, we'll load the test set, predict the class and save the submission file.
# First, we'll load the data.
test_df = pd.read_csv(os.path.join(dataset_path, "test.csv"))
test_df.head()
# Note that the test set has the same features and doesn't have the `Severity` column.
# At this stage one must **NOT** forget to apply the same processing done on the training set on the features of the test set.
# Now we'll add `Severity` column to the test `DataFrame` and add the values of the predicted class to it.
# **I'll select the same features here as I did for the training set. DO NOT forget to change this step as you change the preprocessing of the training data.**
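# A minimal sketch of that idea: put the shared steps in one helper and call it on
# both the train and test frames so they cannot drift apart. The helper below is
# illustrative only and covers just the date features, not the full pipeline above.
def add_date_parts(frame):
    ts = pd.to_datetime(frame["timestamp"])
    return frame.assign(
        date=ts.dt.date, Year=ts.dt.year, Month=ts.dt.month, Day=ts.dt.day
    )
# e.g. processed_test = add_date_parts(pd.read_csv(os.path.join(dataset_path, "test.csv")))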
test_df["date"] = pd.to_datetime(test_df["timestamp"]).dt.date
test_df["Time"] = pd.to_datetime(test_df["timestamp"]).dt.time
test_df["Year"] = pd.to_datetime(test_df["timestamp"]).dt.year
test_df["Month"] = pd.to_datetime(test_df["timestamp"]).dt.month
test_df["Day"] = pd.to_datetime(test_df["timestamp"]).dt.day
test_df = test_df.drop(columns="timestamp")
test_df["Bump"] = [int(d) for d in test_df["Bump"]]
test_df["Crossing"] = [int(d) for d in test_df["Crossing"]]
test_df["Give_Way"] = [int(d) for d in test_df["Give_Way"]]
test_df["Junction"] = [int(d) for d in test_df["Junction"]]
test_df["No_Exit"] = [int(d) for d in test_df["No_Exit"]]
test_df["Railway"] = [int(d) for d in test_df["Railway"]]
test_df["Roundabout"] = [int(d) for d in test_df["Roundabout"]]
test_df["Stop"] = [int(d) for d in test_df["Stop"]]
test_df["Amenity"] = [int(d) for d in test_df["Amenity"]]
test_df["Side"] = [x if x != "L" else 0 for x in test_df["Side"]]
test_df["Side"] = [x if x != "R" else 1 for x in test_df["Side"]]
test_df5 = pd.merge(test_df, df2, on="date", how="left")
test_df5["description"] = [0 if x != "" else 1 for x in test_df5["description"]]
test_df5 = pd.merge(test_df5, df1, on=["Year", "Month", "Day"], how="left")
test_df5["Wind_Chill(F)"] = test_df5["Wind_Chill(F)"].fillna(
np.mean(test_df5["Wind_Chill(F)"])
)
test_df5["Precipitation(in)"] = test_df5["Precipitation(in)"].fillna(
np.mean(test_df5["Precipitation(in)"])
)
test_df5["Temperature(F)"] = test_df5["Temperature(F)"].fillna(
np.mean(test_df5["Temperature(F)"])
)
test_df5["Humidity(%)"] = test_df5["Humidity(%)"].fillna(
np.mean(test_df5["Humidity(%)"])
)
test_df5["Wind_Speed(mph)"] = test_df5["Wind_Speed(mph)"].fillna(
np.mean(test_df5["Wind_Speed(mph)"])
)
test_df5["Visibility(mi)"] = test_df5["Visibility(mi)"].fillna(
np.mean(test_df5["Visibility(mi)"])
)
test_df5["Weather_Condition"] = test_df5["Weather_Condition"].fillna(
test_df5["Weather_Condition"].value_counts().index[0]
)
WC = pd.get_dummies(test_df5["Weather_Condition"])
test_df5 = pd.concat([test_df5, WC], axis=1)
test_df5 = test_df5.drop(columns=["Weather_Condition", "date", "Time", "Selected"])
# Now we're ready to generate the submission file. The submission file needs the columns `ID` and `Severity` only.
X_test = test_df5.drop(columns=["ID"])
print(test_df5.shape)
# You should update/remove the next line once you change the features used for training
X_test = X_test[
[
"Lat",
"Lng",
"Partly Cloudy",
"Distance(mi)",
"Crossing",
"Fair",
"Wind_Speed(mph)",
"Humidity(%)",
"Precipitation(in)",
"Stop",
"Amenity",
"Side",
"Year",
"Month",
"Wind_Chill(F)",
]
]
print(X_test.shape)
y_test_predicted = classifier.predict(X_test)
print(y_test_predicted.shape)
test_df5["Severity"] = y_test_predicted
test_df.head()
test_df5[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
| false | 0 | 3,711 | 0 | 3,711 | 3,711 |
||
69173351
|
import pandas as pd
import os
import numpy as np
from bs4 import BeautifulSoup
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
train_df = pd.read_csv("/kaggle/input/car-crashes-severity-prediction/train.csv")
label_encoder = LabelEncoder()
train_df["Side"] = label_encoder.fit_transform(train_df["Side"])
train_df["Bump"] = label_encoder.fit_transform(train_df["Bump"])
train_df["Crossing"] = label_encoder.fit_transform(train_df["Crossing"])
train_df["Give_Way"] = label_encoder.fit_transform(train_df["Give_Way"])
train_df["Junction"] = label_encoder.fit_transform(train_df["Junction"])
train_df["No_Exit"] = label_encoder.fit_transform(train_df["No_Exit"])
train_df["Railway"] = label_encoder.fit_transform(train_df["Railway"])
train_df["Roundabout"] = label_encoder.fit_transform(train_df["Roundabout"])
train_df["Stop"] = label_encoder.fit_transform(train_df["Stop"])
train_df["Amenity"] = label_encoder.fit_transform(train_df["Amenity"])
train_df["Lat"] = train_df["Lat"] / train_df["Lat"].abs().max()
train_df["Lng"] = train_df["Lng"] / train_df["Lng"].abs().max()
train_df.head()
train_df["Year"] = pd.DatetimeIndex(train_df["timestamp"]).year
train_df["Month"] = pd.DatetimeIndex(train_df["timestamp"]).month
train_df["Day"] = pd.DatetimeIndex(train_df["timestamp"]).day
train_df["Hour"] = pd.DatetimeIndex(train_df["timestamp"]).hour
train_df.head()
test_df = pd.read_csv("/kaggle/input/car-crashes-severity-prediction/test.csv")
test_df["Side"] = label_encoder.fit_transform(test_df["Side"])
test_df["Bump"] = label_encoder.fit_transform(test_df["Bump"])
test_df["Crossing"] = label_encoder.fit_transform(test_df["Crossing"])
test_df["Give_Way"] = label_encoder.fit_transform(test_df["Give_Way"])
test_df["Junction"] = label_encoder.fit_transform(test_df["Junction"])
test_df["No_Exit"] = label_encoder.fit_transform(test_df["No_Exit"])
test_df["Railway"] = label_encoder.fit_transform(test_df["Railway"])
test_df["Roundabout"] = label_encoder.fit_transform(test_df["Roundabout"])
test_df["Stop"] = label_encoder.fit_transform(test_df["Stop"])
test_df["Amenity"] = label_encoder.fit_transform(test_df["Amenity"])
test_df.head()
test_df["Year"] = pd.DatetimeIndex(test_df["timestamp"]).year
test_df["Month"] = pd.DatetimeIndex(test_df["timestamp"]).month
test_df["Day"] = pd.DatetimeIndex(test_df["timestamp"]).day
test_df["Hour"] = pd.DatetimeIndex(test_df["timestamp"]).hour
test_df.head()
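# Parse holidays.xml into a small date/description table so holiday dates can be
# joined onto the crash records later.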
file = open("/kaggle/input/car-crashes-severity-prediction/holidays.xml", "r")
contents = file.read()
soup = BeautifulSoup(contents, "xml")
date = soup.find_all("date")
des = soup.find_all("description")
data = []
for i in range(0, len(date)):
rows = [date[i].get_text(), des[i].get_text()]
data.append(rows)
holidays_df = pd.DataFrame(data, columns=["Date", "Description"])
holidays_df.head()
holidays_df["Year"] = pd.DatetimeIndex(holidays_df["Date"]).year
holidays_df["Month"] = pd.DatetimeIndex(holidays_df["Date"]).month
# holidays_df['week']=pd.DatetimeIndex(holidays_df['Date']).weekday
holidays_df["Day"] = pd.DatetimeIndex(holidays_df["Date"]).day
holidays_df.head(10)
weather_df = pd.read_csv(
"/kaggle/input/car-crashes-severity-prediction/weather-sfcsv.csv"
)
weather_df["Selected"] = label_encoder.fit_transform(weather_df["Selected"])
weather_df["Weather_Condition"] = label_encoder.fit_transform(
weather_df["Weather_Condition"].astype(str)
)
weather_df["Weather_Condition"] = (
weather_df["Weather_Condition"] / weather_df["Weather_Condition"].abs().max()
)
weather_df.head(10)
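# Left-join holidays onto the training data by calendar date, and weather by date and hour.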
train_holiday_df = pd.merge(
train_df,
holidays_df,
how="left",
left_on=["Year", "Month", "Day"],
right_on=["Year", "Month", "Day"],
)
train_holiday_df["week"] = pd.DatetimeIndex(train_holiday_df["timestamp"]).weekday
train_holiday_df.head()
train_weather_df = pd.merge(
train_df,
weather_df,
how="left",
left_on=["Year", "Month", "Day", "Hour"],
right_on=["Year", "Month", "Day", "Hour"],
)
train_weather_df = train_weather_df.drop_duplicates(subset="ID", keep="last")
train_weather_df.head()
train_holiday_weather_df = pd.merge(
train_weather_df,
train_holiday_df,
how="left",
left_on=[
"ID",
"Lat",
"Lng",
"Bump",
"Distance(mi)",
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Stop",
"Amenity",
"Side",
"Severity",
"timestamp",
"Year",
"Month",
"Day",
"Hour",
],
right_on=[
"ID",
"Lat",
"Lng",
"Bump",
"Distance(mi)",
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Stop",
"Amenity",
"Side",
"Severity",
"timestamp",
"Year",
"Month",
"Day",
"Hour",
],
)
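# A crash is flagged as happening on a holiday when the join above found a matching
# holiday Description for its date.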
train_holiday_weather_df["isHoliday"] = ~train_holiday_weather_df[
"Description"
].isnull()
train_holiday_weather_df["isHoliday"] = label_encoder.fit_transform(
train_holiday_weather_df["isHoliday"]
)
train_holiday_weather_df.tail(10)
train_holiday_weather_df.drop(
columns=[
"Bump",
"Roundabout",
"Distance(mi)",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Amenity",
"timestamp",
"Year",
"Month",
"Day",
"Hour",
"Wind_Chill(F)",
"Precipitation(in)",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"Visibility(mi)",
"Selected",
"Date",
"Description",
"week",
],
inplace=True,
)
train_holiday_weather_df.tail(10)
train_holiday_weather_df.to_csv("final_train_data.csv")
test_holiday_df = pd.merge(
test_df,
holidays_df,
how="left",
left_on=["Year", "Month", "Day"],
right_on=["Year", "Month", "Day"],
)
test_holiday_df["week"] = pd.DatetimeIndex(test_holiday_df["timestamp"]).weekday
test_holiday_df.head()
test_weather_df = pd.merge(
test_df,
weather_df,
how="left",
left_on=["Year", "Month", "Day", "Hour"],
right_on=["Year", "Month", "Day", "Hour"],
)
test_weather_df = test_weather_df.drop_duplicates(subset="ID", keep="last")
test_weather_df.head()
test_holiday_weather_df = pd.merge(
test_weather_df,
test_holiday_df,
how="left",
left_on=[
"ID",
"Lat",
"Lng",
"Bump",
"Distance(mi)",
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Stop",
"Amenity",
"Side",
"timestamp",
"Year",
"Month",
"Day",
"Hour",
],
right_on=[
"ID",
"Lat",
"Lng",
"Bump",
"Distance(mi)",
"Crossing",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Roundabout",
"Stop",
"Amenity",
"Side",
"timestamp",
"Year",
"Month",
"Day",
"Hour",
],
)
test_holiday_weather_df["isHoliday"] = ~test_holiday_weather_df["Description"].isnull()
test_holiday_weather_df["isHoliday"] = label_encoder.fit_transform(
test_holiday_weather_df["isHoliday"]
)
test_holiday_weather_df.head()
test_holiday_weather_df.drop(
columns=[
"Bump",
"Roundabout",
"Distance(mi)",
"Give_Way",
"Junction",
"No_Exit",
"Railway",
"Amenity",
"timestamp",
"Year",
"Month",
"Day",
"Hour",
"Wind_Chill(F)",
"Precipitation(in)",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"Visibility(mi)",
"Selected",
"Date",
"Description",
"week",
],
inplace=True,
)
test_holiday_weather_df.head()
test_holiday_weather_df.to_csv("final_test_data.csv")
train_holiday_weather_df.drop(columns="ID").describe()
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(
train_holiday_weather_df, test_size=0.2, random_state=42
) # Try adding `stratify` here
X_train = train_df.drop(columns=["ID", "Severity"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["ID", "Severity"])
y_val = val_df["Severity"]
X_train = X_train[["Lat", "Lng", "Crossing", "Stop", "Weather_Condition", "isHoliday"]]
X_val = X_val[["Lat", "Lng", "Crossing", "Stop", "Weather_Condition", "isHoliday"]]
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
# Train the classifier
classifier = classifier.fit(X_train, y_train)
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
test_holiday_weather_df.shape
X_test = test_holiday_weather_df.drop(columns=["ID"])
# You should update/remove the next line once you change the features used for training
X_test = X_test[["Lat", "Lng", "Crossing", "Stop", "Weather_Condition", "isHoliday"]]
y_test_predicted = classifier.predict(X_test)
test_holiday_weather_df["Severity"] = y_test_predicted
test_holiday_weather_df.head()
test_holiday_weather_df[["ID", "Severity"]].to_csv("submission.csv", index=False)
| false | 0 | 3,035 | 0 | 3,035 | 3,035 |
||
69173119
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import warnings
warnings.filterwarnings("ignore")
# plot
plt.rcParams["figure.figsize"] = (15, 9)
sns.set_palette("gist_earth")
# Read datasets from csv
train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv")
# Merge the 2 dataframes for EDA and feature engineering
full_dataset = pd.concat([train, test], axis=0, sort=True)
# Set PassengerId as Index
full_dataset.set_index("PassengerId", drop=False, inplace=True)
train = full_dataset[:891]
# Identify Missing Values
nan = full_dataset.isnull().sum()
idx_nan = nan.mask(nan == 0).dropna().index
sns.heatmap(full_dataset[idx_nan].transpose().isnull(), cmap="Greens", cbar=False)
nan[idx_nan].drop("Survived").sort_values()
print(np.count_nonzero(full_dataset["Ticket"].unique()))
def parse_ticket(str1):
"""
Function to parse the Letter part of the Ticket code
"""
m = re.search(r"(.*)(\s\d|\s\d{4,7}$)", str1)
s = re.search(r"[A-Z]+", str1)
if (
m
): # removing non alphanumeric characters and binding the numbers and letters before the space
str2 = m.group(1)
n = re.search(
r"([A-Z]+)[^A-Z0-9]*([A-Z]+)*[^A-Z0-9]*([A-Z0-9]*)[^A-Z]*([A-Z]*)*", str2
)
new_str = ""
if n:
if n.group(1):
new_str += n.group(1)
if n.group(2) or n.group(3):
if n.group(2):
new_str += n.group(2)
if n.group(3):
new_str += n.group(3)
if n.group(4):
new_str += n.group(4)
elif s:
new_str = s.group(0) # Ticket with letters only
else:
new_str = "XXX" # Ticket with only numercial values
return new_str
full_dataset["Ticket_short"] = full_dataset.Ticket.map(parse_ticket)
full_dataset["Ticket_short"]
# Cabin
def parse_Cabin(cabin):
if type(cabin) == str:
m = re.search(r"([A-Z])+", cabin)
return m.group(1)
else:
return "X"
full_dataset["Cabin_short"] = full_dataset["Cabin"].map(parse_Cabin)
# Fare
# Fare Adjustment
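# The recorded Fare applies to the whole ticket (often a family or group), so divide
# by the number of passengers sharing the ticket to approximate a per-person fare.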
fare_original = full_dataset["Fare"].copy()
dict_ticket_size = dict(full_dataset.groupby("Ticket").Fare.count())
ticket_size = full_dataset["Ticket"].map(dict_ticket_size)
full_dataset["Fare"] = full_dataset.Fare / ticket_size
# Plot Fare Adjustment
fig, (ax0, ax1) = plt.subplots(2)
ax0.hist(fare_original.dropna(), bins=80, color="green")
ax0.set_xlabel("Fare(Original)")
ax1.hist(full_dataset["Fare"].dropna(), bins=80, color="green")
ax1.set_xlabel("Fare (Adjusted)")
# Calculate mean fare cost for each Passenger Class
dict_fare_by_Pclass = dict(full_dataset.groupby("Pclass").Fare.mean())
# fill value according to Passenger Class
missing_fare = full_dataset.loc[full_dataset.Fare.isnull(), "Pclass"].map(
dict_fare_by_Pclass
)
full_dataset.loc[full_dataset.Fare.isnull(), "Fare"] = missing_fare
# Descriptive Statistics of the full dataset
display(full_dataset.describe())
print(f"survived: {full_dataset.Survived.mean()*100:.2f}%")
# EDA - Distributions
var_to_plot = ["Pclass", "Sex", "SibSp", "Parch", "Embarked", "Survived"]
# Plot Categorical Var
fig, axs = plt.subplots(4, 3, figsize=(15, 12))
for i, key in enumerate(var_to_plot):
sns.countplot(key, data=full_dataset, ax=axs[i // 3, i % 3], palette="Set2")
# Plot Age
plt.subplot2grid((4, 3), (2, 0), rowspan=1, colspan=3)
sns.distplot(
full_dataset.Age.dropna(), bins=range(0, 80, 2), kde=False, color="darkblue"
)
plt.xlabel("Age")
# Plot Fare
plt.subplot2grid((4, 3), (3, 0), rowspan=1, colspan=3)
sns.distplot(full_dataset.Fare.dropna(), bins=100, kde=False, color="darkblue")
plt.xlabel("Fare")
plt.tight_layout()
# Plot all categorical features with Survival rate
var_to_plot = ["Pclass", "Sex", "SibSp", "Parch", "Embarked", "Cabin_short"]
f, axs = plt.subplots(3, 5, sharey=True)
coord = [(0, 0), (0, 2), (1, 0), (1, 2), (2, 0), (2, 2)]
for i, key in enumerate(var_to_plot): # except feature Survived
plt.subplot2grid((3, 5), (coord[i]), rowspan=1, colspan=2)
sns.barplot(data=full_dataset, x=key, y="Survived", color="red")
plt.axhline(y=0.3838, color="k", linestyle="--")
# Plot Correlation
corr = pd.DataFrame(full_dataset.corr()["Survived"][:-1])
plt.subplot2grid((3, 5), (0, 4), rowspan=3, colspan=1)
sns.heatmap(corr, cmap="BrBG", annot=True, annot_kws={"fontsize": 12})
plt.tight_layout()
# **Results from the above analysis of the graphs:**
# * Sex seems to have strong predictive power, which makes sense given the "women and children first" protocol for boarding the lifeboats (see the quick numeric check after this list).
# * Pclass (passenger class) and Fare also show a moderate correlation with Survival. Higher-class passengers had cabins and activities closer to the deck, and thus closer to the lifeboats.
# * It is surprising to find no significant linear correlation between Age and Survived; their relationship may simply not be linear.
# * Cabin seems to have some relationship with survival, although this feature has many NaN values. Perhaps those NaNs can be guessed after looking into Cabin's relationships with Ticket, Embarked and Pclass.
# * Passengers who embarked at C show a significantly higher survival rate than those who embarked at S, with relatively low variance. There may be a relationship between where passengers boarded the Titanic and where they stayed on board.
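# A quick numeric check of the Sex and Pclass effects described above, using only the
# training rows (the first 891 passengers, where Survived is known):
print(train.groupby(["Sex", "Pclass"]).Survived.mean().unstack())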
# Create DataFrame Features to record potential predictors for later model training
features = pd.DataFrame()
features["Pclass"] = full_dataset["Pclass"]
features["Fare"] = full_dataset["Fare"]
features["Sex"] = full_dataset["Sex"]
d = dict(full_dataset["Ticket_short"].value_counts())
ticket_count = full_dataset["Ticket_short"].map(d)
# Show % survived by Ticket
display(
full_dataset.groupby("Ticket_short")
.Survived.aggregate(["mean", "count"])
.dropna()
.sort_values("count")
.transpose()
)
# Plot % survived by Ticket, droping those tickets with <10 count
sns.barplot(data=full_dataset[ticket_count > 10], x="Ticket_short", y="Survived")
plt.axhline(y=0.3838, color="k", linestyle="--")
features["A5"] = (full_dataset["Ticket_short"] == "A5").astype(int)
features["PC"] = (full_dataset["Ticket_short"] == "PC").astype(int)
# Plot number of survived passengers by PClass, Sex and Age
facet = sns.FacetGrid(
full_dataset, row="Pclass", col="Sex", hue="Survived", aspect=2, palette="Set1"
)
facet.map(plt.hist, "Age", histtype="step", bins=np.arange(0, 80, 4))
facet.add_legend()
# Create Age decile bins (10 quantile-based groups)
Age_quartile = pd.qcut(full_dataset.Age, 10)
# Plot age bins by sex with survival rate
sns.barplot(data=full_dataset, x=Age_quartile, y="Survived", hue="Sex")
plt.axhline(y=0.3838, color="k", linestyle="--")
plt.xticks(rotation=30)
plt.title("Across All Classes")
# **Feature Engineering part 1**
# Parse Titles from Names
def parse_title(str):
m = re.search(", (\w+ *\w*)\.", str)
return m.group(1)
title = full_dataset.Name.map(parse_title)
title.unique()
# Simplify title groups
dict_Title = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir": "Royalty",
"Dr": "Officer",
"Rev": "Officer",
"the Countess": "Royalty",
"Dona": "Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr": "Mr",
"Mrs": "Mrs",
"Miss": "Miss",
"Master": "Master",
"Lady": "Royalty",
}
title = title.map(dict_Title)
# Plot the distribution of Age by Title
plt.figure(figsize=(14, 6))
sns.violinplot(x=title, y=full_dataset["Age"])
# Calculate mean age of each title group
df_title = pd.DataFrame(title).join(full_dataset[["Age", "Survived"]])
dict_age = df_title.groupby("Name").Age.mean()
# Fill in Age according to passenger's title
idx = full_dataset.Age.isnull()
full_dataset.loc[idx, "Age"] = df_title.loc[idx, "Name"].map(dict_age)
# Plot title with Survived
sns.barplot(data=df_title, x="Name", y="Survived")
plt.axhline(y=0.3838, color="k", linestyle="--")
# Record useful features in features dataframe
features["Title"] = df_title["Name"]
features["Child"] = (full_dataset["Age"] <= 14).astype(int)
# **Feature Engineering Part 2**
# function to parse surname of the passengers
def parse_surname(name):
return name.split(",")[0]
# Calculate Family Size
family = pd.DataFrame(full_dataset[["Parch", "SibSp", "Ticket"]])
family["Family_size"] = 1 + family.Parch + family.SibSp
# Parse Surname from Name
family["Surname"] = full_dataset.Name.map(parse_surname)
# Surname Code and Surname Size
dict_scount = dict(family.groupby("Surname").Family_size.count())
dict_scode = dict(zip(dict_scount.keys(), range(len(dict_scount))))
family["Surname_code"] = family["Surname"].map(dict_scode)
family["Surname_count"] = family["Surname"].map(dict_scount)
# Examples with common surname
display(full_dataset[family.Surname == "Smith"])
#
# Function to judge whether passengers are likely to belong to the same family.
# Input: DataFrame with passenger Surname and Ticket
# Return: dict mapping each ticket string to a family code within that surname
#
def tick2fam_gen(df):
# initialize ticket dict
dict_tick2fam = {"000000": 0}
fam_counter = 0
for i in df.index:
keys = list(dict_tick2fam.keys())
chk_key = df.loc[i, "Ticket"]
for key in keys:
if len(chk_key) == len(key): # if their tickets have high similarity
if (chk_key[-4:].isdigit()) & (key[-4:].isdigit()):
if (chk_key[:-2] == key[:-2]) & (
np.abs(int(chk_key[-2:]) - int(key[-2:])) <= 10
):
dict_tick2fam[chk_key] = dict_tick2fam[key]
break
if key == keys[-1]: # no match, assign a new code to the passenger
fam_counter += 1
dict_tick2fam[chk_key] = str(fam_counter)
return dict_tick2fam
# Single out Surnames with size > true family size (may have more than 1 family involved)
surname2chk = family[family["Family_size"] < family["Surname_count"]].Surname.unique()
# chk_surname2 = family_infer[family['FamilySize'] > family['SurnameSize']].Surname.unique() # unidentified fam
# Regrouping Families according to Family Size and Ticket.
family["Surname_adj"] = family["Surname"] # new column for corrected family_group
for s in surname2chk:
family_regroup = family[family["Surname"] == s] # get family with specific surname
fam_code_dict = tick2fam_gen(
family_regroup
) # pass in df to get family codes within the same surname
for idx in family_regroup.index: # assign family code 1by1
curr_ticket = full_dataset.loc[idx].Ticket
fam_code = fam_code_dict[curr_ticket]
if (
family_regroup.loc[idx, "Family_size"] == 1
): # for passengers traveling alone
# relatives that shares surname and ticket, which Parch and SibSp failed to record
if family_regroup.Ticket.value_counts()[curr_ticket] > 1:
family.loc[idx, "Surname_adj"] = s + "-hidfam" + fam_code
# single traveler
else:
family.loc[idx, "Surname_adj"] = s + "-single" + fam_code
# different families
else:
family.loc[idx, "Surname_adj"] = s + "-fam" + fam_code
display(family[family.Surname == "Smith"])
# Assign codes to families
dict_fcount = dict(family.groupby("Surname_adj").Family_size.count())
dict_fcode = dict(zip(dict_fcount.keys(), range(len(dict_fcount))))
family["Family_code"] = family["Surname_adj"].map(dict_fcode)
family["Family_count"] = family["Surname_adj"].map(dict_fcount)
print(f"No. of Family Before Regrouping: {len(family.Surname_code.unique())}")
print(f"No. of Family After Regrouping: {len(family.Family_code.unique())}")
# Identify Groups (Those holding the same ticket code, could be friends/family)
group = pd.DataFrame(
family[["Surname_code", "Surname_count", "Family_code", "Family_count"]]
)
dict_tcount = dict(full_dataset.groupby("Ticket").PassengerId.count())
dict_tcode = dict(zip(dict_tcount.keys(), range(len(dict_tcount))))
group["Ticket_code"] = full_dataset.Ticket.map(dict_tcode)
group["Ticket_count"] = full_dataset.Ticket.map(dict_tcount)
print(f"No. of Tickets Identified: {len(group['Ticket_code'].unique())}")
display(
full_dataset[
(full_dataset.Ticket == "A/4 48871") | (full_dataset.Ticket == "A/4 48873")
]
)
# **Below Function Parameters**
# This function takes in 2 label columns and chains together all items that share
# a label in either of the 2 columns (connected-component style grouping)
#
# input:
# * df - DataFrame
# * colA - Key for Col
# * colB - Key for Col
# output:
# * array of numeric grouping labels
#
def ChainCombineGroups(df, colA, colB):
# make a copy of DFs for iteration
data = df.copy()
search_df = data.copy()
group_count = 0
while not search_df.empty:
# Initiate pool and Select Reference item
pool = search_df.iloc[:1]
idx = pool.index
# Remove 1st item from searching df
search_df.drop(index=idx, inplace=True)
# Initialize Search
flag_init = 1
update = pd.DataFrame()
# While loop to exhausively search for commonalities, pool is updated until no more common features are found
while flag_init or not update.empty:
flag_init = 0
# target labels to look for
pool_A_uniq = np.unique(pool[colA])
pool_B_uniq = np.unique(pool[colB])
for col in [colA, colB]:
idx = []
# get all indexs of items with the same label
for num in np.unique(pool[col]):
idx.extend(search_df[search_df[col] == num].index)
# update pool
update = search_df.loc[idx]
pool = pd.concat([pool, update], axis=0)
# remove item from searching df
search_df = search_df.drop(index=idx)
# assign group num
data.loc[pool.index, "Group_"] = group_count
group_count += 1
return np.array(data["Group_"].astype(int))
# Assign Final group no.
group["Group_code"] = ChainCombineGroups(group, "Family_code", "Ticket_code")
# Calculate group sizes
dict_gcount = dict(group.groupby("Group_code").Family_code.count())
group["Group_count"] = group.Group_code.map(dict_gcount)
print(f"Family: {len(family['Family_code'].unique())}")
print(f"Group: {len(group['Ticket_code'].unique())}")
print(f"Combined: {len(group['Group_code'].unique())}\n")
print("An example of grouping the both friends and family under a same group:")
display(
pd.concat(
[
full_dataset["Ticket"],
family[["Surname", "Family_code"]],
group[["Ticket_code", "Group_code"]],
],
axis=1,
)[group["Group_code"] == 458]
)
# Prepare the df by adding the Survived features
group_final = pd.concat(
[
family[["Surname_code", "Surname_count", "Family_code", "Family_count"]],
group[["Ticket_code", "Ticket_count", "Group_code", "Group_count"]],
full_dataset["Survived"],
],
axis=1,
)
for param in [
("Surname_code", "Surname_count"),
("Family_code", "Family_count"),
("Ticket_code", "Ticket_count"),
("Group_code", "Group_count"),
]: # keep group at last
# No. of member survived in each group
n_member_survived_by_gp = group_final.groupby(param[0]).Survived.sum()
# No. of member survived in a particular group, discounting the passenger concerned
n_mem_survived = group_final[param[0]].map(n_member_survived_by_gp)
n_mem_survived_adj = n_mem_survived - group_final.Survived.apply(
lambda x: 1 if x == 1 else 0
)
# Same for the dead
n_member_dead_by_gp = (
group_final.groupby(param[0]).Survived.count()
- group_final.groupby(param[0]).Survived.sum()
)
n_mem_dead = group_final[param[0]].map(n_member_dead_by_gp)
n_mem_dead_adj = n_mem_dead - group_final.Survived.apply(
lambda x: 1 if x == 0 else 0
)
# How many people from that group that we do not have data on.
unknown_factor = (
group_final[param[1]] - n_mem_survived_adj - n_mem_dead_adj
) / group_final[param[1]]
confidence = 1 - unknown_factor
# Ratio of members survived in that group, ranging from -1 to 1, adjusted by the confidence weight
key = "Confidence_member_survived" + "_" + param[0]
ratio = (1 / group_final[param[1]]) * (n_mem_survived_adj - n_mem_dead_adj)
group_final[key] = confidence * ratio
# Display Correlation
plt.barh(group_final.corr().Survived[-4:].index, group_final.corr().Survived[-4:])
plt.xlabel("Correlation with Survived")
features["Cf_mem_survived"] = group_final["Confidence_member_survived_Group_code"]
features["Parch"] = full_dataset["Parch"]
features["SibSp"] = full_dataset["SibSp"]
features["Group_size"] = group["Group_count"]
features.head()
from sklearn.preprocessing import StandardScaler
# Standardize the continuous variables
scalar = StandardScaler()
features_z_transformed = features.copy()
continuous = ["Fare"]
features_z_transformed[continuous] = scalar.fit_transform(
features_z_transformed[continuous]
)
# Transform Sex labels into binary code
features_z_transformed.Sex = features_z_transformed.Sex.apply(
lambda x: 1 if x == "male" else 0
)
# One-hot Encoding
features_final = pd.get_dummies(features_z_transformed)
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
# Separate Train Data and Test Data
features_final_train = features_final[:891]
features_final_test = features_final[891:]
# Splitting Training Set into Train and Cross-validation Sets
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
X_train, X_test, y_train, y_test = train_test_split(
features_final_train, train.Survived, test_size=0.2, random_state=0
)
# **Below Function Parameters**
# - learner: the learning algorithm to be trained and predicted on
# - sample_size: the size of samples to be drawn from training set
# - X_train: features training set
# - y_train: Survived labels for the training set
# - X_test: features testing set
# - y_test: Survived labels for the testing set
#
# Create Model Training Pipeline
from sklearn.metrics import accuracy_score
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
results = {}
# Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])
learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
# Get the predictions on the test set(X_test),
predictions_test = learner.predict(X_test)
# then get predictions on the training samples(X_train)
predictions_train = learner.predict(X_train)
# Compute accuracy on the training samples
results["acc_train"] = accuracy_score(y_train, predictions_train)
# Compute accuracy on test set using accuracy_score()
results["acc_test"] = accuracy_score(y_test, predictions_test)
# Success
print(
"{} trained on {} samples. Acc: {:.4f}".format(
learner.__class__.__name__, sample_size, results["acc_test"]
)
)
# Return the results
return results
from sklearn.ensemble import RandomForestClassifier
# Initialize the model
clf_C = RandomForestClassifier(random_state=0)
# Calculate the number of samples for 10%, 50%, and 100% of the training data
samples_100 = len(y_train)
samples_10 = int(len(y_train) / 2)
samples_1 = int(len(y_train) / 10)
# Collect results on the learners
results = {}
for clf in [clf_C]:
clf_name = clf.__class__.__name__
results[clf_name] = {}
for i, samples in enumerate([samples_1, samples_10, samples_100]):
results[clf_name][i] = train_predict(
clf, samples, X_train, y_train, X_test, y_test
)
# Reshaping the Results for plotting
df = pd.DataFrame()
for i in results.items():
    temp = pd.DataFrame(i[1]).rename(
        columns={0: "10% of train", 1: "50% of train", 2: "100% of train"}
    )
temp["model"] = i[0]
df = pd.concat([df, temp], axis=0)
df_plot = df.reset_index().melt(id_vars=["index", "model"])
# Ploting the results
fig, axs = plt.subplots(1, 2, figsize=(16, 5))
for i, key in enumerate(df_plot["index"].unique()[:2]):
ax = axs[i % 2]
sns.barplot(
data=df_plot[df_plot["index"] == key],
x="model",
y="value",
hue="variable",
ax=ax,
)
ax.set_ylim([0.6, 1])
ax.set_title(key)
ax.legend(loc="lower right")
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
warnings.filterwarnings("ignore")
clf = RandomForestClassifier(random_state=0, oob_score=True)
parameters = {
"criterion": ["gini"],
"n_estimators": [350],
"max_depth": [5],
"min_samples_leaf": [4],
"max_leaf_nodes": [10],
"min_impurity_decrease": [0],
"max_features": [1],
}
scorer = make_scorer(accuracy_score)
grid_obj = GridSearchCV(clf, parameters, scoring=scorer, cv=10)
grid_fit = grid_obj.fit(X_train, y_train)
best_clf = grid_fit.best_estimator_
predictions = (clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
print("Unoptimized model\n")
print(
"Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions))
)
print("Oob score on testing data: {:.4f}".format(clf.oob_score_))
print("\nOptimized Model\n")
print(
"Final accuracy score on the testing data: {:.4f}".format(
accuracy_score(y_test, best_predictions)
)
)
print("Final oob score on the testing data: {:.4f}".format(best_clf.oob_score_))
print("\nBest Parameters\n")
best_clf
# Plot Feature Importance
idx = np.argsort(best_clf.feature_importances_)
plt.figure(figsize=(12, 8))
plt.barh(range(len(best_clf.feature_importances_)), best_clf.feature_importances_[idx])
plt.yticks(range(len(best_clf.feature_importances_)), features_final_train.columns[idx])
plt.title("Feature Importance in the data")
# Output for Kaggle competition
final_predict = best_clf.predict(features_final_test)
prediction = pd.DataFrame(full_dataset[891:].PassengerId)
prediction["Survived"] = final_predict.astype("int")
prediction.to_csv("prediction_final_sub.csv", index=False)
print("Final prediction for test data is in prediction_final_sub.csv file!")
# Kaggle dataset identifier: binarysegmentation-endovis-17 ("BinarySegmentation endovis 17")
<jupyter_script># # Segmentation Network Unet
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import matplotlib.pyplot as plt
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
concatenate,
Conv2DTranspose,
BatchNormalization,
Activation,
Dropout,
)
from tensorflow.keras.optimizers import Adadelta, Nadam, Adam
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import plot_model, Sequence
from tensorflow.keras.callbacks import (
TensorBoard,
ModelCheckpoint,
EarlyStopping,
ReduceLROnPlateau,
)
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
from tensorflow.python.keras.losses import binary_crossentropy
from scipy.ndimage import morphology as mp
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
from glob import glob # for getting list paths of image and labels
from random import choice, sample
from matplotlib import pyplot as plt
import cv2 # saving and loading images
# Any results you write to the current directory are saved as output.
# # Listing images and their respective labels
# We also want to check that each image has a corresponding mask file (see the sanity check below).
# train_img_dir = '../input/concatenation-endo/cropped_train/instrument_dataset_1/images/'
# train_mask_dir = '../input/concatenation-endo/cropped_train/instrument_dataset_1/binary_masks/'
train_img_dir = "../input/multiedovis/multiclass_segmentation/cropped_train/instrument_dataset/images/"
train_mask_dir = "../input/multiedovis/multiclass_segmentation/cropped_train/instrument_dataset/instruments_masks/"
train_imgs = os.listdir(train_img_dir) # if you have an error take a look here ...
train_masks = os.listdir(train_mask_dir)
train_imgs = sorted([i for i in train_imgs])
train_masks = sorted([i for i in train_masks if "Bipolar" in i])
print(len(train_imgs))
print(len(train_masks))
print(train_imgs[:3])
train_masks[:3]
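# Minimal sanity check (assumption: after sorting, the images and the selected
# "Bipolar" masks are meant to pair up one-to-one by index, since the data
# generator below indexes both lists with the same i).
if len(train_imgs) != len(train_masks):
    print("Warning: image/mask counts differ -", len(train_imgs), "vs", len(train_masks))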
# **Split off a validation set from the training data**
from sklearn.model_selection import train_test_split
val_img_dir = train_img_dir
val_mask_dir = train_mask_dir
train_imgs, val_imgs, train_masks, val_masks = train_test_split(
train_imgs, train_masks, test_size=0.13, random_state=42
)
print(len(train_masks))
print(len(val_masks))
# # Here we implement a custom Keras data generator to yield batches of images and labels without loading the whole dataset into memory
#
from scipy import ndimage
class DataGenerator(Sequence):
"Generates data for Keras"
def __init__(
self,
images,
image_dir,
labels,
label_dir,
batch_size=16,
dim=(224, 224, 3),
shuffle=True,
):
"Initialization"
self.dim = dim
self.images = images
self.image_dir = image_dir
self.labels = labels
self.label_dir = label_dir
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
"Denotes the number of batches per epoch"
return int(np.floor(len(self.images) / self.batch_size))
def __getitem__(self, index):
"Generate one batch of data"
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [k for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def on_epoch_end(self):
"Updates indexes after each epoch"
self.indexes = np.arange(len(self.images))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
"Generates data containing batch_size samples" # X : (n_samples, *dim, n_channels)
# Initialization
batch_imgs = list()
batch_labels = list()
# Generate data
for i in list_IDs_temp:
# degree=np.random.random() * 360
# Store sample
img = load_img(self.image_dir + self.images[i], target_size=self.dim)
img = img_to_array(img) / 255.0
# img = ndimage.rotate(img, degree)
# print(img)
batch_imgs.append(img)
# Store class
label = load_img(self.label_dir + self.labels[i], target_size=self.dim)
label = img_to_array(label)[:, :, 0]
label = label != 0
label = mp.binary_erosion(mp.binary_erosion(label))
label = mp.binary_dilation(mp.binary_dilation(mp.binary_dilation(label)))
label = np.expand_dims((label) * 1, axis=2)
batch_labels.append(label)
return np.array(batch_imgs, dtype=np.float32), np.array(
batch_labels, dtype=np.float32
)
# **Now we define our training and validation generators using the class implemented above.**
train_generator = DataGenerator(
train_imgs,
train_img_dir,
train_masks,
train_mask_dir,
batch_size=36,
dim=(224, 224, 3),
shuffle=True,
)
train_steps = train_generator.__len__()
train_steps
# **After defining the generator, let's fetch one batch it produces and visualize an image with its mask**
X, y = train_generator.__getitem__(1)
t = 12
plt.figure(figsize=(8, 8))
plt.subplot(121)
plt.imshow(X[t])
plt.subplot(122)
plt.imshow(np.reshape(y[t], (224, 224)))
val_generator = DataGenerator(
val_imgs,
val_img_dir,
val_masks,
val_mask_dir,
batch_size=36,
dim=(224, 224, 3),
shuffle=True,
)
val_steps = val_generator.__len__()
val_steps
# # After preparing the input pipeline, we define our U-Net model
# We first define the down-convolution (encoder) and up-convolution (decoder) blocks and stack them,
# with skip connections from each downsampling stage to the corresponding upsampling stage.
# Full details of the architecture are in the paper - https://arxiv.org/abs/1505.04597
def conv_block(tensor, nfilters, size=3, padding="same", initializer="he_normal"):
x = Conv2D(
filters=nfilters,
kernel_size=(size, size),
padding=padding,
kernel_initializer=initializer,
)(tensor)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(
filters=nfilters,
kernel_size=(size, size),
padding=padding,
kernel_initializer=initializer,
)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
return x
def deconv_block(tensor, residual, nfilters, size=3, padding="same", strides=(2, 2)):
y = Conv2DTranspose(
nfilters, kernel_size=(size, size), strides=strides, padding=padding
)(tensor)
y = concatenate([y, residual], axis=3)
y = conv_block(y, nfilters)
return y
def Unet(h, w, filters):
# down
input_layer = Input(shape=(h, w, 3), name="image_input")
conv1 = conv_block(input_layer, nfilters=filters)
conv1_out = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = conv_block(conv1_out, nfilters=filters * 2)
conv2_out = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = conv_block(conv2_out, nfilters=filters * 4)
conv3_out = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = conv_block(conv3_out, nfilters=filters * 8)
conv4_out = MaxPooling2D(pool_size=(2, 2))(conv4)
conv4_out = Dropout(0.5)(conv4_out)
conv5 = conv_block(conv4_out, nfilters=filters * 16)
conv5 = Dropout(0.5)(conv5)
# up
deconv6 = deconv_block(conv5, residual=conv4, nfilters=filters * 8)
deconv6 = Dropout(0.5)(deconv6)
deconv7 = deconv_block(deconv6, residual=conv3, nfilters=filters * 4)
deconv7 = Dropout(0.5)(deconv7)
deconv8 = deconv_block(deconv7, residual=conv2, nfilters=filters * 2)
deconv9 = deconv_block(deconv8, residual=conv1, nfilters=filters)
output_layer = Conv2D(filters=1, kernel_size=(1, 1), activation="sigmoid")(deconv9)
# using sigmoid activation for binary classification
model = Model(inputs=input_layer, outputs=output_layer, name="Unet")
return model
model = Unet(224, 224, 64)
# model.summary()
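# Sanity check (sketch): the decoder restores the input resolution, so the model
# outputs a per-pixel probability map of shape (None, 224, 224, 1).
print(model.output_shape)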
# # Here we define custom Keras metrics for the loss and accuracy computation
# Jaccard distance loss - mitigates the effect of unbalanced class labels in an image (e.g. 80% background, 20% foreground) https://en.wikipedia.org/wiki/Jaccard_index
# dice_coef - evaluates the accuracy (overlap) of the segmentation. https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
def jaccard_distance_loss(y_true, y_pred, smooth=100):
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return (1 - jac) * smooth
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2.0 * intersection + K.epsilon()) / (
K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon()
)
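# Quick sanity check of the metrics on a tiny hand-made pair of masks
# (illustrative sketch; assumes TF2 eager execution and the smoothing/epsilon
# constants defined above).
_y_true_demo = tf.constant([[1.0, 1.0, 0.0, 0.0]])
_y_pred_demo = tf.constant([[1.0, 0.0, 0.0, 0.0]])
print("dice:", dice_coef(_y_true_demo, _y_pred_demo).numpy())  # ~0.667 = 2*1/(2+1)
print("jaccard loss:", jaccard_distance_loss(_y_true_demo, _y_pred_demo).numpy())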
# # Define callbacks and compile the model with the Adam optimizer at its default learning rate.
model.compile(
optimizer="adam", loss=jaccard_distance_loss, metrics=[dice_coef, "accuracy"]
)
mc = ModelCheckpoint(
mode="max",
filepath="top-weights.h5",
monitor="val_dice_coef",
save_best_only="True",
save_weights_only="True",
verbose=1,
)
es = EarlyStopping(mode="max", monitor="val_dice_coef", patience=3, verbose=1)
callbacks = []  # note: mc and es are defined above but not passed; use callbacks = [mc, es] to enable checkpointing and early stopping
model.metrics_names
# # Finally, train the model with the above configuration and the training data generator.
# model.load_weights('../input/endovis-unet/top-weights.h5')
results = model.fit_generator(
train_generator,
steps_per_epoch=train_steps,
epochs=40,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=val_steps,
)
results.history.keys()
loss = results.history["loss"]
# val_loss = results.history["val_loss"]
dice_coef = results.history["dice_coef"]
# val_dice_coef = results.history["val_dice_coef"]
acc = results.history["accuracy"]
# val_acc = results.history["val_accuracy"]
plt.plot(loss, label="loss")
# plt.plot(val_loss,label = "val loss")
plt.xlabel("iterations")
# plt.ylabel("X axis label")
plt.legend()
plt.plot(dice_coef, label="dice_coef")
# plt.plot(val_dice_coef,label = "val dice_coef")
plt.xlabel("iterations")
# plt.ylabel("X axis label")
plt.legend()
plt.plot(acc, label="acc")
# plt.plot(val_acc,label = "val acc")
plt.xlabel("iterations")
# plt.ylabel("X axis label")
plt.legend()
model.save_weights("top-weights.h5")
# # Now it's time to make some predictions
# **Function to make a prediction**
# **Note: don't forget to normalize the input image (here every pixel is divided by 255.)**
def make_prediction(model, image, shape):
img = img_to_array(load_img(image, target_size=shape))
img = np.expand_dims(img, axis=0) / 255.0
mask = model.predict(img)
mask = (mask[0] > 0.5) * 1
# print(np.unique(mask,return_counts=True))
mask = np.reshape(mask, (224, 224))
return mask
image = (
"../input/concatenation-endo/cropped_train/instrument_dataset_1/images/frame111.jpg"
)
img = img_to_array(load_img(image))
plt.imshow(img / 255.0)
img.shape
mask = make_prediction(model, image, (224, 224, 3))
mask2 = cv2.merge([mask, mask, mask]).astype("float32")
print(img.shape, mask2.shape)
mask2 = cv2.resize(mask2, (img.shape[1], img.shape[0]))
# print(mask.shape)
plt.imshow(mask2)
# **Now use the mask to get the segmented image**
h, w = img.shape[:2]
mask_resized = cv2.resize(np.uint8(mask * 1), (w, h))
mask_resized = mask_resized != 0
# print(np.unique(mask_resized,return_counts=True))
segment = np.zeros((h, w, 3))
segment[:, :, 0] = img[:, :, 0] * mask_resized
segment[:, :, 1] = img[:, :, 1] * mask_resized
segment[:, :, 2] = img[:, :, 2] * mask_resized
segment[np.where((segment == [0, 0, 0]).all(axis=2))] = [0, 0, 0]
# img[np.where((img==[255,255,255]).all(axis=2))] = [0,0,0];
plt.figure(figsize=(8, 8))
plt.imshow(segment / 255.0)
# # Car crashes severity prediction
# importing necessary libraries
import pandas as pd
from datetime import datetime as dt
# reading the weather data
weather = pd.read_csv("../input/car-crashes-severity-prediction/weather-sfcsv.csv")
# reading the train dataset
train = pd.read_csv("../input/car-crashes-severity-prediction/train.csv")
# reading the test dataset
test = pd.read_csv("../input/car-crashes-severity-prediction/test.csv")
# converting the XML holidays file to pandas dataframe
import xml.etree.ElementTree as Xet
cols = ["date", "description"]
rows = []
# Parsing the XML file
xmlparse = Xet.parse("../input/car-crashes-severity-prediction/holidays.xml")
root = xmlparse.getroot()
for i in root:
date = i.find("date").text
description = i.find("description").text
rows.append(
{
"date": date,
"description": description,
}
)
holidays = pd.DataFrame(rows, columns=cols)
# a function that merges the holidays dataset with another given dataset
def mergeholidays(train, holidays):
train["date"] = train["timestamp"].str.slice(stop=10)
train = pd.merge(train, holidays, on="date", how="left")
# train[['description']]=train[['description']].fillna("not holiday")
train["description"] = train["description"].notna()
return train
# merging train and holidays dataset
train = mergeholidays(train, holidays)
# zero-padding the Month, Day and Hour columns in the weather dataset so they match the timestamp format
weather["Month"] = weather["Month"].map("{:02}".format)
weather["Day"] = weather["Day"].map("{:02}".format)
weather["Hour"] = weather["Hour"].map("{:02}".format)
weather["datedate"] = (
weather["Year"].apply(str)
+ "-"
+ weather["Month"].apply(str)
+ "-"
+ weather["Day"].apply(str)
+ " "
+ weather["Hour"].apply(str)
)
# dropping the columns that are not needed
weather1 = weather.drop(
[
"Wind_Chill(F)",
"Precipitation(in)",
"Year",
"Month",
"Day",
"Hour",
"Selected",
],
1,
)
# splitting the windy condition out of the Weather_Condition column in the weather dataset and adding it to a separate column
windy = []
conditions = []
for i in range(len(weather1)):
cond = weather1["Weather_Condition"].iloc[i]
if "/" in str(weather1["Weather_Condition"].iloc[i]):
cond = weather1["Weather_Condition"].iloc[i].split("/")[0].strip()
windy.append(1)
else:
windy.append(0)
conditions.append(cond)
weather1["Windy"] = windy
weather1["Weather_Condition"] = conditions
# imputing the missing values of the weather dataset with the column mean
from sklearn.impute import SimpleImputer
import numpy as np
miss_mean_imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
miss_mean_imputer = miss_mean_imputer.fit(
weather1[["Temperature(F)", "Visibility(mi)", "Wind_Speed(mph)", "Humidity(%)"]]
)
weather1[
["Temperature(F)", "Visibility(mi)", "Wind_Speed(mph)", "Humidity(%)"]
] = miss_mean_imputer.transform(
weather1[["Temperature(F)", "Visibility(mi)", "Wind_Speed(mph)", "Humidity(%)"]]
)
weather1.dropna(inplace=True)
weather1.reset_index(drop=True, inplace=True)
# removing duplicate hourly records in the weather data (keeping the first record per hour)
weather2 = weather1.loc[weather1["datedate"].duplicated() == False]
# function to left join a dataset with the weather data on the hourly timestamp
def mergeweather(train, weather2):
train["datedate"] = train["timestamp"].str.slice(stop=13)
train = pd.merge(train, weather2, on="datedate", how="left")
return train
train = mergeweather(train, weather2)
train.info()
# drop rows with null values from the merged data
def clean(train):
train.dropna(inplace=True)
    train.reset_index(drop=True, inplace=True)
return train
def timeCategories2(x):
if x.hour >= 0 and x.hour < 4:
return "1"
if x.hour >= 4 and x.hour < 8:
return "2"
elif x.hour >= 8 and x.hour < 12:
return "3"
elif x.hour >= 12 and x.hour < 16:
return "4"
elif x.hour >= 16 and x.hour < 20:
return "5"
else:
return "6"
# a function to split time into night and day segments
def timeCategories(x):
if x.hour >= 6 and x.hour < 18:
return 1
else:
return 0
# a function to split days into weekday (0) vs weekend (1)
def weekdaycat(x):
if x in [0, 1, 2, 3, 4]:
return 0
else:
return 1
# a function to split months into seasons
def seasons(x):
if x.month in [1, 2, 3]:
return "0"
elif x.month in [4, 5, 6]:
return "1"
elif x.month in [7, 8, 9]:
return "2"
elif x.month in [10, 11, 12]:
return "3"
# encode categorical data as numerical features
def encoding(train):
train = train.replace(False, 0)
train = train.replace(True, 1)
train = train.replace("R", 0)
train = train.replace("L", 1)
y = pd.get_dummies(train["description"], prefix="day")
train = pd.concat([train, y], axis=1)
y2 = pd.get_dummies(train["Weather_Condition"], prefix="Weather")
train = pd.concat([train, y2], axis=1)
# train['Weather_other']=train['Weather_Mist']+train['Weather_Light Drizzle']+train['Weather_Squalls']+train['Weather_Patches of Fog']+train['Weather_Light Thunderstorms and Rain']
train["timestamp"] = pd.to_datetime(train["timestamp"])
train["time_category"] = train["timestamp"].apply(timeCategories)
train["weekday"] = pd.to_datetime(train["date"]).apply(dt.weekday)
# train['weekday']=train['weekday'].apply(weekdaycat)
y3 = pd.get_dummies(train["time_category"], prefix="time_category")
train = pd.concat([train, y3], axis=1)
y4 = pd.get_dummies(train["weekday"], prefix="weekday")
train = pd.concat([train, y4], axis=1)
L = [
"Weather_Mist",
"Weather_Light Drizzle",
"Weather_Squalls",
"Weather_Patches of Fog",
"Weather_Light Thunderstorms and Rain",
]
for i in L:
if i in train.columns:
train = train.drop(i, axis=1)
return train
train = clean(train)
train = encoding(train)
def encoding2(train):
train = train.replace(False, 0)
train = train.replace(True, 1)
train = train.replace("R", 0)
train = train.replace("L", 1)
y = pd.get_dummies(train["description"], prefix="day")
train = pd.concat([train, y], axis=1)
y2 = pd.get_dummies(train["Weather_Condition"], prefix="Weather")
train = pd.concat([train, y2], axis=1)
train["Weather_other"] = train["Weather_Light Drizzle"]
train["timestamp"] = pd.to_datetime(train["timestamp"])
train["time_category"] = train["timestamp"].apply(timeCategories)
train["weekday"] = pd.to_datetime(train["date"]).apply(dt.weekday)
y3 = pd.get_dummies(train["time_category"], prefix="time_category")
train = pd.concat([train, y3], axis=1)
y4 = pd.get_dummies(train["weekday"], prefix="weekday")
train = pd.concat([train, y4], axis=1)
return train
train.info()
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance
from matplotlib import pyplot as plt
rf = RandomForestRegressor(n_estimators=100)
x = train.drop(
["Severity", "timestamp", "Weather_Condition", "datedate", "date"], axis=1
)
rf.fit(x, train["Severity"])
plt.rcParams["figure.figsize"] = (20, 20)
plt.barh(x.columns, rf.feature_importances_)
train
def get_relevant_features(train):
train = train[
[
"ID",
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"weekday",
"Severity",
"Visibility(mi)",
"Weather_Fair",
]
]
return train
def get_relevant_features2(train):
train = train[
[
"ID",
"Lat",
"Lng",
"Distance(mi)",
"Stop",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"weekday",
"Visibility(mi)",
"Weather_Fair",
]
]
return train
train = get_relevant_features(train)
train.info()
cor = (
pd.DataFrame(train[train.columns[0:]].corr()["Severity"][:])
.apply(abs)
.sort_values(by="Severity")
)
cor
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(
    train, test_size=0.20, random_state=42, stratify=train["Severity"]
)  # stratified split preserves the Severity class balance in both sets
X_train = train_df.drop(columns=["Severity", "ID"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["Severity", "ID"])
y_val = val_df["Severity"]
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=22, random_state=101)
# Train the classifier
classifier = classifier.fit(X_train, y_train)
# evaluate the classifier on the validation data
print(
    "The accuracy of the classifier on the validation set is ",
    (classifier.score(X_val, y_val)),
)
# evaluate the classifier on the training data
print(
    "The accuracy of the classifier on the training set is ",
    (classifier.score(X_train, y_train)),
)
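# Optional sketch: a k-fold cross-validated estimate is usually more robust than a single
# train/validation split. (Hypothetical addition, not part of the original notebook;
# it reuses the feature set selected by get_relevant_features above.)
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(
    RandomForestClassifier(max_depth=22, random_state=101),
    train.drop(columns=["Severity", "ID"]),
    train["Severity"],
    cv=5,
)
print("5-fold CV accuracy: {:.3f} +/- {:.3f}".format(cv_scores.mean(), cv_scores.std()))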
test = mergeholidays(test, holidays)
test = mergeweather(test, weather2)
test = clean(test)
test = encoding(test)
test = get_relevant_features2(test)
X_test = test.drop(columns=["ID"])
y_test_predicted = classifier.predict(X_test)
test["Severity"] = y_test_predicted
test[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
| false | 0 | 2,832 | 0 | 2,832 | 2,832 |
||
69173883
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Moscow Real Estate Price Prediction
# Geekbrains Python for Data Science course competition
# The task is to predict the price of flats in test.csv. Two datasets are given: train.csv (contains all features and prices of flats) and test.csv (only features).
# **Task:** predict the prices of the flats in test.csv.
# To do so, build a price-prediction model on the data from train.csv (which contains both the features and the flat prices), then use that model to predict prices for the flats described in test.csv (features only).
# Target variable: Price
# Main metric: R2 - coefficient of determination (sklearn.metrics.r2_score)
# Auxiliary metric: MSE - mean squared error (sklearn.metrics.mean_squared_error)
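# A tiny worked example of the main metric (made-up numbers, not competition data):
# R2 = 1 - SS_res / SS_tot, where SS_res is the residual sum of squares and SS_tot the total sum of squares.
import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([3.0, 5.0, 7.0, 9.0])
y_pred = np.array([2.5, 5.5, 7.0, 8.0])
ss_res = np.sum((y_true - y_pred) ** 2)
ss_tot = np.sum((y_true - y_true.mean()) ** 2)
print(1 - ss_res / ss_tot, r2_score(y_true, y_pred))  # both print 0.925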
# **Importing libraries and helper functions**
import numpy as np
import pandas as pd
import random
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score as r2
from sklearn.model_selection import KFold, GridSearchCV
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")  # suppress warnings
# consistent font size for all plots
matplotlib.rcParams.update({"font.size": 12})
def evaluate_preds(
train_true_values, train_pred_values, test_true_values, test_pred_values
):
print("Train R2:\t" + str(round(r2(train_true_values, train_pred_values), 3)))
print("Test R2:\t" + str(round(r2(test_true_values, test_pred_values), 3)))
plt.figure(figsize=(18, 10))
plt.subplot(121)
sns.scatterplot(x=train_pred_values, y=train_true_values)
plt.xlabel("Predicted values")
plt.ylabel("True values")
plt.title("Train sample prediction")
plt.subplot(122)
sns.scatterplot(x=test_pred_values, y=test_true_values)
plt.xlabel("Predicted values")
plt.ylabel("True values")
plt.title("Test sample prediction")
plt.show()
# Paths to the data files
TRAIN_DATASET_PATH = "../input/real-estate-price-prediction-moscow/train.csv"
TEST_DATASET_PATH = "../input/real-estate-price-prediction-moscow/test.csv"
# # **Loading the data**
# **Dataset description**
# * Id - flat identifier
# * DistrictId - district identifier
# * Rooms - number of rooms
# * Square - total area of the flat
# * LifeSquare - living area
# * KitchenSquare - kitchen area
# * Floor - floor of the flat
# * HouseFloor - number of floors in the building
# * HouseYear - year the building was constructed
# * Ecology_1, Ecology_2, Ecology_3 - ecological indicators of the area
# * Social_1, Social_2, Social_3 - social indicators of the area
# * Healthcare_1, Healthcare_2 - healthcare-related indicators of the area
# * Shops_1, Shops_2 - indicators related to the presence of shops and malls
# * Price - flat price
# read the data from train.csv
train_df = pd.read_csv(TRAIN_DATASET_PATH)
train_df.head(10)
# shape of the train dataset (number of objects, number of features)
print(train_df.shape)
print("Total flats:", train_df.shape[0])
print("Total features:", train_df.shape[1])
# read the data from test.csv
test_df = pd.read_csv(TEST_DATASET_PATH)
test_df.head(10)
# shape of the test dataset (number of objects, number of features)
print(test_df.shape)
print("Total flats:", test_df.shape[0])
print("Total features:", test_df.shape[1])
# **Data type conversion**
train_df.dtypes
test_df.dtypes
train_df["Id"] = train_df["Id"].astype(str)
train_df["DistrictId"] = train_df["DistrictId"].astype(str)
train_df["Rooms"] = train_df["Rooms"].astype(int)
train_df["HouseFloor"] = train_df["HouseFloor"].astype(int)
train_df["Ecology_1"] = train_df["Ecology_1"].astype(int)
test_df["Id"] = test_df["Id"].astype(str)
test_df["DistrictId"] = test_df["DistrictId"].astype(str)
test_df["Rooms"] = test_df["Rooms"].astype(int)
test_df["HouseFloor"] = test_df["HouseFloor"].astype(int)
test_df["Ecology_1"] = test_df["Ecology_1"].astype(int)
# # **Data analysis**
# **1. EDA** - exploratory data analysis
plt.figure(figsize=(12, 6))
train_df["Price"].hist(bins=40)
plt.ylabel("Count")
plt.xlabel("Price")
plt.title("Target variable")
plt.show()
# numerical variables
train_df.describe()
# categorical (nominal) variables
train_df.select_dtypes(include="object").columns.tolist()
# **2. Outlier handling**
train_df["Rooms"].value_counts()
# add a new flag marking outliers in 'Rooms': 1 means outlier, 0 means the value does not meet the outlier conditions
train_df["Rooms_outlier"] = 0
train_df.loc[(train_df["Rooms"] == 0) | (train_df["Rooms"] >= 7), "Rooms_outlier"] = 1
train_df.head()
# if the number of rooms is 0 or >= 7 and the total area is over 100 sq. m, replace 'Rooms' with the median;
# if the number of rooms is 0 or >= 7 and the total area is at most 100 sq. m, set 'Rooms' to 1.
train_df.loc[(train_df["Rooms"] == 0) & (train_df["Square"] <= 100), "Rooms"] = 1
train_df.loc[(train_df["Rooms"] == 0) & (train_df["Square"] > 100), "Rooms"] = train_df[
"Rooms"
].median()
train_df.loc[(train_df["Rooms"] >= 7) & (train_df["Square"] <= 100), "Rooms"] = 1
train_df.loc[(train_df["Rooms"] >= 7) & (train_df["Square"] > 100), "Rooms"] = train_df[
"Rooms"
].median()
train_df["Rooms"].value_counts()
train_df.loc[(train_df["Square"] <= 31) | (train_df["Square"] > 300)]
# add a new flag marking outliers in total area: 1 means outlier, 0 otherwise
train_df["Square_outlier"] = 0
train_df.loc[
(train_df["Square"] <= 31) | (train_df["Square"] > 300), "Square_outlier"
] = 1
train_df.head()
train_df["Square"].quantile(0.975), train_df["Square"].quantile(0.025)
# replace the value with the 0.975 quantile when the total area is over 300 sq. m,
# and with the mean when the total area is at most 31 sq. m
train_df.loc[train_df["Square"] <= 31, "Square"] = train_df["Square"].mean()
train_df.loc[train_df["Square"] > 300, "Square"] = train_df["Square"].quantile(0.975)
train_df["Square"].value_counts()
train_df["KitchenSquare"].quantile(0.800), train_df["KitchenSquare"].quantile(0.200)
train_df["KitchenSquare"].quantile(0.995), train_df["KitchenSquare"].quantile(0.005)
# Replace 'KitchenSquare' depending on the total area of the flat: the smaller the flat,
# the smaller the kitchen
condition_1 = (train_df["KitchenSquare"].isna()) | (
train_df["KitchenSquare"] > train_df["KitchenSquare"].quantile(0.995)
)
condition_2 = (train_df["Square"] <= 31) | (train_df["Square"] <= 40)
condition_3 = (train_df["Square"] > 40) & (
    train_df["Square"] <= train_df["Square"].mean()
)
condition_4 = (train_df["Square"] > train_df["Square"].mean()) & (
    train_df["Square"] <= 90
)
train_df.loc[condition_1, "KitchenSquare"] = 20 ##quantile(.995)
train_df.loc[condition_2, "KitchenSquare"] = train_df["KitchenSquare"].median()
train_df.loc[condition_3, "KitchenSquare"] = 9 ##quantile(.800)
train_df.loc[condition_4, "KitchenSquare"] = 13 ##quantile(.975)
train_df.loc[train_df["Square"] > 90, "KitchenSquare"] = 20 ##quantile(.995)
train_df.loc[train_df["KitchenSquare"] < 5, "KitchenSquare"] = 5
train_df["KitchenSquare"].value_counts()
# look at how many floors the buildings in the dataset have
train_df["HouseFloor"].sort_values().unique()
# as of 26.07.2021 the tallest building in Moscow has 75 floors, so values of 0 and anything above 75 are outliers;
# there are only 3 such outliers, so we replace them with the mean without adding an extra flag feature
train_df.loc[
(train_df["HouseFloor"] == 0) | (train_df["HouseFloor"] > 75), "HouseFloor"
] = train_df["HouseFloor"].mean()
train_df["HouseFloor"].sort_values().unique()
train_df["HouseFloor"].value_counts()
# check that the flat's floor is consistent with the number of floors in its building:
# how many flats are listed on a floor higher than the building has
(train_df["Floor"] > train_df["HouseFloor"]).sum()
# add a new flag marking outliers in 'HouseFloor': 1 means outlier, 0 otherwise
train_df["HouseFloor_outlier"] = 0
train_df.loc[train_df["HouseFloor"] == 0, "HouseFloor_outlier"] = 1
train_df.loc[train_df["Floor"] > train_df["HouseFloor"], "HouseFloor_outlier"] = 1
# replace 'HouseFloor' with 'Floor' when it is 0 or smaller than the flat's floor given in the dataset
train_df.loc[train_df["HouseFloor"] == 0, "HouseFloor"] = train_df["Floor"]
train_df.loc[train_df["Floor"] > train_df["HouseFloor"], "HouseFloor"] = train_df[
"Floor"
]
train_df["HouseFloor"].value_counts()
train_df["HouseYear"].sort_values(ascending=False)
train_df.loc[train_df["HouseYear"] > 2021, "HouseYear"] = 2021
train_df.loc[train_df["HouseYear"] <= 1900, "HouseYear"] = 1900
train_df["HouseYear"].value_counts()
# **3. Handling missing values**
train_df[["Square", "LifeSquare", "KitchenSquare"]].head(10)
train_df["LifeSquare_nan"] = train_df["LifeSquare"].isna() * 1
condition = (
(train_df["LifeSquare"].isna())
& (~train_df["Square"].isna())
& (~train_df["KitchenSquare"].isna())
)
train_df.loc[condition, "LifeSquare"] = (
train_df.loc[condition, "Square"] - train_df.loc[condition, "KitchenSquare"] - 10
)
train_df[["Square", "LifeSquare", "KitchenSquare"]].tail(20)
# check whether the living area exceeds the total area of the flat, and how many such objects there are
(train_df["LifeSquare"] >= train_df["Square"]).sum()
# add a new flag marking outliers in 'LifeSquare': 1 means outlier, 0 otherwise
train_df["LifeSquare_outlier"] = 0
train_df.loc[train_df["LifeSquare"] == 0, "LifeSquare_outlier"] = 1
train_df.loc[train_df["LifeSquare"] >= train_df["Square"], "LifeSquare_outlier"] = 1
train_df["LifeSquare"].quantile(0.975), train_df["LifeSquare"].quantile(0.025)
train_df["LifeSquare"].quantile(0.650), train_df["LifeSquare"].quantile(0.350)
# replace 'LifeSquare' when it is at most the 0.025 quantile or at least the total area of the flat
train_df.loc[
train_df["LifeSquare"] <= train_df["LifeSquare"].quantile(0.025), "LifeSquare"
] = train_df["LifeSquare"].quantile(0.025)
train_df.loc[train_df["LifeSquare"] >= train_df["Square"], "LifeSquare"] = train_df[
"LifeSquare"
].quantile(0.350)
(train_df["LifeSquare"] >= train_df["Square"]).sum()
train_df["LifeSquare"].value_counts()
train_df["Healthcare_1"].sort_values().unique()
# add a new flag marking outliers in 'Healthcare_1': 1 means outlier, 0 otherwise
train_df["Healthcare_1_outlier"] = 0
train_df.loc[train_df["Healthcare_1"] == 0, "Healthcare_1_outlier"] = 1
train_df.loc[train_df["Healthcare_1"] >= 1000, "Healthcare_1_outlier"] = 1
# replace 'Healthcare_1' outliers with the median
train_df.loc[
(train_df["Healthcare_1"] == 0) | (train_df["Healthcare_1"] >= 1000), "Healthcare_1"
] = train_df["Healthcare_1"].median()
train_df["Healthcare_1"].value_counts()
train_df["Healthcare_1"].quantile(0.700), train_df["Healthcare_1"].quantile(0.300)
# fill the remaining gaps with the 0.300 quantile
fill_Hc1 = train_df["Healthcare_1"].quantile(0.300)
train_df["Healthcare_1"] = train_df["Healthcare_1"].fillna(fill_Hc1)
train_df["Healthcare_1"].value_counts()
class DataPreprocessing:
"""Подготовка исходных данных"""
def __init__(self):
"""Параметры класса"""
self.means = None
self.medians = None
self.kitchensquare1_quantile = None
self.kitchensquare2_quantile = None
self.kitchensquare3_quantile = None
self.square_quantile = None
self.lifesquare1_quantile = None
self.lifesquare2_quantile = None
self.healthcare1_quantile = None
def fit(self, X):
"""Сохранение статистик"""
# Расчет медиан
self.medians = X.median()
self.kitchensquare1_quantile = X["KitchenSquare"].quantile(0.995)
self.kitchensquare2_quantile = X["KitchenSquare"].quantile(0.975)
self.kitchensquare3_quantile = X["KitchenSquare"].quantile(0.800)
self.square_quantile = X["Square"].quantile(0.975)
self.lifesquare1_quantile = X["LifeSquare"].quantile(0.350)
self.lifesquare2_quantile = X["LifeSquare"].quantile(0.025)
self.healthcare1_quantile = X["Healthcare_1"].quantile(0.300)
        # compute means
self.means = X.mean()
def transform(self, X):
"""Трансформация данных"""
# Rooms
X["Rooms_outlier"] = 0
X.loc[(X["Rooms"] == 0) | (X["Rooms"] >= 7), "Rooms_outlier"] = 1
X.loc[(X["Rooms"] == 0) & (X["Square"] <= 100), "Rooms"] = 1
X.loc[(X["Rooms"] == 0) & (X["Square"] > 100), "Rooms"] = self.medians["Rooms"]
X.loc[X["Rooms"] >= 7 & (X["Square"] <= 100), "Rooms"] = 1
X.loc[X["Rooms"] >= 7 & (X["Square"] > 100), "Rooms"] = self.medians["Rooms"]
# Square
X["Square_outlier"] = 0
X.loc[(X["Square"] <= 31) | (X["Square"] > 300), "Square_outlier"] = 1
X.loc[X["Square"] <= 31, "Square"] = self.means["Square"]
X.loc[X["Square"] > 300, "Square"] = self.square_quantile
# KitchenSquare
condition_1 = (X["KitchenSquare"].isna()) | (
X["KitchenSquare"] > self.kitchensquare1_quantile
)
condition_2 = (X["Square"] <= 31) | (X["Square"] <= 40)
        condition_3 = (X["Square"] > 40) & (X["Square"] <= self.means["Square"])
        condition_4 = (X["Square"] > self.means["Square"]) & (X["Square"] <= 90)
X.loc[condition_1, "KitchenSquare"] = self.kitchensquare1_quantile
X.loc[condition_2, "KitchenSquare"] = self.medians["KitchenSquare"]
X.loc[condition_3, "KitchenSquare"] = self.kitchensquare3_quantile
X.loc[condition_4, "KitchenSquare"] = self.kitchensquare2_quantile
X.loc[X["Square"] > 90, "KitchenSquare"] = self.kitchensquare1_quantile
X.loc[X["KitchenSquare"] < 5, "KitchenSquare"] = 5
# HouseFloor, Floor
X["HouseFloor_outlier"] = 0
X.loc[X["HouseFloor"] == 0, "HouseFloor_outlier"] = 1
X.loc[X["Floor"] > X["HouseFloor"], "HouseFloor_outlier"] = 1
X.loc[X["HouseFloor"] == 0 | (X["HouseFloor"] > 75), "HouseFloor"] = self.means[
"HouseFloor"
]
X.loc[X["HouseFloor"] == 0, "HouseFloor"] = X["Floor"]
X.loc[X["Floor"] > X["HouseFloor"], "HouseFloor"] = X["Floor"]
# HouseYear
current_year = datetime.now().year
X["HouseYear_outlier"] = 0
X.loc[X["HouseYear"] > current_year, "HouseYear_outlier"] = 1
X.loc[X["HouseYear"] <= 1900, "HouseYear_outlier"] = 1
X.loc[X["HouseYear"] > current_year, "HouseYear"] = current_year
X.loc[X["HouseYear"] <= 1900, "HouseYear_outlier"] = 1900
# LifeSquare
X["LifeSquare_nan"] = X["LifeSquare"].isna() * 1
condition = (
(X["LifeSquare"].isna())
& (~X["Square"].isna())
& (~X["KitchenSquare"].isna())
)
X.loc[condition, "LifeSquare"] = (
X.loc[condition, "Square"] - X.loc[condition, "KitchenSquare"] - 10
)
X["LifeSquare_outlier"] = 0
X.loc[X["LifeSquare"] == 0, "LifeSquare_outlier"] = 1
X.loc[X["LifeSquare"] >= X["Square"], "LifeSquare_outlier"] = 1
X.loc[
X["LifeSquare"] <= self.lifesquare2_quantile, "LifeSquare"
] = self.lifesquare2_quantile
X.loc[X["LifeSquare"] >= X["Square"], "LifeSquare"] = self.lifesquare1_quantile
# Healthcare_1
X["Healthcare_1_outlier"] = 0
X.loc[X["Healthcare_1"] == 0, "Healthcare_1_outlier"] = 1
X.loc[X["Healthcare_1"] >= 1000, "Healthcare_1_outlier"] = 1
X.loc[
(X["Healthcare_1"] == 0) | (X["Healthcare_1"] >= 1000), "Healthcare_1"
] = self.medians["Healthcare_1"]
fill_Hc1 = self.healthcare1_quantile
X["Healthcare_1"] = X["Healthcare_1"].fillna(fill_Hc1)
X.fillna(self.medians, inplace=True)
return X
# **4. Feature engineering**
# **Dummies**
# convert string features to numeric
binary_to_numbers = {"A": 0, "B": 1}
train_df["Ecology_2"] = train_df["Ecology_2"].replace(binary_to_numbers)
train_df["Ecology_3"] = train_df["Ecology_3"].replace(binary_to_numbers)
train_df["Shops_2"] = train_df["Shops_2"].replace(binary_to_numbers)
# **DistrictSize, IsDistrictLarge**
# turn DistrictId into a numeric feature (district size)
district_size = (
train_df["DistrictId"]
.value_counts()
.reset_index()
.rename(columns={"index": "DistrictId", "DistrictId": "DistrictSize"})
)
district_size.head(7)
# join onto train_df
train_df = train_df.merge(district_size, on="DistrictId", how="left")
train_df.head(7)
# add a new feature splitting flats by the size of their district
(train_df["DistrictSize"] > 100).value_counts()
train_df["IsDistrictLarge"] = (train_df["DistrictSize"] > 100).astype(int)
# Compute a variable that depends on the number of rooms and the district of the flat - **M_Price_Room_dstr**
m_price_room_dstr = (
train_df.groupby(["DistrictId", "Rooms"], as_index=False)
.agg({"Price": "mean"})
.rename(columns={"Price": "M_Price_Room_dstr"})
)
m_price_room_dstr.head(7)
m_price_room_dstr.shape
# join onto train_df
train_df = train_df.merge(m_price_room_dstr, on=["DistrictId", "Rooms"], how="left")
train_df.head(7)
# **M_PriceByFloorYear** - add a target-encoded variable combining the floor and construction-year categories
def floor_to_cat(X):
X["floor_cat"] = 0
X.loc[X["Floor"] <= 2, "floor_cat"] = 1
X.loc[(X["Floor"] > 2) & (X["Floor"] <= 5), "floor_cat"] = 2
X.loc[(X["Floor"] > 5) & (X["Floor"] <= 9), "floor_cat"] = 3
X.loc[(X["Floor"] > 9) & (X["Floor"] <= 15), "floor_cat"] = 4
X.loc[X["Floor"] > 15, "floor_cat"] = 5
return X
def year_to_cat(X):
X["year_cat"] = 0
X.loc[X["HouseYear"] <= 1920, "year_cat"] = 1
X.loc[(X["HouseYear"] > 1920) & (X["HouseYear"] <= 1946), "year_cat"] = 2
X.loc[(X["HouseYear"] > 1946) & (X["HouseYear"] <= 1959), "year_cat"] = 3
X.loc[(X["HouseYear"] > 1960) & (X["HouseYear"] <= 1989), "year_cat"] = 4
X.loc[(X["HouseYear"] > 1989) & (X["HouseYear"] <= 2009), "year_cat"] = 5
X.loc[(X["HouseYear"] > 2010), "year_cat"] = 6
return X
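# quick sanity check of an alternative Floor binning via pd.cut (illustrative only, not used further below)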
bins = [0, 3, 5, 9, 15, train_df["Floor"].max()]
pd.cut(train_df["Floor"], bins=bins, labels=False)
train_df = year_to_cat(train_df)
train_df = floor_to_cat(train_df)
train_df.head()
m_price_by_floor_year = (
train_df.groupby(["year_cat", "floor_cat"], as_index=False)
.agg({"Price": "mean"})
.rename(columns={"Price": "M_PriceByFloorYear"})
)
m_price_by_floor_year.head(7)
# join onto train_df
train_df = train_df.merge(
m_price_by_floor_year, on=["year_cat", "floor_cat"], how="left"
)
train_df.head(7)
class FeatureGenetator:
"""Генерация новых признаков"""
def __init__(self):
self.DistrictId_counts = None
self.binary_to_numbers = None
self.m_price_room_dstr = None
self.m_price_by_floor_year = None
self.house_year_max = None
self.floor_max = None
self.district_size = None
def fit(self, X, y=None):
X = X.copy()
# Binary features
self.binary_to_numbers = {"A": 0, "B": 1}
# DistrictID
self.district_size = (
X["DistrictId"]
.value_counts()
.reset_index()
.rename(columns={"index": "DistrictId", "DistrictId": "DistrictSize"})
)
# Target encoding
## District, Rooms
df = X.copy()
if y is not None:
df["Price"] = y.values
self.m_price_room_dstr = (
df.groupby(["DistrictId", "Rooms"], as_index=False)
.agg({"Price": "mean"})
.rename(columns={"Price": "M_Price_Room_dstr"})
)
self.m_price_room_dstr_mean = self.m_price_room_dstr[
"M_Price_Room_dstr"
].mean()
## floor, year
if y is not None:
self.floor_max = df["Floor"].max()
self.house_year_max = df["HouseYear"].max()
df["Price"] = y.values
df = self.floor_to_cat(df)
df = self.year_to_cat(df)
self.m_price_by_floor_year = (
df.groupby(["year_cat", "floor_cat"], as_index=False)
.agg({"Price": "mean"})
.rename(columns={"Price": "M_PriceByFloorYear"})
)
self.m_price_by_floor_year_mean = self.m_price_by_floor_year[
"M_PriceByFloorYear"
].mean()
def transform(self, X):
# Binary features
X["Ecology_2"] = X["Ecology_2"].map(
self.binary_to_numbers
) # self.binary_to_numbers = {'A': 0, 'B': 1}
X["Ecology_3"] = X["Ecology_3"].map(self.binary_to_numbers)
X["Shops_2"] = X["Shops_2"].map(self.binary_to_numbers)
# DistrictId, IsDistrictLarge
X = X.merge(self.district_size, on="DistrictId", how="left")
X["new_district"] = 0
X.loc[X["DistrictSize"].isna(), "new_district"] = 1
X["DistrictSize"].fillna(5, inplace=True)
X["IsDistrictLarge"] = (X["DistrictSize"] > 100).astype(int)
# More categorical features
        X = self.floor_to_cat(X)  # adds the floor_cat column
        X = self.year_to_cat(X)  # adds the year_cat column
# Target encoding
if self.m_price_room_dstr is not None:
X = X.merge(self.m_price_room_dstr, on=["DistrictId", "Rooms"], how="left")
X["M_Price_Room_dstr"].fillna(self.m_price_room_dstr_mean, inplace=True)
if self.m_price_by_floor_year is not None:
X = X.merge(
self.m_price_by_floor_year, on=["year_cat", "floor_cat"], how="left"
)
X["M_PriceByFloorYear"].fillna(
self.m_price_by_floor_year_mean, inplace=True
)
return X
def floor_to_cat(self, X):
X["floor_cat"] = 0
X.loc[X["Floor"] <= 2, "floor_cat"] = 1
X.loc[(X["Floor"] > 2) & (X["Floor"] <= 5), "floor_cat"] = 2
X.loc[(X["Floor"] > 5) & (X["Floor"] <= 9), "floor_cat"] = 3
X.loc[(X["Floor"] > 9) & (X["Floor"] <= 15), "floor_cat"] = 4
X.loc[X["Floor"] > 15, "floor_cat"] = 5
return X
def year_to_cat(self, X):
X["year_cat"] = 0
X.loc[X["HouseYear"] <= 1920, "year_cat"] = 1
X.loc[(X["HouseYear"] > 1920) & (X["HouseYear"] <= 1946), "year_cat"] = 2
X.loc[(X["HouseYear"] > 1946) & (X["HouseYear"] <= 1959), "year_cat"] = 3
X.loc[(X["HouseYear"] > 1960) & (X["HouseYear"] <= 1989), "year_cat"] = 4
X.loc[(X["HouseYear"] > 1989) & (X["HouseYear"] <= 2009), "year_cat"] = 5
X.loc[(X["HouseYear"] > 2010), "year_cat"] = 6
return X
# **5. Feature selection**
train_df.columns.tolist()
feature_names = [
"Rooms",
"Square",
"LifeSquare",
"KitchenSquare",
"Floor",
"HouseFloor",
"HouseYear",
"Ecology_1",
"Ecology_2",
"Ecology_3",
"Social_1",
"Social_2",
"Social_3",
"Helthcare_2",
"Shops_1",
"Shops_2",
]
new_feature_names = [
"Rooms_outlier",
"Square_outlier",
"HouseFloor_outlier",
"HouseYear_outlier",
"LifeSquare_nan",
"LifeSquare_outlier",
"Healthcare_1_outlier",
"DistrictSize",
"new_district",
"IsDistrictLarge",
"M_Price_Room_dstr",
"M_PriceByFloorYear",
]
target_name = "Price"
# **6. Train/validation split**
train_df = pd.read_csv(TRAIN_DATASET_PATH)
test_df = pd.read_csv(TEST_DATASET_PATH)
X = train_df.drop(columns=target_name)
y = train_df[target_name]
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.33, shuffle=True, random_state=39
)
preprocessor = DataPreprocessing()
preprocessor.fit(X_train)
X_train = preprocessor.transform(X_train)
X_valid = preprocessor.transform(X_valid)
test_df = preprocessor.transform(test_df)
X_train.shape, X_valid.shape, test_df.shape
features_gen = FeatureGenetator()
features_gen.fit(X_train, y_train)
X_train = features_gen.transform(X_train)
X_valid = features_gen.transform(X_valid)
test_df = features_gen.transform(test_df)
X_train.shape, X_valid.shape, test_df.shape
X_train = X_train[feature_names + new_feature_names]
X_valid = X_valid[feature_names + new_feature_names]
test_df = test_df[feature_names + new_feature_names]
X_train.isna().sum().sum(), X_valid.isna().sum().sum(), test_df.isna().sum().sum()
# **7. Building the model**
# **Training**
# Random forest
rf_model = RandomForestRegressor(random_state=39, criterion="mse")
rf_model.fit(X_train, y_train)
# **Model evaluation**
y_train_preds = rf_model.predict(X_train)
y_test_preds = rf_model.predict(X_valid)
evaluate_preds(y_train, y_train_preds, y_valid, y_test_preds)
# check with cross-validation
cv_score = cross_val_score(
rf_model,
X_train,
y_train,
scoring="r2",
cv=KFold(n_splits=3, shuffle=True, random_state=21),
)
cv_score
cv_score.mean()
# result: the model is overfitted (the three folds give noticeably different scores)
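# GridSearchCV is imported above but not used; a minimal sketch (the grid below is an arbitrary,
# illustrative choice) of tuning the forest to reduce the overfitting seen in the CV scores:
param_grid = {"max_depth": [8, 12, 16], "min_samples_leaf": [1, 5, 10]}
grid_search = GridSearchCV(
    RandomForestRegressor(random_state=39),
    param_grid,
    scoring="r2",
    cv=KFold(n_splits=3, shuffle=True, random_state=21),
)
grid_search.fit(X_train, y_train)
print(grid_search.best_params_, grid_search.best_score_)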
# feature importances - checking how "useful" each feature is
feature_importances = pd.DataFrame(
zip(X_train.columns, rf_model.feature_importances_),
columns=["feature_name", "importance"],
)
feature_importances.sort_values(by="importance", ascending=False)
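# a minimal sketch for visualizing the importance table above (matplotlib is already imported as plt):
fi_sorted = feature_importances.sort_values(by="importance")
plt.figure(figsize=(10, 8))
plt.barh(fi_sorted["feature_name"], fi_sorted["importance"])
plt.title("Random forest feature importances")
plt.show()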
from sklearn.ensemble import (
StackingRegressor,
VotingRegressor,
BaggingRegressor,
GradientBoostingRegressor,
)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
gb = GradientBoostingRegressor()
stack = StackingRegressor([("lr", lr), ("rf", rf_model)], final_estimator=gb)
stack.fit(X_train, y_train)
y_train_preds = stack.predict(X_train)
y_test_preds = stack.predict(X_valid)
evaluate_preds(y_train, y_train_preds, y_valid, y_test_preds)
# **8. Predicting on the test dataset**
test_df
submit = pd.read_csv(
"/kaggle/input/real-estate-price-prediction-moscow/sample_submission.csv"
)
submit.head()
predictions = rf_model.predict(test_df)
predictions
submit["Price"] = predictions
submit.head()
submit.to_csv("rf_submit.csv", index=False)
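# The stacked model above was evaluated but the submission uses rf_model; a minimal sketch
# (with a hypothetical alternative file name) of producing a second submission from the stack:
stack_submit = submit.copy()
stack_submit["Price"] = stack.predict(test_df)
stack_submit.to_csv("stack_submit.csv", index=False)
stack_submit.head()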
# # Optiver Realized Volatility Prediction : EDA + Linear Regression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from sklearn.metrics import r2_score
import glob
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import warnings
warnings.filterwarnings("ignore")
# ## Dataset
# ### book_[train/test].parquet:
# A parquet file partitioned by stock_id. Provides order book data on the most competitive buy and sell orders entered into the market. The top two levels of the book are shared. The first level of the book is more competitive in price terms, so it receives execution priority over the second level (a tiny illustrative snapshot follows the column list below).
# * stock_id - ID code for the stock. Not all stock IDs exist in every time bucket. Parquet coerces this column to the categorical data type when loaded; you may wish to convert it to int8.
# * time_id - ID code for the time bucket. Time IDs are not necessarily sequential but are consistent across all stocks.
# * seconds_in_bucket - Number of seconds from the start of the bucket, always starting from 0.
# * bid_price[1/2] - Normalized prices of the most/second most competitive buy level.
# * ask_price[1/2] - Normalized prices of the most/second most competitive sell level.
# * bid_size[1/2] - The number of shares on the most/second most competitive buy level.
# * ask_size[1/2] - The number of shares on the most/second most competitive sell level.
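# To make the level-1 columns concrete, here is a tiny made-up snapshot together with its
# bid-ask spread and the weighted average price (WAP) that is computed for real data later
# in this notebook (the numbers are illustrative only):
_book_row = {"bid_price1": 0.999, "ask_price1": 1.001, "bid_size1": 300, "ask_size1": 200}
_spread = _book_row["ask_price1"] - _book_row["bid_price1"]
_wap = (
    _book_row["bid_price1"] * _book_row["ask_size1"]
    + _book_row["ask_price1"] * _book_row["bid_size1"]
) / (_book_row["bid_size1"] + _book_row["ask_size1"])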
# ### trade_[train/test].parquet:
# A parquet file partitioned by stock_id. Contains data on trades that actually executed. Usually, in the market, there are more passive buy/sell intention updates (book updates) than actual trades, therefore one may expect this file to be more sparse than the order book.
# * stock_id - Same as above.
# * time_id - Same as above.
# * seconds_in_bucket - Same as above. Note that since trade and book data are taken from the same time window and trade data is more sparse in general, this field does not necessarily start from 0.
# * price - The average price of executed transactions happening in one second. Prices have been normalized and the average has been weighted by the number of shares traded in each transaction.
# * size - The sum number of shares traded.
# * order_count - The number of unique trade orders taking place.
# ### train.csv:
# The ground truth values for the training set.
# * stock_id - Same as above, but since this is a csv the column will load as an integer instead of categorical.
# * time_id - Same as above.
# * target - The realized volatility computed over the 10 minute window following the feature data under the same stock/time_id. There is no overlap between feature and target data. You can find more info in our tutorial notebook.
# ### test.csv
# Provides the mapping between the other data files and the submission file. As with other test files, most of the data is only available to your notebook upon submission with just the first few rows available for download.
# * stock_id - Same as above.
# * time_id - Same as above.
# * row_id - Unique identifier for the submission row. There is one row for each existing time ID/stock ID pair. Each time window is not necessarily containing every individual stock.
# ### sample_submission.csv
# A sample submission file in the correct format.
# * row_id - Same as in test.csv.
# * target - Same definition as in train.csv. The benchmark is using the median target value from train.csv.
#
train = pd.read_csv("../input/optiver-realized-volatility-prediction/train.csv")
test = pd.read_csv("../input/optiver-realized-volatility-prediction/test.csv")
train.head()
test.head()
# ## Target Distribution:
# The target distribution is concentrated near zero with a long right tail (i.e. right-skewed)
mean = np.mean(train["target"])
print(f"Mean : {mean}")
plt.figure(figsize=(8, 5))
sns.distplot(train["target"], bins=50)
plt.title("Target Distribution")
plt.show()
# ### Let's see how many values are greater than 0.02
print(
f"Target count greater than 0.02 : {train['target'][train['target'] >= 0.02].count()}"
)
print(
f"Percentage of total: {(train['target'][train['target'] >= 0.02].count() / train.shape[0]) * 100} %"
)
print(f"Number of rows: {train.shape[0]}")
for col in train.columns:
print(f" {col}: {len(train[col].unique())}")
# So there are 112 different stock ids, 3830 time ids and 414287 target values.
stock = train.groupby("stock_id")["target"].agg(["mean", "sum"]).reset_index()
print(f"Mean: {stock['mean'].mean()}")
print(f"Mean of per-stock target sums: {stock['sum'].mean()}")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
sns.histplot(x=stock["mean"], ax=ax1)
sns.histplot(x=stock["sum"], ax=ax2)
ax1.set_title("Target mean distribution", size=12)
ax2.set_title("Target sum distribution", size=12)
plt.legend()
plt.show()
# So the mean of the per-stock target means is about 0.003 (close to 0), and the mean of the per-stock target sums is around 14.8.
book_train = pd.read_parquet(
"../input/optiver-realized-volatility-prediction/book_train.parquet/stock_id=0"
)
book_test = pd.read_parquet(
"../input/optiver-realized-volatility-prediction/book_test.parquet/stock_id=0"
)
trade_train = pd.read_parquet(
"../input/optiver-realized-volatility-prediction/trade_train.parquet/stock_id=0"
)
trade_test = pd.read_parquet(
"../input/optiver-realized-volatility-prediction/trade_test.parquet/stock_id=0"
)
book_train.head()
df_book = book_train[book_train["time_id"] == 5]
df_book.head()
plt.figure(figsize=(15, 5))
for col in ["bid_price1", "bid_price2", "ask_price1", "ask_price2"]:
sns.lineplot(x="seconds_in_bucket", y=col, data=df_book, label=col)
plt.legend()
plt.show()
df_trade = trade_train[trade_train["time_id"] == 5]
df_trade.head()
plt.figure(figsize=(15, 5))
for col in ["bid_price1", "bid_price2", "ask_price1", "ask_price2"]:
sns.lineplot(x="seconds_in_bucket", y=col, data=df_book, label=col)
sns.lineplot(
x="seconds_in_bucket",
y="price",
data=df_trade,
linewidth=3,
color="black",
label="price",
)
plt.legend()
plt.show()
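# Weighted average price (WAP): each side's quoted price is weighted by the size on the
# *opposite* side, so strong buying interest (a large bid size) pulls the WAP towards the ask.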
df_book["wap"] = (
df_book["bid_price1"] * df_book["ask_size1"]
+ df_book["ask_price1"] * df_book["bid_size1"]
) / (df_book["bid_size1"] + df_book["ask_size1"])
def log_return(list_stock_prices):
return np.log(list_stock_prices).diff()
df_book.loc[:, "log_return"] = log_return(df_book["wap"])
df_book = df_book[~df_book["log_return"].isnull()]
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
realized_vol = realized_volatility(df_book["log_return"])
print(f"Realized volatility for stock_id 0 on time_id 5 is {realized_vol}")
list_order_book_file_train = glob.glob(
"/kaggle/input/optiver-realized-volatility-prediction/book_train.parquet/*"
)
train["row_id"] = train["stock_id"].astype(str) + "-" + train["time_id"].astype(str)
model_dict = {}
def realized_volatility_per_time_id_linear(
file_path, prediction_column_name, train_test=True
):
df_book_data = pd.read_parquet(file_path)
df_book_data["wap"] = (
df_book_data["bid_price1"] * df_book_data["ask_size1"]
+ df_book_data["ask_price1"] * df_book_data["bid_size1"]
) / (df_book_data["bid_size1"] + df_book_data["ask_size1"])
df_book_data["log_return"] = df_book_data.groupby(["time_id"])["wap"].apply(
log_return
)
df_book_data = df_book_data[~df_book_data["log_return"].isnull()]
df_realized_vol_per_stock = pd.DataFrame(
df_book_data.groupby(["time_id"])["log_return"].agg(realized_volatility)
).reset_index()
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(
columns={"log_return": prediction_column_name}
)
stock_id = file_path.split("=")[1]
df_realized_vol_per_stock["row_id"] = df_realized_vol_per_stock["time_id"].apply(
lambda x: f"{stock_id}-{x}"
)
poly = PolynomialFeatures(degree=3)
if train_test:
df_realized_vol_per_stock_joined = train.merge(
df_realized_vol_per_stock[["row_id", prediction_column_name]],
on=["row_id"],
how="right",
)
weights = 1 / np.square(df_realized_vol_per_stock_joined.target)
X = np.array(
df_realized_vol_per_stock_joined[[prediction_column_name]]
).reshape(-1, 1)
X_ = poly.fit_transform(X)
y = df_realized_vol_per_stock_joined.target
reg = LinearRegression().fit(X_, y, sample_weight=weights)
df_realized_vol_per_stock[[prediction_column_name]] = reg.predict(X_)
model_dict[stock_id] = reg
else:
reg = model_dict[stock_id]
X = np.array(df_realized_vol_per_stock[[prediction_column_name]]).reshape(-1, 1)
X_ = poly.fit_transform(X)
df_realized_vol_per_stock[[prediction_column_name]] = reg.predict(X_)
return df_realized_vol_per_stock[["row_id", prediction_column_name]]
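# Note on the fit above: using sample weights of 1 / target**2 makes the least-squares fit
# penalise relative errors rather than absolute ones, which is a reasonable proxy for the
# RMSPE metric used for evaluation; the degree-3 polynomial simply lets the mapping from past
# realized volatility to the target curve instead of forcing a straight line.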
def past_realized_volatility_per_stock_linear(
list_file, prediction_column_name, train_test=True
):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat(
[
df_past_realized,
realized_volatility_per_time_id_linear(
file, prediction_column_name, train_test
),
]
)
return df_past_realized
df_past_realized_train = past_realized_volatility_per_stock_linear(
list_file=list_order_book_file_train, prediction_column_name="pred"
)
train = train[["row_id", "target"]]
df_joined = train.merge(
df_past_realized_train[["row_id", "pred"]], on=["row_id"], how="left"
)
def rmspe(y_true, y_pred):
return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
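# Quick sanity check of the metric on made-up numbers: both predictions below are off by
# exactly 10% in relative terms, so the result should be approximately 0.1.
_rmspe_sanity = rmspe(np.array([1.0, 2.0]), np.array([1.1, 1.8]))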
R2 = round(r2_score(y_true=df_joined["target"], y_pred=df_joined["pred"]), 3)
RMSPE = round(rmspe(y_true=df_joined["target"], y_pred=df_joined["pred"]), 3)
print(f"Performance of the naive prediction: R2 score: {R2}, RMSPE: {RMSPE}")
# ## Submission
list_order_book_file_test = glob.glob(
"/kaggle/input/optiver-realized-volatility-prediction/book_test.parquet/*"
)
df_naive_pred_test = past_realized_volatility_per_stock_linear(
list_file=list_order_book_file_test,
prediction_column_name="target",
train_test=False,
)
df_naive_pred_test.to_csv("submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173933.ipynb
[{"Id": 69173933, "ScriptId": 18444321, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5640630, "CreationDate": "07/27/2021 16:55:56", "VersionNumber": 2.0, "Title": "Volatility Prediction: EDA + Linear Regression", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 214.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 213.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 16}]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head(100)
train_data.corr(method="pearson")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head(100)
# Handling missing values
train_data["Age"] = train_data["Age"].fillna(train_data["Age"].mean())
train_data["Age"] = train_data["Age"].astype(int)
test_data["Age"] = test_data["Age"].fillna(test_data["Age"].mean())
test_data["Age"] = test_data["Age"].astype(int)
train_data["Fare"] = train_data["Fare"].astype(int)
test_data["Fare"] = test_data["Fare"].fillna(test_data["Fare"].mean())
test_data["Fare"] = test_data["Fare"].astype(int)
# Creating a new feature called travelled_alone
SibSp = train_data["SibSp"].tolist()
Parch = train_data["Parch"].tolist()
travelled_alone = []
for i in range(len(SibSp)):
if SibSp[i] + Parch[i] > 0:
travelled_alone += ["No"]
else:
travelled_alone += ["Yes"]
train_data["travelled_alone"] = pd.DataFrame(travelled_alone)
SibSp = test_data["SibSp"].tolist()
Parch = test_data["Parch"].tolist()
travelled_alone = []
for i in range(len(SibSp)):
if SibSp[i] + Parch[i] > 0:
travelled_alone += ["No"]
else:
travelled_alone += ["Yes"]
test_data["travelled_alone"] = pd.DataFrame(travelled_alone)
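# For reference, the same feature can be built without loops (an equivalent vectorized form):
# train_data["travelled_alone"] = np.where(train_data["SibSp"] + train_data["Parch"] > 0, "No", "Yes")
# test_data["travelled_alone"] = np.where(test_data["SibSp"] + test_data["Parch"] > 0, "No", "Yes")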
train_data.head(100)
test_data.head(100)
from sklearn.ensemble import RandomForestClassifier
y = train_data["Survived"]
features = ["Sex", "Pclass", "Fare", "Parch", "Age", "travelled_alone"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
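# Note: calling get_dummies separately on train and test can yield mismatched columns if a
# category happens to be missing from one of the frames; a defensive (optional) fix is:
# X, X_test = X.align(X_test, join="left", axis=1, fill_value=0)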
model = RandomForestClassifier(n_estimators=200, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173323.ipynb
[{"Id": 69173323, "ScriptId": 18869093, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890420, "CreationDate": "07/27/2021 16:47:42", "VersionNumber": 7.0, "Title": "Titanic", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 80.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
# ## You're here!
# Welcome to your first competition in the [ITI's AI Pro training program](https://ai.iti.gov.eg/epita/ai-engineer/)! We hope you enjoy and learn as much as we did preparing this competition.
# ## Introduction
# In the competition, it's required to predict the `Severity` of a car crash given info about the crash, e.g., location.
# This is the getting started notebook. Things are kept simple so that it's easier to understand the steps and modify it.
# Feel free to `Fork` this notebook and share it with your modifications **OR** use it to create your submissions.
# ### Prerequisites
# You should know how to use python and a little bit of Machine Learning. You can apply the techniques you learned in the training program and submit the new solutions!
# ### Checklist
# You can participate in this competition the way you prefer. However, I recommend following these steps if this is your first time joining a competition on Kaggle.
# * Fork this notebook and run the cells in order.
# * Submit this solution.
# * Make changes to the data processing step as you see fit.
# * Submit the new solutions.
# *You can submit up to 5 submissions per day. You can select only one of the submissions you make to be considered in the final ranking.*
# Don't hesitate to leave a comment or contact me if you have any question!
# ## Import the libraries
# We'll use `pandas` to load and manipulate the data. Other libraries will be imported in the relevant sections.
import pandas as pd
import os
import xml.etree.ElementTree as Xet
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn import preprocessing
# ## Exploratory Data Analysis
# In this step, one should load the data and analyze it. However, I'll load the data and do minimal analysis. You are encouraged to do thorough analysis!
# Let's load the data using `pandas` and have a look at the generated `DataFrame`.
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
df = pd.read_csv(os.path.join(dataset_path, "train.csv"))
df_test = pd.read_csv(os.path.join(dataset_path, "test.csv"))
print("The shape of the dataset is {}.\n\n".format(df.shape))
df.head()
# We've got 6407 examples in the dataset with 14 features, 1 ID, and the `Severity` of the crash.
# Looking at the features and a sample of the data, they appear to be a mix of numerical and categorical types. What about some descriptive statistics?
df.drop(columns="ID").describe()
cat_list = ["Crossing", "Junction", "Railway", "Stop", "Amenity"]
df[cat_list] = df[cat_list].astype(int)
df_test[cat_list] = df_test[cat_list].astype(int)
df.head()
df = pd.concat(
[
df,
pd.DataFrame(df["timestamp"].str.split(" ").tolist(), columns=["Date", "Time"]),
],
axis=1,
)
df = df.drop(columns="timestamp")
df = df.drop(columns=["Bump", "Give_Way", "No_Exit", "Roundabout"])
df.head()
df_test = pd.concat(
[
df_test,
pd.DataFrame(
df_test["timestamp"].str.split(" ").tolist(), columns=["Date", "Time"]
),
],
axis=1,
)
df_test = df_test.drop(columns="timestamp")
df_test = df_test.drop(columns=["Bump", "Give_Way", "No_Exit", "Roundabout"])
df.head()
df["Hour"] = [int(i[0]) for i in df["Time"].str.split(":").tolist()]
df_test["Hour"] = [int(i[0]) for i in df_test["Time"].str.split(":").tolist()]
df.head()
def Pad(o, padLen):
if len(o) < padLen:
o = "0" * (padLen - len(o)) + o
return o
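# (Pad mimics the built-in str.zfill for these non-negative strings, e.g. "7".zfill(2) == "07";
# pandas also exposes Series.str.zfill, which could replace the .map(...) calls below.)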
weather_df = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
print("The shape of the dataset is {}.\n\n".format(weather_df.shape))
weather_df[["Year", "Month", "Day"]] = weather_df[["Year", "Month", "Day"]].astype(str)
weather_df["Date"] = (
weather_df["Year"]
+ "-"
+ weather_df["Month"].map(lambda x: Pad(x, 2))
+ "-"
+ weather_df["Day"].map(lambda x: Pad(x, 2))
)
weather_df = weather_df.drop_duplicates(subset=["Year", "Day", "Month", "Hour"])
weather_df = weather_df.drop(columns=["Selected"])
weather_df = weather_df.drop(columns=["Precipitation(in)"])
weather_df = weather_df.drop(columns="Wind_Chill(F)")
weather_df = weather_df.drop(columns=["Year", "Temperature(F)", "Humidity(%)"])
weather_df["Wind_Speed(mph)"] = weather_df["Wind_Speed(mph)"].fillna(
weather_df["Wind_Speed(mph)"].median()
)
weather_df["weather"] = (
weather_df["Weather_Condition"].astype("category").cat.codes * 100
)
weather_df.head()
df = pd.merge(df, weather_df, on=["Date", "Hour"], how="left")
df = df.dropna()
df_test = pd.merge(df_test, weather_df, on=["Date", "Hour"], how="left")
df_test = df_test.dropna()
df.shape
df["Side"] = (df["Side"] == "R").astype(int)
df_test["Side"] = (df_test["Side"] == "R").astype(int)
df.head()
df["weekday"] = pd.to_datetime(df["Date"]).dt.weekday
df_test["weekday"] = pd.to_datetime(df_test["Date"]).dt.weekday
df.head()
cols = ["Date", "description"]
rows = []
# Parsing the XML file
xmlparse = Xet.parse(os.path.join(dataset_path, "holidays.xml"))
root = xmlparse.getroot()
for i in root:
date = i.find("date").text
description = i.find("description").text
rows.append({"Date": date, "description": description})
holidays_df = pd.DataFrame(rows, columns=cols)
holidays_df["desc_encoded"] = (
holidays_df["description"].astype("category").cat.codes + 1
)
holidays_df = holidays_df.drop(columns="description")
holidays_df.head()
df["is_weekend"] = (df["weekday"] == 5) | (df["weekday"] == 6)
df.head()
print(df["is_weekend"].sum())
df = pd.merge(df, holidays_df, on="Date", how="left")
df = df.fillna(0)
df["is_weekend"] = ((df["desc_encoded"] > 0) | df["is_weekend"]).astype(int)
df["is_weekend"].sum()
df_test["is_weekend"] = (df_test["weekday"] == 5) | (df_test["weekday"] == 6)
df_test.head()
print(df_test["is_weekend"].sum())
df_test = pd.merge(df_test, holidays_df, on="Date", how="left")
df_test = df_test.fillna(0)
df_test["is_weekend"] = ((df_test["desc_encoded"] > 0) | df_test["is_weekend"]).astype(
int
)
df_test["is_weekend"].sum()
# df["light_cycle"] = 1
# df["light_cycle"] = df["Hour"].replace(list(np.arange(5, 18)), 0)
# def checkInterval(x, groups):
# for i in range(len(groups) - 1):
# if x < groups[i] and x > groups[i + 1]:
# return i
# return len(groups) - 2
# lat_std = df["Lat"].std()
# lat_mean = df["Lat"].mean()
# ranges = [lat_mean - lat_std*i for i in range(-3, 4, 1)]
# print(ranges)
# df["Lat_encoded"] = df["Lat"].map(lambda x: checkInterval(x, ranges))
# df["Lat_encoded"].head()
# The output shows descriptive statistics for the numerical features, `Lat`, `Lng`, `Distance(mi)`, and `Severity`. I'll use the numerical features to demonstrate how to train the model and make submissions. **However, you shouldn't use only the numerical features for the final submission if you want to make it to the top of the leaderboard.**
# ## Data Splitting
# Now it's time to split the dataset for the training step. Typically the dataset is split into 3 subsets, namely the training, validation and test sets. In our case, the test set is already predefined, so we'll split the "training" set into training and validation sets with a 0.8:0.2 ratio.
# *Note: a good way to generate reproducible results is to set the seed for the algorithms that depend on randomization. This is done with the `random_state` argument in the following command.*
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(
df, test_size=0.2, random_state=42, stratify=df["Severity"]
) # Try adding `stratify` here
X_train = train_df.drop(columns=["ID", "Severity", "Date", "Time", "Weather_Condition"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["ID", "Severity", "Date", "Time", "Weather_Condition"])
y_val = val_df["Severity"]
# As pointed out earlier, I'll use the numerical features to train the classifier. **However, you shouldn't use only the numerical features for the final submission if you want to make it to the top of the leaderboard.**
# This cell is used to select the numerical features. IT SHOULD BE REMOVED AS YOU DO YOUR WORK.
# X_train = X_train[['Lat', 'Lng', 'Distance(mi)']]
# X_val = X_val[['Lat', 'Lng', 'Distance(mi)']]
# ## Model Training
# Let's train a model with the data! We'll train a Random Forest Classifier to demonstrate the process of making submissions.
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
# Train the classifier
classifier = classifier.fit(X_train, y_train)
# Now let's test our classifier on the validation dataset and see the accuracy.
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
# Well, that's a good start, right? A classifier that predicts every example's `Severity` as 2 would get around 0.63, so you should aim to beat that. You should get a better score as you add more features and do better data preprocessing (see the quick check below).
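# A quick check of that majority-class baseline on the validation labels: the fraction of
# rows whose Severity equals 2 is exactly the accuracy of always predicting 2.
majority_baseline = (y_val == 2).mean()
print("Always-predict-2 baseline accuracy:", majority_baseline)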
# ## Submission File Generation
# We have built a model and we'd like to submit our predictions on the test set! In order to do that, we'll load the test set, predict the class and save the submission file.
# First, we'll load the data.
# test_df = pd.read_csv(os.path.join(dataset_path, 'test.csv'))
# test_df.head()
# Note that the test set has the same features and doesn't have the `Severity` column.
# At this stage one must **NOT** forget to apply the same processing done on the training set on the features of the test set.
# Now we'll add `Severity` column to the test `DataFrame` and add the values of the predicted class to it.
# **I'll select the numerical features here as I did in the training set. DO NOT forget to change this step as you change the preprocessing of the training data.**
X_test = df_test.drop(columns=["ID", "Date", "Time", "Weather_Condition"])
# You should update/remove the next line once you change the features used for training
# X_test = X_test[['Lat', 'Lng', 'Distance(mi)']]
y_test_predicted = classifier.predict(X_test)
df_test["Severity"] = y_test_predicted
df_test.head()
# Now we're ready to generate the submission file. The submission file needs the columns `ID` and `Severity` only.
df_test[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173552.ipynb
[{"Id": 69173552, "ScriptId": 18880479, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7368704, "CreationDate": "07/27/2021 16:50:46", "VersionNumber": 2.0, "Title": "Getting Started - Car Crashes' Severity Prediction", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 271.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 267.0, "LinesInsertedFromFork": 143.0, "LinesDeletedFromFork": 14.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 128.0, "TotalVotes": 1}]
import pandas as pd
import pandas_profiling as pp
import numpy as np
import re
from sklearn.preprocessing import LabelBinarizer
from sklearn.impute import KNNImputer
import os
# pip install pandas-profiling
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
df = pd.read_csv(os.path.join(dataset_path, "train.csv"))
print("The shape of the dataset is {}.\n\n".format(df.shape))
df.head()
import xml.etree.ElementTree as ET
def xml_To_df(xml_path):
xml_data = open(xml_path, "r").read() # Read file
root = ET.XML(xml_data) # Parse XML
data = []
cols = []
for i, child in enumerate(root):
data.append([subchild.text for subchild in child])
cols.append(child.tag)
Holidaydf = pd.DataFrame(data).T # Write in DF and transpose it
Holidaydf.columns = cols # Update column names
Holidaydf = Holidaydf.T
Holidaydf = Holidaydf.reset_index()
Holidaydf.drop(columns="index", inplace=True)
Holidaydf.rename(columns={0: "justDate", 1: "HolidayName"}, inplace=True)
Holidaydf = Holidaydf.drop(columns=["HolidayName"])
Holidaydf["justDate"] = pd.to_datetime(Holidaydf["justDate"])
return Holidaydf
# ## Initial profiling of the whole dataset
# pp.ProfileReport(df)
# ## Dropping the ID, Bump and Roundabout based on the initial profiling
# df2 = df.drop(columns=['ID','Bump','Roundabout'])
# pp.ProfileReport(df2)
# ## Checking the significance of each of the categorical features in predicting severity:
# categorical_cols= ['Crossing', 'Give_Way', 'Junction', 'No_Exit', 'Railway', 'Stop',
# 'Amenity', 'Side']
# for c in categorical_cols:
# print(c, df2[c].value_counts(), "\n")
# From the quick investigation above we can see clearly that some of these features are not
# indicative of the severity of the accident, so we will remove them.
# The columns we will be removing are: "Give_Way", "No_Exit"
# df3= df2.drop(columns= ["Give_Way", "No_Exit"])
# df3.head()
# ### Converting the Side column categories into 0s and 1s:
# print('values before: ', df3.Side.unique())
# # change the side featue into zero for L, one for R
# df3['Side']= df2['Side'].apply(lambda x: 1 if x== 'R' else -1)
# print('values after', df3.Side.unique())
# df3.head()
# ## Splitting the timestamp data into 4 separate columns: dateyear, datemonth, dateday, datehour:
# df3['dateyear']= pd.to_datetime(df3['timestamp']).dt.year
# df3['datemonth']= pd.to_datetime(df3['timestamp']).dt.month
# df3['dateday']= pd.to_datetime(df3['timestamp']).dt.day
# df3['datehour']= pd.to_datetime(df3['timestamp']).dt.hour
# df4= df3.drop(columns= ['timestamp'])
# df4.head()
# ## Finding the correlation between the severity and the rest of the features:
# for col in df4.columns:
# print(col+": ", df4['Severity'].corr(df4[col]))
# df4.head()
# ### Checking correlation of the characteristics of the location and each other
# pp.ProfileReport(df4)
# for i in df4.columns:
# for j in df4.columns:
# print(i+" and "+ j, df4[i].corr(df4[j]))
# print("\n")
# df4.head()
# df4['timestamp']= df['timestamp']
# df4
# ### Attempt to find the geolocation:
# from geopy.geocoders import Nominatim
# geolocator = Nominatim(user_agent="geoapiExercises")
# # lat, long= str(df5['Lat'][0]), str(df5['Lng'][0])
# lat, long
# address= geolocator.reverse(lat+","+long).raw['address']
# zipcode = address.get('postcode')
# zipcode
# df100= df5[:5]
# df100.head()
# def location(df):
# return geolocator.reverse(str(df['Lat']) + ',' + str(df['Lng'])).raw['address'].get('postcode')
# df5['location']= df5.apply(location, axis= 1)
# df5['location'].head(50)
# # Determining days off:
# df4['timestamp']= pd.to_datetime(df4['timestamp'])
# df4['dayNum']= df4['timestamp'].dt.dayofweek
# df4.head()
# df4['day_off']= df4['dayNum'].apply(lambda x: 1 if (x== 6 or x== 5) else 0)
# df5= df4.drop(columns=['timestamp', 'dayNum'])
# df6.info()
# ## adding an attribute to represent whether the day was a holiday or not:
# import xml.etree.ElementTree as ET
# xml_data = open('holidays.xml', 'r').read() # Read file
# root = ET.XML(xml_data) # Parse XML
# data = []
# cols = []
# for i, child in enumerate(root):
# data.append([subchild.text for subchild in child])
# cols.append(child.tag)
# dfHoliday = pd.DataFrame(data).T # Write in DF and transpose it
# dfHoliday.columns = cols # Update column names
# dfHoliday=dfHoliday.T
# dfHoliday=dfHoliday.reset_index()
# dfHoliday.drop(columns='index',inplace=True)
# dfHoliday.rename(columns = {0:'justDate',1:'HolidayName'}, inplace = True)
# dfHoliday
weather_df = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
weather_df["timestamp"] = pd.to_datetime(weather_df[["Year", "Month", "Day", "Hour"]])
weather_df.drop(columns=["Year", "Month", "Day", "Hour"], inplace=True)
weather_df = weather_df.drop_duplicates(subset=["timestamp"], keep="first")
weather_dfNew = weather_df.drop(
columns=["Wind_Chill(F)", "Precipitation(in)", "Selected"]
)
weather_dfNew.sort_values("timestamp", inplace=True)
imputer = KNNImputer()
weather_dfNew["Temperature(F)"] = imputer.fit_transform(
weather_dfNew[["Temperature(F)"]]
)
weather_dfNew["Humidity(%)"] = imputer.fit_transform(weather_dfNew[["Humidity(%)"]])
weather_dfNew["Wind_Speed(mph)"] = imputer.fit_transform(
weather_dfNew[["Wind_Speed(mph)"]]
)
weather_dfNew["Visibility(mi)"] = imputer.fit_transform(
weather_dfNew[["Visibility(mi)"]]
)
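# (Note: fitting KNNImputer on one column at a time gives it no other features to measure
# neighbour distances with, so for the rows being imputed it effectively falls back to a
# mean-style fill; imputing all numeric columns in a single call would use real neighbours.)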
weather_dfNew.dropna(subset=["Weather_Condition"], inplace=True)
weather_dfNew.reset_index(drop=True, inplace=True)
def weather_encoding(df):
df6 = df
weather = "!".join(df6["Weather_Condition"].dropna().unique().tolist())
weather = np.unique(
np.array(
re.split(
"!|\s/\s|\sand\s|\swith\s|Partly\s|Mostly\s|Blowing\s|Freezing\s",
weather,
)
)
).tolist()
df6["Clear"] = np.where(
df6["Weather_Condition"].str.contains("Clear|Fair", case=False, na=False),
True,
False,
)
df6["Cloud"] = np.where(
df6["Weather_Condition"].str.contains(
"Cloud|Overcast|Partly Cloudy|Mostly Cloudy|Scattered Clouds|Cloudy",
case=False,
na=False,
),
True,
False,
)
df6["Rain"] = np.where(
df6["Weather_Condition"].str.contains(
"Rain|storm|Light Rain|Light Thunderstorms and Rain|Light Drizzle",
case=False,
na=False,
),
True,
False,
)
df6["Heavy_Rain"] = np.where(
df6["Weather_Condition"].str.contains(
"Heavy Rain|Rain Shower|Heavy T-Storm|Heavy Thunderstorms",
case=False,
na=False,
),
True,
False,
)
df6["Snow"] = np.where(
df6["Weather_Condition"].str.contains("Snow|Sleet|Ice", case=False, na=False),
True,
False,
)
df6["Heavy_Snow"] = np.where(
df6["Weather_Condition"].str.contains(
"Heavy Snow|Heavy Sleet|Heavy Ice Pellets|Snow Showers|Squalls",
case=False,
na=False,
),
True,
False,
)
df6["Fog"] = np.where(
df6["Weather_Condition"].str.contains(
"Fog|Patches of Fog|Haze|Shallow Fog|Smoke|Mist", case=False, na=False
),
True,
False,
)
weather = ["Clear", "Cloud", "Rain", "Heavy_Rain", "Snow", "Heavy_Snow", "Fog"]
for i in weather:
df6.loc[df6["Weather_Condition"].isnull(), i] = df6.loc[
df6["Weather_Condition"].isnull(), "Weather_Condition"
]
df6[i] = df6[i].astype("bool")
df6.loc[:, ["Weather_Condition"] + weather]
df6 = df6.drop(["Weather_Condition"], axis=1)
def one_column_encoding(row):
if row["Fog"]:
return 6
elif row["Heavy_Snow"] == True:
return 5
elif row["Snow"] == True:
return 4
elif row["Heavy_Rain"] == True:
return 3
elif row["Rain"] == True:
return 2
elif row["Cloud"] == True:
return 1
else:
return 0
df6["Condition"] = df6.apply(lambda row: one_column_encoding(row), axis=1)
# df6.drop(columns=['Fog','Heavy_Snow','Snow','Heavy_Rain','Rain','Cloud'],inplace=True)
df6.drop(columns=["Condition", "Heavy_Snow", "Snow", "Heavy_Rain"], inplace=True)
return df6
def data_prep(file_path, xml_path):
df = pd.read_csv(file_path)
    # dropping unimportant columns:
df = df.drop(
columns=[
"ID",
"Bump",
"Give_Way",
"No_Exit",
"Roundabout",
"Railway",
"Amenity",
]
)
# extracting year, and month
df["dateyear"] = pd.to_datetime(df["timestamp"]).dt.year
df["datemonth"] = pd.to_datetime(df["timestamp"]).dt.month
# df['timestamp_w'] = pd.to_datetime(df["timestamp"])
# df = df.assign(timestamp_w = df.timestamp_w.dt.floor('H'))
df["timestamp"] = pd.to_datetime(
df["timestamp"]
    ).dt.date  # eliminating the hour:min:sec part from the timestamp
df["dayNum"] = pd.to_datetime(
df["timestamp"]
).dt.dayofweek # extracting which day of the week was that
df["timestamp"] = pd.to_datetime(
df["timestamp"]
) # converting timestamp to datetime object
# create a label to determine weekend
df["day_off"] = df["dayNum"].apply(lambda x: 1 if (x == 6 or x == 5) else 0)
Holidaydf = xml_To_df(xml_path)
# Taking the holidays into consideration in the column of day off:
day_off = list(df["day_off"])
timestamp = list(df["timestamp"].astype(str))
Holiday = list(Holidaydf["justDate"].astype(str))
holiday_range = range(len(Holiday))
time_range = range(len(timestamp))
for i in holiday_range:
for j in time_range:
if timestamp[j] == Holiday[i]:
day_off[j] = 1
df["day_off"] = pd.Series(day_off)
df["timestamp"] = pd.to_datetime(pd.read_csv(file_path)["timestamp"])
df = df.assign(timestamp=df.timestamp.dt.floor("H"))
df = pd.merge(left=df, right=weather_dfNew, how="left")
df["Side"] = df["Side"].apply(lambda x: 1 if x == "R" else -1)
df = weather_encoding(df)
df = df.drop(
columns=[
"Crossing",
"Junction",
"Stop",
"Side",
"dayNum",
"timestamp",
"Humidity(%)",
]
)
df.dropna(
subset=["Temperature(F)", "Wind_Speed(mph)", "Visibility(mi)"], inplace=True
)
return df
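# (For reference, the nested holiday loop inside data_prep could be replaced by a vectorized
# check along the lines of:
#     df["day_off"] = (df["day_off"].astype(bool) | df["timestamp"].isin(Holidaydf["justDate"])).astype(int)
# assuming, as in the function above, that both columns hold midnight-normalised datetimes.)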
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
training_path = os.path.join(dataset_path, "train.csv")
xml_path = os.path.join(dataset_path, "holidays.xml")
df = data_prep(training_path, xml_path)
# # ML Model:
from sklearn.model_selection import train_test_split
# df5.drop(columns= ['Lat'], axis= 1)
train_df, val_df = train_test_split(
df, test_size=0.2, random_state=42, stratify=df["Severity"]
) # Try adding `stratify` here
print(df)
X_train = train_df.drop(columns=["Severity"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["Severity"])
y_val = val_df["Severity"]
# X_train.head()
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=15, random_state=42)
# Train the classifier
classifier = classifier.fit(X_train, y_train)
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
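# A single hold-out split can be noisy; cross-validation gives a steadier estimate of the
# same model (a minimal sketch, reusing the training features and labels prepared above):
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(
    RandomForestClassifier(max_depth=15, random_state=42), X_train, y_train, cv=5
)
print("Cross-validated accuracy: {:.3f} +/- {:.3f}".format(cv_scores.mean(), cv_scores.std()))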
# ## Taking Weather into consideration:
# weather_df = pd.read_csv('weather-sfcsv.csv')
# severity_df = df.drop(df.columns.difference(['Severity','timestamp']), 1)
# severity_df['timestamp']=pd.to_datetime(df['timestamp'])
# severity_df = severity_df.assign(timestamp = severity_df.timestamp.dt.round('H'))
# severity_df
# # severity_df['Month']=pd.DatetimeIndex(df['timestamp']).month
# # severity_df['Day']=pd.DatetimeIndex(df['timestamp']).day
# # severity_df['Hour']=pd.DatetimeIndex(df['timestamp']).hour
# # result = pd.merge(severity_df, weather_df, how='inner', on=['Year', 'Month', 'Day', 'Hour'])
# # result
# # weather_df['Weather_Condition'].value_counts()
# # weather_df['timestamp'] = weather_df['Year']+"-"+weather_df['Month']+"-"+weather_df['Day']+" "+weather_df['Hour']+":00:00"
# weather_df['timestamp'] = pd.to_datetime(weather_df[['Year','Month','Day','Hour']])
# weather_df.drop(columns=['Year','Month','Day','Hour'],inplace=True)
# print(weather_df['timestamp'].value_counts())
# # print(severity_df['timestamp'].value_counts())
# weather_df = weather_df.drop_duplicates(subset = ['timestamp'], keep = 'first')
# print(weather_df['timestamp'].value_counts())
# severity_df.head()
# df5['timestamp']=pd.to_datetime(df['timestamp'])
# df5 = df5.assign(timestamp = df5.timestamp.dt.round('H'))
# df6= df5.merge(weather_df,on='timestamp',how='left')
# df6.sort_values('timestamp',inplace=True)
# pp.ProfileReport(df6)
# df6.fillna(method='bfill',inplace=True)
# pp.ProfileReport(df6)
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
test_path = os.path.join(dataset_path, "test.csv")
xml_path = os.path.join(dataset_path, "holidays.xml")
df_test = data_prep(test_path, xml_path)
X_test = df_test
# You should update/remove the next line once you change the features used for training
# X_test = X_test[['Lat', 'Lng', 'Distance(mi)']]
y_test_predicted = classifier.predict(X_test)
X_test["Severity"] = y_test_predicted
X_test.head()
X_test["ID"] = pd.read_csv(test_path)["ID"]
X_test.info()
# X_test = test_df.drop(columns=['ID'])
# You should update/remove the next line once you change the features used for training
# X_test = X_test[['Lat', 'Lng', 'Distance(mi)']]
# y_test_predicted = classifier.predict(X_test)
# test_df['Severity'] = y_test_predicted
# test_df.head()
X_test[["ID", "Severity"]].to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173173.ipynb
| null | null |
[{"Id": 69173173, "ScriptId": 18844962, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1049706, "CreationDate": "07/27/2021 16:45:53", "VersionNumber": 6.0, "Title": "Getting Started - Car Crashes' Severity Prediction", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 345.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 344.0, "LinesInsertedFromFork": 316.0, "LinesDeletedFromFork": 113.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 29.0, "TotalVotes": 3}]
| null | null | null | null |
| false | 0 | 4,509 | 3 | 4,509 | 4,509 |
||
69173598
|
<jupyter_start><jupyter_text>Chest X-Ray Images (Pneumonia)
### Context
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5

Figure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6
The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs.
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5
### Content
The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.
Kaggle dataset identifier: chest-xray-pneumonia
<jupyter_script># # Content
# Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
# For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for processing. In order to account for any grading errors, the evaluation set was also checked by a third expert.
# # Data
# The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
# # Business Challenge
# Traditionally, bacterial or viral pneumonia is diagnosed by a radiologist examining a chest X-ray. Deep learning techniques for detecting pneumonia can add a layer of objectivity to this diagnostic process. Deploying machine learning systems in the healthcare industry can facilitate early disease detection and reduce the number of false positives and false negatives.
# # Acknowledgements
# https://data.mendeley.com/datasets/rscbjbr9sj/2
# # 1. Importing Data & Libraries
# Libraries
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras import layers
from sklearn.metrics import confusion_matrix, classification_report
# Train/test/validation path
train_path = "../input/chest-xray-pneumonia/chest_xray/train"
test_path = "../input/chest-xray-pneumonia/chest_xray/test"
validation_path = "../input/chest-xray-pneumonia/chest_xray/val"
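# Before building the input pipelines, it is worth checking how many images each class has
# (a minimal sketch, assuming the NORMAL/PNEUMONIA sub-folders described above):
for split_name, split_path in [("train", train_path), ("val", validation_path), ("test", test_path)]:
    counts = {cls: len(os.listdir(os.path.join(split_path, cls))) for cls in os.listdir(split_path)}
    print(split_name, counts)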
# # 2. Data Preprocessing
# Let's plot some images from the training set
train_data = tf.keras.preprocessing.image_dataset_from_directory(train_path)
plt.figure(figsize=(10, 10))
for images, labels in train_data.take(1):
for i in range(12):
plt.subplot(3, 4, i + 1)
plt.imshow(np.squeeze(images[i].numpy().astype("uint8")))
plt.title(train_data.class_names[labels[i]])
plt.axis("off")
# Image Data Generator API
train_data_generator = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255.0
)
valid_data_generator = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255.0
)
test_data_generator = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255.0
)
# Call Keras's flow from directory method
train_set = train_data_generator.flow_from_directory(
train_path,
target_size=(224, 224),
class_mode="binary",
color_mode="grayscale",
batch_size=32,
shuffle=True,
)
validation_set = test_data_generator.flow_from_directory(
validation_path,
target_size=(224, 224),
class_mode="binary",
color_mode="grayscale",
batch_size=32,
shuffle=True,
)
test_set = test_data_generator.flow_from_directory(
test_path,
target_size=(224, 224),
class_mode="binary",
color_mode="grayscale",
batch_size=32,
shuffle=False,
)
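# The generators above only rescale the pixels. ImageDataGenerator can also apply light
# on-the-fly augmentation, a common way to reduce the overfitting discussed later; a sketch
# of such a generator (not wired into the training below):
augmented_train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1.0 / 255.0,
    rotation_range=10,       # small random rotations
    width_shift_range=0.1,   # random horizontal shifts
    height_shift_range=0.1,  # random vertical shifts
    zoom_range=0.1,          # random zoom
)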
# # 3. Model Preprocessing & Deployment
# Sequential API
model = tf.keras.Sequential(
[
layers.Conv2D(32, 3, activation="relu"),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, activation="relu"),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(1, activation="sigmoid"),
]
)
# Configure hyperparameters and accuracy metrics
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=[
"accuracy",
tf.keras.metrics.AUC(name="AUC"),
tf.keras.metrics.Precision(name="Precision"),
tf.keras.metrics.Recall(name="Recall"),
],
)
# Train the model
model_base = model.fit(
train_set,
batch_size=32,
validation_data=validation_set,
epochs=10,
)
pd.DataFrame(model_base.history).plot(figsize=(10, 7), xlabel="epochs")
# # 4. Model Evaluation
# Model Results on the Test set
def result():
results = model_base.model.evaluate(test_set, verbose=0)
accuracy = results[1]
auc = results[2]
precision = results[3]
recall = results[4]
print("Accuracy: {:.2f}".format(accuracy))
print("AUC: {:.2f}".format(auc))
print("Precision: {:.2f}".format(precision))
print("Recall: {:.2f}".format(recall))
result()
# The model clearly seems to be overfitting. The low precision indicates a high number of false positives, which is understandable given that the dataset is imbalanced. Let's plot a confusion matrix to confirm this observation.
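# One common mitigation for this imbalance is to weight the loss per class. A minimal sketch
# (assuming train_set.classes holds the integer labels, as flow_from_directory provides):
from sklearn.utils.class_weight import compute_class_weight

class_weights = compute_class_weight(
    class_weight="balanced", classes=np.unique(train_set.classes), y=train_set.classes
)
class_weight_dict = dict(enumerate(class_weights))
print(class_weight_dict)
# These weights could then be passed to model.fit(..., class_weight=class_weight_dict) when retraining.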
# Confusion Matrix
def model_evaluation(model, test_data):
    # np.int was removed from NumPy; plain int works for the thresholded predictions
    y_pred = np.squeeze((model_base.model.predict(test_set) >= 0.5).astype(int))
    cm = confusion_matrix(test_set.labels, y_pred)
    names = ["True Neg", "False Pos", "False Neg", "True Pos"]
    count = ["{0:0.0f}".format(value) for value in cm.flatten()]
    percentages = ["{0:.2%}".format(value) for value in cm.flatten() / np.sum(cm)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(names, count, percentages)]
labels = np.asarray(labels).reshape(2, 2)
plt.figure(figsize=(7, 7))
sns.heatmap(cm, annot=labels, fmt="", vmin=0, cmap="Blues", cbar=False)
plt.xticks(ticks=np.arange(2) + 0.5, labels=["Negative", "Positive"])
plt.yticks(ticks=np.arange(2) + 0.5, labels=["Negative", "Positive"])
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Confusion Matrix")
plt.show()
clr = classification_report(
test_set.labels, y_pred, target_names=["NEGATIVE", "POSITIVE"]
)
print("Classification Report:\n\n", clr)
model_evaluation(model, test_set)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173598.ipynb
|
chest-xray-pneumonia
|
paultimothymooney
|
[{"Id": 69173598, "ScriptId": 18879869, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4789793, "CreationDate": "07/27/2021 16:51:24", "VersionNumber": 3.0, "Title": "Pneumonia Classification", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 172.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 172.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 92022127, "KernelVersionId": 69173598, "SourceDatasetVersionId": 23812}]
|
[{"Id": 23812, "DatasetId": 17810, "DatasourceVersionId": 23851, "CreatorUserId": 1314380, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2018 19:41:59", "VersionNumber": 2.0, "Title": "Chest X-Ray Images (Pneumonia)", "Slug": "chest-xray-pneumonia", "Subtitle": "5,863 images, 2 categories", "Description": "### Context\n\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n\n\nFigure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6\nThe normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse \u2018\u2018interstitial\u2019\u2019 pattern in both lungs.\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n### Content\n\nThe dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal). \n\nChest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children\u2019s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients\u2019 routine clinical care. \n\nFor the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.\n\n### Acknowledgements\n\nData: https://data.mendeley.com/datasets/rscbjbr9sj/2\n\nLicense: [CC BY 4.0][1]\n\nCitation: http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n![enter image description here][2]\n\n\n### Inspiration\n\nAutomated methods to detect and classify human diseases from medical images.\n\n\n [1]: https://creativecommons.org/licenses/by/4.0/\n [2]: https://i.imgur.com/8AUJkin.png", "VersionNotes": "train/test/val", "TotalCompressedBytes": 1237249419.0, "TotalUncompressedBytes": 1237249419.0}]
|
[{"Id": 17810, "CreatorUserId": 1314380, "OwnerUserId": 1314380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23812.0, "CurrentDatasourceVersionId": 23851.0, "ForumId": 25540, "Type": 2, "CreationDate": "03/22/2018 05:42:41", "LastActivityDate": "03/22/2018", "TotalViews": 2063138, "TotalDownloads": 237932, "TotalVotes": 5834, "TotalKernels": 2058}]
|
[{"Id": 1314380, "UserName": "paultimothymooney", "DisplayName": "Paul Mooney", "RegisterDate": "10/05/2017", "PerformanceTier": 5}]
|
| false | 0 | 1,766 | 2 | 2,242 | 1,766 |
||
69173066
|
# **Group 10 Team Members:**
# Fady Nasser
# Omar Hisham
# Saieed Osama
# Adding Imports
import pandas as pd
import numpy as np
import os
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
import xml.etree.ElementTree as ET
# Dataset Path
dataset_path = "/kaggle/input/car-crashes-severity-prediction"
# Load Training Dataset
df = pd.read_csv(os.path.join(dataset_path, "train.csv"))
# Data PreProcessing
df = df * 1 # To Convert True And False to Int
df["Side"] = df["Side"].replace({"R": 0, "L": 1})
df["Year"] = pd.DatetimeIndex(df["timestamp"]).year
df["Month"] = pd.DatetimeIndex(df["timestamp"]).month
df["Day"] = pd.DatetimeIndex(df["timestamp"]).day
df["Hour"] = pd.DatetimeIndex(df["timestamp"]).hour
df["Quarter"] = pd.DatetimeIndex(df["timestamp"]).quarter
df["WeekDay"] = pd.DatetimeIndex(df["timestamp"]).dayofweek
df["WeekYear"] = pd.DatetimeIndex(df["timestamp"]).weekofyear
df["WeekEnd"] = np.where(df["WeekDay"] < 5, 0, 1)
df["Morning_Midnight"] = np.where((df["Hour"] >= 6) & (df["Hour"] < 18), 0, 1)
df["Summer_Winter"] = np.where((df["Month"] >= 5) & (df["Month"] < 11), 0, 1)
df["Lat"] = df["Lat"] - np.mean(df["Lat"])
df["Lng"] = df["Lng"] - np.mean(df["Lng"])
print(df.shape)
df.head()
# Load Weather Dataset
Weather_df = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
Weather_df["Selected"] = Weather_df["Selected"].replace({"No": 0, "Yes": 1})
Weather_df.Weather_Condition = pd.Categorical(Weather_df.Weather_Condition)
Weather_df["Weather_Condition_Code"] = Weather_df.Weather_Condition.cat.codes
print(Weather_df.shape)
Weather_df.drop(columns=["Weather_Condition"], inplace=True)
Weather_df.fillna(Weather_df.mean(), inplace=True)
Weather_df = Weather_df.sort_values(["Year", "Month", "Day", "Hour"])
Weather_df.drop_duplicates(
subset=["Year", "Month", "Day", "Hour"], keep="first", inplace=True
)
print(Weather_df.shape)
Weather_df.head()
# Merge Train Dataset with Weather Dataset
Merged_df = pd.merge(df, Weather_df, on=["Year", "Month", "Day", "Hour"], how="inner")
Merged_df.head()
# Load Holiday Dataset
root = ET.parse(os.path.join(dataset_path, "holidays.xml")).getroot()
tags = {"tags": []}
for elem in root:
    # Element.getchildren() was removed in Python 3.9; access the children by tag name instead
    tag = {
        "date": elem.find("date").text,
        "description": elem.find("description").text,
    }
    tags["tags"].append(tag)
df_holidays = pd.DataFrame(tags["tags"])
df_holidays["Year"] = pd.DatetimeIndex(df_holidays["date"]).year
df_holidays["Month"] = pd.DatetimeIndex(df_holidays["date"]).month
df_holidays["Day"] = pd.DatetimeIndex(df_holidays["date"]).day
df_holidays["Is_Holiday"] = 1
print(df_holidays.shape)
df_holidays.head()
# Preparing the final dataset for preprocessing
# Final_df = pd.merge(Merged_df,df_holidays,on=['Year','Month','Day'],how="left")
Final_df = Merged_df
print(Final_df.shape)
Final_df = Final_df.replace(np.nan, 0)  # assign the result, otherwise the fill has no effect
Final_df = Final_df.drop(
columns=["timestamp", "Bump", "Give_Way", "No_Exit", "Roundabout", "Selected"]
)
print(Final_df.shape)
Final_df.head()
Final_df.drop(columns="ID").describe()
# Calculate The heatmap and the rank of each generated feature
sns.heatmap(Final_df.corr())
estimator = RandomForestClassifier(max_depth=2, random_state=0)
selector = RFE(estimator, n_features_to_select=1, step=1)
selector = selector.fit(Final_df.drop(columns=["Severity", "ID"]), Final_df["Severity"])
for i in range(len(Final_df.drop(columns=["Severity", "ID"]).columns)):
print(
list(Final_df.drop(columns=["Severity", "ID"]).columns)[i], selector.ranking_[i]
)
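# The ranking is easier to read when sorted; a small convenience sketch using the fitted selector above:
feature_cols = Final_df.drop(columns=["Severity", "ID"]).columns
for rank, name in sorted(zip(selector.ranking_, feature_cols)):
    print(rank, name)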
# Split Data For Training
train_df, val_df = train_test_split(Final_df, test_size=0.2, random_state=42)
# Top Features :- ['Lng','Lat','Stop','Distance(mi)','Year','Precipitation(in)', 'Wind_Chill(F)', 'Crossing']
X_train = train_df[
["Lat", "Lng", "Crossing", "Stop", "Year", "Wind_Chill(F)", "Precipitation(in)"]
]
y_train = train_df["Severity"]
X_val = val_df[
["Lat", "Lng", "Crossing", "Stop", "Year", "Wind_Chill(F)", "Precipitation(in)"]
]
y_val = val_df["Severity"]
# Create an instance of the classifier model
classifier = RandomForestClassifier(max_depth=2, random_state=0)
classifier = classifier.fit(X_train, y_train)
print(
"The accuracy of the classifier on the validation set is ",
(classifier.score(X_val, y_val)),
)
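# max_depth=2 above was picked by hand; a small grid search is one way to tune it
# (a hedged sketch, reusing the same training features and labels as above):
from sklearn.model_selection import GridSearchCV

param_grid = {"max_depth": [2, 4, 8, 16], "n_estimators": [100, 200]}
search = GridSearchCV(RandomForestClassifier(random_state=0), param_grid, cv=3)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)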
# Load Testing Dataset
test_df = pd.read_csv(os.path.join(dataset_path, "test.csv"))
test_df = test_df * 1
test_df["Side"] = test_df["Side"].replace({"R": 0, "L": 1})
test_df["Year"] = pd.DatetimeIndex(test_df["timestamp"]).year
test_df["Month"] = pd.DatetimeIndex(test_df["timestamp"]).month
test_df["Day"] = pd.DatetimeIndex(test_df["timestamp"]).day
test_df["Hour"] = pd.DatetimeIndex(test_df["timestamp"]).hour
test_df["Quarter"] = pd.DatetimeIndex(test_df["timestamp"]).quarter
test_df["WeekDay"] = pd.DatetimeIndex(test_df["timestamp"]).dayofweek
test_df["WeekYear"] = pd.DatetimeIndex(test_df["timestamp"]).weekofyear
test_df["WeekEnd"] = np.where(test_df["WeekDay"] < 5, 0, 1)
test_df["Morning_Midnight"] = np.where(
(test_df["Hour"] >= 6) & (test_df["Hour"] < 18), 0, 1
)
test_df["Summer_Winter"] = np.where(
(test_df["Month"] >= 5) & (test_df["Month"] < 11), 0, 1
)
test_df["Lat"] = test_df["Lat"] - np.mean(test_df["Lat"])
test_df["Lng"] = test_df["Lng"] - np.mean(test_df["Lng"])
print(test_df.shape)
test_df.head()
# Merge Test Dataset with Weather Dataset
test_df = pd.merge(
test_df, Weather_df, on=["Year", "Month", "Day", "Hour"], how="inner"
)
print(test_df.shape)
test_df = test_df.replace(np.nan, 0)  # assign the result, otherwise the fill has no effect
test_df = test_df.drop(
columns=["timestamp", "Bump", "Give_Way", "No_Exit", "Roundabout", "Selected"]
)
print(test_df.shape)
# Selecting the feature columns for testing
X_test = test_df[
["Lat", "Lng", "Crossing", "Stop", "Year", "Wind_Chill(F)", "Precipitation(in)"]
]
# Predict The Output
y_test_predicted = classifier.predict(X_test)
test_df["Severity"] = y_test_predicted
test_df.head()
# Writing the submission.csv file
test_df[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173066.ipynb
| null | null |
[{"Id": 69173066, "ScriptId": 18845535, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7985475, "CreationDate": "07/27/2021 16:44:51", "VersionNumber": 10.0, "Title": "Gigabyte - Car Crashes' Severity Prediction", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 157.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 156.0, "LinesInsertedFromFork": 127.0, "LinesDeletedFromFork": 112.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 30.0, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,128 | 0 | 2,128 | 2,128 |
||
69173930
|
# ### First, import the libraries we need
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xml.etree.ElementTree as xet
# ### Importing datasets
root_path = "/kaggle/input/car-crashes-severity-prediction/"
df_train = pd.read_csv(os.path.join(root_path, "train.csv"))
print('"train.csv" shape: {}'.format(df_train.shape))
df_train.head()
holidays_cols = ["Date", "Description"]
holidays_rows = []
xml_parse = xet.parse(os.path.join(root_path, "holidays.xml"))
xml_root = xml_parse.getroot()
for elem in xml_root:
row = {"Date": elem.find("date").text, "Description": elem.find("description").text}
holidays_rows.append(row)
df_holidays = pd.DataFrame(holidays_rows, columns=holidays_cols)
df_holidays.to_csv("/kaggle/working/holidays.csv", index=False)
print('"df_holidays" shape: {}'.format(df_holidays.shape))
df_holidays.head()
df_weather = pd.read_csv(os.path.join(root_path, "weather-sfcsv.csv"))
print('"df_weather" shape: {}'.format(df_weather.shape))
df_weather.head()
from sklearn.base import BaseEstimator, TransformerMixin
class MergeData(BaseEstimator, TransformerMixin):
def __init__(self, holidays, weather):
self.holidays = holidays.copy()
self.weather = weather.copy()
def fit(self, X, y=None):
return self
def transform(self, X):
        # before merging, convert the timestamp column into (year, month, day, hour);
        # first we need to convert its type to datetime
Xt = X.copy()
Xt["timestamp"] = pd.to_datetime(Xt["timestamp"], format="%Y-%m-%dT%H:%M:%S")
# now we can convert it
Xt["Year"] = Xt["timestamp"].dt.year
Xt["Month"] = Xt["timestamp"].dt.month
Xt["Day"] = Xt["timestamp"].dt.day
Xt["Hour"] = Xt["timestamp"].dt.hour
# do the same with df_holidays
self.holidays["Date"] = pd.to_datetime(self.holidays["Date"], format="%Y-%m-%d")
self.holidays["Year"] = self.holidays["Date"].dt.year
self.holidays["Month"] = self.holidays["Date"].dt.month
self.holidays["Day"] = self.holidays["Date"].dt.day
# drop old columns
Xt.drop(columns="timestamp", inplace=True)
holidays_new = self.holidays.drop(columns="Date", inplace=False)
df_merged_holidays = Xt.merge(
holidays_new, on=["Year", "Month", "Day"], how="left"
)
# now we can merge the resulting dataframe with weather dataframe
Xt = df_merged_holidays.merge(
self.weather, on=["Year", "Month", "Day", "Hour"], how="left"
)
        # drop duplicates; the merge can yield several rows for the same accident,
        # so deduplicate on the ID column only
Xt.drop_duplicates(subset=["ID"], inplace=True)
return Xt
class DropCols(BaseEstimator, TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
Xt.drop(columns=self.cols, inplace=True)
return Xt
class FillNa(BaseEstimator, TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
for col in self.cols:
if Xt[col].dtype in [np.float64, np.int64]:
median = Xt[col].median()
Xt[col].replace(np.nan, median, inplace=True)
elif col == "Description":
# Xt['is_holiday'] = Xt['Description'].apply(lambda row: row is not np.nan)
value = "no_holiday"
Xt[col].replace(np.nan, value, inplace=True)
# Xt.drop(columns='Description', inplace=True)
else:
top = Xt[col].value_counts().idxmax()
Xt[col].replace(np.nan, top, inplace=True)
return Xt
class ScalerExcept(BaseEstimator, TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
self.cols.append("Severity")
self.cols_scale = [
col for col in X.columns.to_list() if X[col].dtype in [np.float64, np.int64]
]
Xt = X.copy()
for col in self.cols_scale:
if col in self.cols:
continue
Xt[col] = Xt[col] / Xt[col].max()
return Xt
class Dummy(BaseEstimator, TransformerMixin):
def __init__(self, cols, holidays, weather):
self.cols = cols
self.holidays = holidays.copy()
self.weather = weather.copy()
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
# data should be in numbers, so we need to convert categorical data to numbers
# here we need to convert only Weather_Condition and Side columns
for col in self.cols:
if col in self.holidays.columns.to_list():
values = list(self.holidays[col].unique())
Xt[values] = 0
col_dummy = pd.get_dummies(Xt[col])
Xt[col_dummy.columns.to_list()] = col_dummy
elif col in self.weather.columns.to_list():
values = list(self.weather[col].unique())
Xt[values] = 0
col_dummy = pd.get_dummies(Xt[col])
Xt[col_dummy.columns.to_list()] = col_dummy
else:
col_dummy = pd.get_dummies(Xt[col])
# add the new columns to the dataframe
Xt = pd.concat([Xt, col_dummy], axis=1)
# drop the original column
Xt.drop(columns=col, inplace=True)
# Xt.drop(columns=np.nan, inplace=True)
return Xt
class FeaturSelector(BaseEstimator, TransformerMixin):
def __init__(self, cols=None):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
if "Severity" in Xt.columns.to_list():
Xt.drop(columns="Severity", inplace=True)
if self.cols is None:
return Xt
return Xt[self.cols]
class Poly(BaseEstimator, TransformerMixin):
def __init__(self, cols=None):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
if self.cols is None:
return Xt
Xt[self.cols] = Xt[self.cols] * 2 # + Xt[self.cols] ** 2
return Xt
class OutlierReplacer(BaseEstimator, TransformerMixin):
def __init__(self, q_lower, q_upper):
self.q_lower = q_lower
self.q_upper = q_upper
def fit(self, X, y=None):
self.upper = np.percentile(X, self.q_upper, axis=0, interpolation="midpoint")
self.lower = np.percentile(X, self.q_lower, axis=0, interpolation="midpoint")
return self
def transform(self, X):
Xt = X.copy()
Xt = Xt.reset_index(drop=True)
# IQR
IQR = self.upper - self.lower
# Upper bound
upper = np.where(Xt >= (self.upper + 1.2 * IQR))
# Lower bound
lower = np.where(Xt <= (self.lower - 1.2 * IQR))
""" Removing the Outliers """
for i, j in zip(upper[0], upper[1]):
Xt.iloc[i, j] = self.upper[j] + 1.2 * IQR[j]
for i, j in zip(lower[0], lower[1]):
Xt.iloc[i, j] = self.lower[j] - 1.2 * IQR[j]
return Xt
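# A tiny illustration of what OutlierReplacer does on a toy column: values far outside the
# IQR fence are capped rather than dropped (a sketch, not used in the pipeline below):
toy = pd.DataFrame({"x": [1.0, 2.0, 2.5, 3.0, 100.0]})
print(OutlierReplacer(25, 75).fit(toy).transform(toy))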
# ### Merging all datasets to one dataset
merge = MergeData(df_holidays, df_weather)
df_merged = merge.fit_transform(df_train)
# After merging data we can explore it
df_merged.iloc[:, :14].describe(include="all")
df_merged.iloc[:, 14:].describe(include="all")
df_merged.info()
# ### Data wrangling
# #### From the above data description we can remove the following columns (they carry essentially no predictive signal):
# * ID
# * Bump
# * Give_Way
# * No_Exit
# * Roundabout
# ### plotting the ``` Temperature(F) ``` and ``` Wind_Chill(F) ```
wind_temperature = df_merged[df_merged["Wind_Chill(F)"].isna() == False][
["Wind_Chill(F)", "Temperature(F)"]
]
wind_temperature.plot(kind="scatter", x="Temperature(F)", y="Wind_Chill(F)")
plt.grid()
plt.show()
df_merged["Wind_Chill(F)"].isna().sum()
# We notice that in most cases ```Temperature(F)``` is roughly equal to ```Wind_Chill(F)```,
# and ```Wind_Chill(F)``` has many null values, so we can drop it.
#
# The column ```Selected``` has only 2 rows with the value 'Yes' and the rest are 'No', so we can drop it as well.
cols = ["ID", "Bump", "Give_Way", "No_Exit", "Roundabout", "Wind_Chill(F)", "Selected"]
drop_cols = DropCols(cols)
df_merged = drop_cols.fit_transform(df_merged)
df_merged.head()
# now let's check the shape again
df_merged.shape
# #### Dealing with missing values
df_merged.isna().sum().to_frame()
# There are 7 columns containing missing data
na_cols = [
"Weather_Condition",
"Precipitation(in)",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"Visibility(mi)",
"Description",
]
# ```Weather_Condition``` has one missing value, which we replace with the most frequent category,
# and the remaining numeric columns are filled with their medians.
# ```Description``` has 6259 missing values, but these are not really missing: they are simply days with no holiday,
# so we mark them with a ```no_holiday``` value instead.
fillna = FillNa(na_cols)
df_merged = fillna.fit_transform(df_merged)
df_merged.head()
# ### Normalize data
df_merged.dtypes
cols_except = ["Lng", "Lat"]
scaler_except = ScalerExcept(cols_except)
df_merged = scaler_except.fit_transform(df_merged)
df_merged.head()
# #### Dealing with categorical data
dum_cols = ["Weather_Condition", "Side", "Description"]
dummy = Dummy(dum_cols, df_holidays, df_weather)
df_merged = dummy.fit_transform(df_merged)
df_merged.head()
fig, ax = plt.subplots(figsize=(20, 20))
im = ax.pcolor(df_merged.corr(), cmap="RdBu")
labels = df_merged.corr().columns.to_list()
# move ticks and labels to the center
ax.set_xticks(np.arange(len(labels)) + 0.5, minor=False)
ax.set_yticks(np.arange(len(labels)) + 0.5, minor=False)
# insert labels
ax.set_xticklabels(labels, minor=False)
ax.set_yticklabels(labels, minor=False)
# rotate label if too long
plt.xticks(rotation=90)
plt.colorbar(im)
plt.show()
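# The heatmap above is hard to read with this many dummy columns; ranking the absolute
# correlation with the target is a quicker way to shortlist candidate features
# (a sketch using the df_merged built above):
corr_with_target = df_merged.corr()["Severity"].drop("Severity").abs().sort_values(ascending=False)
print(corr_with_target.head(15))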
from sklearn.model_selection import train_test_split
train_data, val_data = train_test_split(df_train, test_size=0.2, random_state=42)
X_train = train_data.drop(columns="Severity")
y_train = train_data["Severity"]
X_val = val_data.drop(columns="Severity")
y_val = val_data["Severity"]
# print(df_merged.columns.to_list())
cols_select = [
"L",
"Lng",
"Crossing",
"Junction",
"Railway",
"Stop",
"Hour",
"Overcast",
"Mostly Cloudy",
"Smoke",
"Lat",
"Amenity",
"Year",
"Rain",
"Wind_Speed(mph)",
]
# cols_select = df_merged.columns.to_list()
# cols_select.remove('Severity')
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
pca = PCA(n_components=10)
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
pipe = Pipeline(
[
("merge", MergeData(df_holidays, df_weather)),
("drop_cols", DropCols(cols)),
("fillna", FillNa(na_cols)),
("scaler_except", ScalerExcept(cols_except)),
("dummy", Dummy(dum_cols, df_holidays, df_weather)),
("poly", Poly()),
("feature_selector", FeaturSelector()),
("outlier_replacer", OutlierReplacer(25, 75)),
# ('pca', pca),
("classifier", classifier),
]
)
pipe = pipe.fit(X_train, y_train)
print(
"The accuracy of the classifier on the validation set is ",
(pipe.score(X_val, y_val)),
)
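# Accuracy alone hides which severity classes get confused; a per-class report on the
# validation split is a quick follow-up (a sketch reusing pipe, X_val and y_val from above):
from sklearn.metrics import classification_report

print(classification_report(y_val, pipe.predict(X_val)))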
df_test = pd.read_csv(os.path.join(root_path, "test.csv"))
df_test.head()
y_test_predicted = pipe.predict(df_test)
df_test["Severity"] = y_test_predicted
df_test.head()
df_test[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
pd.read_csv("/kaggle/working/submission.csv")["Severity"].value_counts()
# df_weather['Weather_Condition'].unique()
# cols_select= ['Overcast', 'is_holiday', 'Hour', 'Cloudy', 'Patches of Fog', 'Haze', 'Fog']
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173930.ipynb
| null | null |
[{"Id": 69173930, "ScriptId": 18861546, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4183954, "CreationDate": "07/27/2021 16:55:54", "VersionNumber": 11.0, "Title": "A_M_M", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 403.0, "LinesInsertedFromPrevious": 62.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 341.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ### First, import the libraries we need
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xml.etree.ElementTree as xet
# ### Importing datasets
root_path = "/kaggle/input/car-crashes-severity-prediction/"
df_train = pd.read_csv(os.path.join(root_path, "train.csv"))
print('"train.csv" shape: {}'.format(df_train.shape))
df_train.head()
holidays_cols = ["Date", "Description"]
holidays_rows = []
xml_parse = xet.parse(os.path.join(root_path, "holidays.xml"))
xml_root = xml_parse.getroot()
for elem in xml_root:
row = {"Date": elem.find("date").text, "Description": elem.find("description").text}
holidays_rows.append(row)
df_holidays = pd.DataFrame(holidays_rows, columns=holidays_cols)
df_holidays.to_csv("/kaggle/working/holidays.csv", index=False)
print('"df_holidays" shape: {}'.format(df_holidays.shape))
df_holidays.head()
df_weather = pd.read_csv(os.path.join(root_path, "weather-sfcsv.csv"))
print('"df_weather" shape: {}'.format(df_weather.shape))
df_weather.head()
from sklearn.base import BaseEstimator, TransformerMixin
class MergeData(BaseEstimator, TransformerMixin):
def __init__(self, holidays, weather):
self.holidays = holidays.copy()
self.weather = weather.copy()
def fit(self, X, y=None):
return self
def transform(self, X):
# before merging data let's convert the timestamp column in df_train into (year, month, day, hour)
# frist we neet to convert its type to datetime type
Xt = X.copy()
Xt["timestamp"] = pd.to_datetime(Xt["timestamp"], format="%Y-%m-%dT%H:%M:%S")
# now we can convert it
Xt["Year"] = Xt["timestamp"].dt.year
Xt["Month"] = Xt["timestamp"].dt.month
Xt["Day"] = Xt["timestamp"].dt.day
Xt["Hour"] = Xt["timestamp"].dt.hour
# do the same with df_holidays
self.holidays["Date"] = pd.to_datetime(self.holidays["Date"], format="%Y-%m-%d")
self.holidays["Year"] = self.holidays["Date"].dt.year
self.holidays["Month"] = self.holidays["Date"].dt.month
self.holidays["Day"] = self.holidays["Date"].dt.day
# drop old columns
Xt.drop(columns="timestamp", inplace=True)
holidays_new = self.holidays.drop(columns="Date", inplace=False)
df_merged_holidays = Xt.merge(
holidays_new, on=["Year", "Month", "Day"], how="left"
)
# now we can merge the resulting dataframe with weather dataframe
Xt = df_merged_holidays.merge(
self.weather, on=["Year", "Month", "Day", "Hour"], how="left"
)
# we need to drop the duplicates
# but since the data has different values for the same date we need to check on the id only
Xt.drop_duplicates(subset=["ID"], inplace=True)
return Xt
class DropCols(BaseEstimator, TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
Xt.drop(columns=self.cols, inplace=True)
return Xt
class FillNa(BaseEstimator, TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
for col in self.cols:
if Xt[col].dtype in [np.float64, np.int64]:
median = Xt[col].median()
Xt[col].replace(np.nan, median, inplace=True)
elif col == "Description":
# Xt['is_holiday'] = Xt['Description'].apply(lambda row: row is not np.nan)
value = "no_holiday"
Xt[col].replace(np.nan, value, inplace=True)
# Xt.drop(columns='Description', inplace=True)
else:
top = Xt[col].value_counts().idxmax()
Xt[col].replace(np.nan, top, inplace=True)
return Xt
class ScalerExcept(BaseEstimator, TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
self.cols.append("Severity")
self.cols_scale = [
col for col in X.columns.to_list() if X[col].dtype in [np.float64, np.int64]
]
Xt = X.copy()
for col in self.cols_scale:
if col in self.cols:
continue
Xt[col] = Xt[col] / Xt[col].max()
return Xt
class Dummy(BaseEstimator, TransformerMixin):
def __init__(self, cols, holidays, weather):
self.cols = cols
self.holidays = holidays.copy()
self.weather = weather.copy()
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
# data should be in numbers, so we need to convert categorical data to numbers
# here we need to convert only Weather_Condition and Side columns
for col in self.cols:
if col in self.holidays.columns.to_list():
values = list(self.holidays[col].unique())
Xt[values] = 0
col_dummy = pd.get_dummies(Xt[col])
Xt[col_dummy.columns.to_list()] = col_dummy
elif col in self.weather.columns.to_list():
values = list(self.weather[col].unique())
Xt[values] = 0
col_dummy = pd.get_dummies(Xt[col])
Xt[col_dummy.columns.to_list()] = col_dummy
else:
col_dummy = pd.get_dummies(Xt[col])
# add the new columns to the dataframe
Xt = pd.concat([Xt, col_dummy], axis=1)
# drop the original column
Xt.drop(columns=col, inplace=True)
# Xt.drop(columns=np.nan, inplace=True)
return Xt
class FeaturSelector(BaseEstimator, TransformerMixin):
def __init__(self, cols=None):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
if "Severity" in Xt.columns.to_list():
Xt.drop(columns="Severity", inplace=True)
if self.cols is None:
return Xt
return Xt[self.cols]
class Poly(BaseEstimator, TransformerMixin):
def __init__(self, cols=None):
self.cols = cols
def fit(self, X, y=None):
return self
def transform(self, X):
Xt = X.copy()
if self.cols is None:
return Xt
Xt[self.cols] = Xt[self.cols] * 2 # + Xt[self.cols] ** 2
return Xt
class OutlierReplacer(BaseEstimator, TransformerMixin):
def __init__(self, q_lower, q_upper):
self.q_lower = q_lower
self.q_upper = q_upper
def fit(self, X, y=None):
self.upper = np.percentile(X, self.q_upper, axis=0, interpolation="midpoint")
self.lower = np.percentile(X, self.q_lower, axis=0, interpolation="midpoint")
return self
def transform(self, X):
Xt = X.copy()
Xt = Xt.reset_index(drop=True)
# IQR
IQR = self.upper - self.lower
# Upper bound
upper = np.where(Xt >= (self.upper + 1.2 * IQR))
# Lower bound
lower = np.where(Xt <= (self.lower - 1.2 * IQR))
""" Removing the Outliers """
for i, j in zip(upper[0], upper[1]):
Xt.iloc[i, j] = self.upper[j] + 1.2 * IQR[j]
for i, j in zip(lower[0], lower[1]):
Xt.iloc[i, j] = self.lower[j] - 1.2 * IQR[j]
return Xt
# ### Merging all datasets to one dataset
merge = MergeData(df_holidays, df_weather)
df_merged = merge.fit_transform(df_train)
# After merging data we can explore it
df_merged.iloc[:, :14].describe(include="all")
df_merged.iloc[:, 14:].describe(include="all")
df_merged.info()
# ### Data wrangling
# #### From the above data description we can remove the following columns(they will have no effect on prediction):
# * ID
# * Bump
# * Give_Way
# * No_Exit
# * Roundabout
# ### plotting the ``` Temperature(F) ``` and ``` Wind_Chill(F) ```
wind_temperature = df_merged[df_merged["Wind_Chill(F)"].isna() == False][
["Wind_Chill(F)", "Temperature(F)"]
]
wind_temperature.plot(kind="scatter", x="Temperature(F)", y="Wind_Chill(F)")
plt.grid()
plt.show()
df_merged["Wind_Chill(F)"].isna().sum()
# We notice that in the most cases the ```Temprature(F)``` is equal to ```Wind_Chill(F)```
# and it has many null values, so we can drop it.
#
# The column ``` Selected``` has only 2 rows with the value 'Yes' and the rest are 'No', so we can drop it also.
cols = ["ID", "Bump", "Give_Way", "No_Exit", "Roundabout", "Wind_Chill(F)", "Selected"]
drop_cols = DropCols(cols)
df_merged = drop_cols.fit_transform(df_merged)
df_merged.head()
# now let's check the shape again
df_merged.shape
# #### Dealing with missing values
df_merged.isna().sum().to_frame()
# There are 7 columns containing missing data
na_cols = [
"Weather_Condition",
"Precipitation(in)",
"Temperature(F)",
"Humidity(%)",
"Wind_Speed(mph)",
"Visibility(mi)",
"Description",
]
# ```Weather_Condition``` has one missing value, we can replace it with the top frequency
# then replace the remaianing columns missing data with the median.
# ```Description``` has 6259 missing data values, but we can not replace them because there is no holidays in these days
# so we first need to replace all non missing data with ```True``` and then replace missing data with ```False```
fillna = FillNa(na_cols)
df_merged = fillna.fit_transform(df_merged)
df_merged.head()
# ### Normalize data
df_merged.dtypes
cols_except = ["Lng", "Lat"]
scaler_except = ScalerExcept(cols_except)
df_merged = scaler_except.fit_transform(df_merged)
df_merged.head()
# #### Dealing with categorical data
dum_cols = ["Weather_Condition", "Side", "Description"]
dummy = Dummy(dum_cols, df_holidays, df_weather)
df_merged = dummy.fit_transform(df_merged)
df_merged.head()
fig, ax = plt.subplots(figsize=(20, 20))
im = ax.pcolor(df_merged.corr(), cmap="RdBu")
labels = df_merged.corr().columns.to_list()
# move ticks and labels to the center
ax.set_xticks(np.arange(len(labels)) + 0.5, minor=False)
ax.set_yticks(np.arange(len(labels)) + 0.5, minor=False)
# insert labels
ax.set_xticklabels(labels, minor=False)
ax.set_yticklabels(labels, minor=False)
# rotate label if too long
plt.xticks(rotation=90)
plt.colorbar(im)
plt.show()
from sklearn.model_selection import train_test_split
train_data, val_data = train_test_split(df_train, test_size=0.2, random_state=42)
X_train = train_data.drop(columns="Severity")
y_train = train_data["Severity"]
X_val = val_data.drop(columns="Severity")
y_val = val_data["Severity"]
# print(df_merged.columns.to_list())
cols_select = [
"L",
"Lng",
"Crossing",
"Junction",
"Railway",
"Stop",
"Hour",
"Overcast",
"Mostly Cloudy",
"Smoke",
"Lat",
"Amenity",
"Year",
"Rain",
"Wind_Speed(mph)",
]
# cols_select = df_merged.columns.to_list()
# cols_select.remove('Severity')
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
pca = PCA(n_components=10)
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=0)
pipe = Pipeline(
[
("merge", MergeData(df_holidays, df_weather)),
("drop_cols", DropCols(cols)),
("fillna", FillNa(na_cols)),
("scaler_except", ScalerExcept(cols_except)),
("dummy", Dummy(dum_cols, df_holidays, df_weather)),
("poly", Poly()),
("feature_selector", FeaturSelector()),
("outlier_replacer", OutlierReplacer(25, 75)),
# ('pca', pca),
("classifier", classifier),
]
)
pipe = pipe.fit(X_train, y_train)
print(
"The accuracy of the classifier on the validation set is ",
(pipe.score(X_val, y_val)),
)
df_test = pd.read_csv(os.path.join(root_path, "test.csv"))
df_test.head()
y_test_predicted = pipe.predict(df_test)
df_test["Severity"] = y_test_predicted
df_test.head()
df_test[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
pd.read_csv("/kaggle/working/submission.csv")["Severity"].value_counts()
# df_weather['Weather_Condition'].unique()
# cols_select= ['Overcast', 'is_holiday', 'Hour', 'Cloudy', 'Patches of Fog', 'Haze', 'Fog']
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pandas.api.types import CategoricalDtype
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import mutual_info_classif
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from category_encoders import MEstimateEncoder
import random
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
df = pd.concat([train_data, test_data])
print("Pclass", df.Pclass.unique())
print("Sex", df.Sex.unique())
print("SibSp", df.SibSp.unique())
print("Parch", df.Parch.unique())
print("Ticket", df.Ticket.unique())
print("Name", df.Name.unique())
print("Fare", df.Fare.unique())
print("Cabin", df.Cabin.unique())
print("Embarked", df.Embarked.unique())
print("Age", df.Age.unique())
# display(df.info())
# # print(sorted(cabin, key= lambda x: x[0]))
# # According to the output above we can say,
# **1. Pclass [3 1 2], Sex ['male' 'female'], SibSp [1 0 3 4 2 5 8], Parch [0 1 2 5 3 4 6 9] and Embarked ['S' 'C' 'Q' nan] are likely to be informative**
# **2. Ticket, Name and PassengerId are unique for almost every entry, so they carry little information for mapping inputs to the target (see the quick check below)**
# **3. Fare, Cabin and Age get further attention in the feature engineering section**
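# A quick, optional check of points 2 and 3 above (an illustrative addition, not in the original notebook): per-column cardinality and missing counts on the combined frame.
summary = pd.DataFrame({"n_unique": df.nunique(), "n_missing": df.isnull().sum()})
print(summary.sort_values("n_unique", ascending=False))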
# # Preprocessing
# ### Clean
def clean(df):
# no cleaning
return df
df.dtypes
# ### Encode
features_nom = ["Sex", "Embarked", "Cabin"] # list down unordered categorical features
ordered_levels = { # list down un ordered categorical coloumn and assign categories
"Pclass": [1, 2, 3]
}
# Add a None level for missing values
ordered_levels = {key: ["None"] + value for key, value in ordered_levels.items()}
def encode(df):
# Nominal categories
for name in features_nom:
df[name] = df[name].astype("category")
# Add a None category for missing values
if "None" not in df[name].cat.categories and df[name].isnull().sum() != 0:
df[name].cat.add_categories("None", inplace=True)
# Ordinal categories
for name, levels in ordered_levels.items():
df[name] = df[name].astype(CategoricalDtype(levels, ordered=True))
return df
# ### Impute
def impute(df):
    # numeric NaNs are filled with 0 (Age and Fare have already been filled by place_NaN_by_mean)
    for name in df.select_dtypes("number"):
        df[name] = df[name].fillna(0)
    # categorical NaNs are mapped to the explicit "None" level added in encode()
    for name in df.select_dtypes("category"):
        if "None" in df[name].cat.categories:
            df[name] = df[name].fillna("None")
    # remaining object columns (e.g. Name, Ticket) also get the string "None"
    for name in df.select_dtypes("object"):
        df[name] = df[name].fillna("None")
    return df
def place_NaN_by_mean(d_train, d_test, feature):
    # fill the train NaNs with a single random integer drawn around the train mean
    # (within +/- one std of the test split), and the test NaNs with the train mean
    d_train_mean = d_train[feature].mean()
    d_test_std = d_test[feature].std()
    d_train[feature] = d_train[feature].replace(
        np.NaN, np.random.randint(d_train_mean - d_test_std, d_train_mean + d_test_std)
    )
    d_test[feature] = d_test[feature].replace(np.NaN, d_train_mean)
    df = pd.concat([d_train, d_test])
    return df
# ## Data Loading
def load_data():
# Read data
df_train = pd.read_csv("/kaggle/input/titanic/train.csv")
df_test = pd.read_csv("/kaggle/input/titanic/test.csv")
df_train = df_train.set_index([pd.Index(range(891))])
df_test = df_test.set_index([pd.Index(range(891, 1309))])
# Merge the splits so we can process them together
df = pd.concat([df_train, df_test])
# Preprocessing
df = place_NaN_by_mean(df_train, df_test, "Age")
df = place_NaN_by_mean(df_train, df_test, "Fare")
df = clean(df)
df = encode(df)
df = impute(df)
# Reform splits
df_train = df.loc[df_train.index, :]
df_test = df.loc[df_test.index, :]
return df_train, df_test
d_train, d_test = load_data()
d_train["Survived"] = d_train.Survived.astype("category")
d_train.head()
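# After load_data() the imputation should have removed every missing value; a small illustrative check (not part of the original notebook):
print("missing values left in train:", d_train.isnull().sum().sum())
print("missing values left in test :", d_test.isnull().sum().sum())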
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Cabin", "Embarked"]
# ## Scoring DataSet
def score_dataset(
X_train,
y,
model=RandomForestClassifier(
criterion="gini",
n_estimators=700,
min_samples_split=10,
min_samples_leaf=1,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
),
):
for colname in X_train.select_dtypes(["category", "object"]):
X_dummies = pd.get_dummies(X_train[colname], drop_first=False, prefix=colname)
X_train = X_train.join(X_dummies)
X_train = X_train.drop(colname, 1)
score = cross_val_score(
model,
X_train,
y,
cv=5,
scoring="neg_mean_squared_log_error",
)
score = -1 * score.mean()
score = np.sqrt(score)
return score
X = d_train[features].copy()
y = d_train.Survived.copy()
# print(X,y)
baseline_score = score_dataset(X.copy(), y.copy())
print(f"Baseline score: {baseline_score:.5f} RMSLE")
# ## Mutual Information Scores
def make_mi_scores(X, y, is_one_hot_encode):
X = X.copy()
if is_one_hot_encode:
for colname in X.select_dtypes(["category", "object"]):
X_dummies = pd.get_dummies(X[colname], drop_first=False, prefix=colname)
X = X.join(X_dummies)
X = X.drop(colname, 1)
else:
for colname in X.select_dtypes(["object", "category"]):
X[colname], _ = X[colname].factorize()
# All discrete features should now have integer dtypes
discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes]
mi_scores = mutual_info_classif(X, y, discrete_features="auto", random_state=0)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
# ### Mutual information check
X = d_train.copy()
y = X.pop("Survived")
mi_scores = make_mi_scores(X.copy(), y, False)
print(mi_scores)
plt.figure(dpi=100, figsize=(8, 5))
plot_mi_scores(mi_scores)
sns.countplot(d_train["Survived"])
sns.countplot(d_train["Pclass"], hue=d_train["Survived"])
sns.countplot(d_train["Sex"], hue=d_train["Survived"])
sns.countplot(d_train["Embarked"], hue=d_train["Survived"])
# # Creating Features
def name_breakdown(X_train_test, features):
X_new = pd.DataFrame()
X_new["Title"] = X_train_test["Name"].str.split(", ", n=1, expand=True)[1]
X_new["Title"] = X_new["Title"].copy().str.split(".", n=1, expand=True)[0]
X = X_train_test[features].copy()
X["Title"] = X_new
X["Title"] = X["Title"].astype("category")
return X
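# A tiny illustration (added for clarity, using two example names in the training-data format) of what the two-step split inside name_breakdown extracts:
example_names = pd.Series(
    ["Braund, Mr. Owen Harris", "Cumings, Mrs. John Bradley (Florence Briggs Thayer)"]
)
after_surname = example_names.str.split(", ", n=1, expand=True)[1]
print(after_surname.str.split(".", n=1, expand=True)[0].tolist())  # -> ['Mr', 'Mrs']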
def name_len_feature(X_train_test, data):
X_new = pd.DataFrame()
X_new["Name_len"] = data["Name"].apply(lambda x: len(x))
# X = X_train[features].copy()
X_train_test["Name_len"] = X_new
return X_train_test
def ticket_first_litter(X_train_test, data):
X_new = pd.DataFrame()
X_new["Ticket_letter"] = data["Ticket"].apply(lambda x: str(x)[0])
# X = X_train[features].copy()
X_train_test["Ticket_letter"] = X_new
X_train_test["Ticket_letter"] = X_train_test["Ticket_letter"].astype("category")
return X_train_test
def Cabin_break_down(X_train_test):
X_new = pd.DataFrame()
# X = X_train[features].copy()
X_train_test["Cabin_break_down"] = np.where(
X_train_test.Cabin != "None", X_train_test["Cabin"].astype(str).str[0], "None"
)
X_train_test["Cabin_break_down"] = X_train_test["Cabin_break_down"].astype(
"category"
)
return X_train_test
def age_binning(X_train_test):
X_new = pd.DataFrame()
cut_list = [0, 4, 8, 12, 18, 30, 40, 55, 65, 80]
X_new["AgeBin"] = pd.cut(X_train_test["Age"], cut_list, labels=False)
# X = X_train[features].copy()
X_train_test["AgeBin"] = X_new
X_train_test["AgeBin"] = X_train_test["AgeBin"].astype("category")
return X_train_test
def fare_binning(X_train_test):
X_new = pd.DataFrame()
cut_list = list(range(0, 100, 10))
cut_list.extend(list(range(100, 700, 100)))
X_new["FareBin"] = pd.cut(X_train_test["Fare"], cut_list, labels=False, right=False)
# X = X_train[features].copy()
X_train_test["FareBin"] = X_new
X_train_test["FareBin"] = X_train_test["FareBin"].astype("category")
return X_train_test
def embarked_fare_group(X_train_test):
X_new = pd.DataFrame()
X_new["FareEmbarked"] = X_train_test.groupby("Embarked")["Fare"].transform("median")
# X = X_train[features].copy()
X_train_test["FareEmbarked"] = X_new
X_train_test["FareEmbarked"] = X_train_test["FareEmbarked"].astype("category")
return X_train_test
def cluster_labels(df, features, n_clusters=20):
X = df.copy()
X_scaled = X.loc[:, features]
X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0)
kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0)
# print(X_scaled)
X_new = X.loc[:, features]
X_new["Cluster"] = kmeans.fit_predict(X_scaled)
return X_new, kmeans
def cluster_distance(df, features, n_clusters=20):
X = df.copy()
X_scaled = X.loc[:, features]
X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0)
kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0)
X_cd = kmeans.fit_transform(X_scaled)
# Label features and join to dataset
X_cd = pd.DataFrame(X_cd, columns=[f"Centroid_{i}" for i in range(X_cd.shape[1])])
return X_cd
def cluster_from_Age_fare(X_train_test):
cluster_features = ["Age", "Fare"]
X_clustered, kmeans_feature_engineering = cluster_labels(
X_train_test.copy(), cluster_features, 10
)
print(X_clustered.head())
sns.relplot(x="Age", y="Fare", hue="Cluster", data=X_clustered, height=5)
# X_clustered_data = X_train.copy()
X_train_test["Cluster"] = X_clustered.Cluster
X_train_test["Cluster"] = X_train_test["Cluster"].astype("category")
return X_train_test
# print(X_modified_7)
# X_modified_8 = cluster_from_Age_fare(X_modified_6.copy())
# # PCA
def apply_pca(X, standardize=True):
# Standardize
if standardize:
X = (X - X.mean(axis=0)) / X.std(axis=0)
# Create principal components
pca = PCA()
X_pca = pca.fit_transform(X)
# Convert to dataframe
component_names = [f"PC{i+1}" for i in range(X_pca.shape[1])]
X_pca = pd.DataFrame(X_pca, columns=component_names)
# Create loadings
loadings = pd.DataFrame(
pca.components_.T, # transpose the matrix of loadings
columns=component_names, # so the columns are the principal components
index=X.columns, # and the rows are the original features
)
return pca, X_pca, loadings
def plot_variance(pca, width=8, dpi=100):
# Create figure
fig, axs = plt.subplots(1, 2)
n = pca.n_components_
grid = np.arange(1, n + 1)
# Explained variance
evr = pca.explained_variance_ratio_
axs[0].bar(grid, evr)
axs[0].set(xlabel="Component", title="% Explained Variance", ylim=(0.0, 1.0))
# Cumulative Variance
cv = np.cumsum(evr)
axs[1].plot(np.r_[0, grid], np.r_[0, cv], "o-")
axs[1].set(xlabel="Component", title="% Cumulative Variance", ylim=(0.0, 1.0))
# Set up figure
fig.set(figwidth=8, dpi=100)
return axs
features_for_PCA = [
"Age",
"Fare",
"SibSp",
"Parch",
]
print("Correlation with Survived:\n")
# print(X_modified[features_for_PCA].corrwith(X_modified.Survived))
pca, X_pca, loadings = apply_pca(d_train[features_for_PCA])
print(loadings)
# Look at explained variance
plot_variance(pca)
# #### **SibSp and Parch load strongly on the same principal components, so I add them together: their sum gives the passenger's family size, which may help map inputs to the target**
def SibSp_and_Parch(X_train_test):
X_train_test["SibSp_Parch"] = X_train_test.SibSp + X_train_test.Parch
return X_train_test
# X_modified_9 = SibSp_and_Parch(X_modified_8.copy())
def model_run(
X_train_test,
d_train,
d_test,
y,
model=RandomForestClassifier(
criterion="gini",
n_estimators=700,
min_samples_split=10,
min_samples_leaf=1,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
),
):
for colname in X_train_test.select_dtypes(["category", "object"]):
X_dummies = pd.get_dummies(
X_train_test[colname], drop_first=False, prefix=colname
)
X_train_test = X_train_test.join(X_dummies)
X_train_test = X_train_test.drop(colname, 1)
    # note: the `model` default argument above is not reused; a smaller forest is fit here for the final submission
    model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X_train_test.loc[d_train.index, :].copy(), y)
predictions = model.predict(X_train_test.loc[d_test.index, :].copy())
predictions = predictions.astype(int)
output = pd.DataFrame({"PassengerId": d_test.index + 1, "Survived": predictions})
output.to_csv("my_submission_9.csv", index=False)
print(output)
print("Your submission was successfully saved!")
# # Target Encoding
# # Encoding split
# X_for_encoding = X_modified_9.copy()
# X_for_encoding['Survived']= X_modified_9[["Survived"]].copy()
# X_for_encoding['Ticket']= d_train[["Ticket"]].copy()
# X_encode = X_for_encoding.sample(frac=0.9, random_state=0)
# y_encode = X_encode.pop("Survived")
# # Training split
# X_pretrain = X_for_encoding.drop(X_encode.index)
# y_train = X_pretrain.pop("Survived")
# # Choose a set of features to encode and a value for m
# encoder = MEstimateEncoder(cols=["Cabin"], m=5)
# # Fit the encoder on the encoding split
# encoder.fit(X_encode, y_encode)
# # Encode the training split
# X_train = encoder.transform(X_pretrain, y_train).copy()
# feature = encoder.cols
# plt.figure(dpi=90)
# ax = sns.distplot(y_train, kde=True, hist=False)
# ax = sns.distplot(X_train[feature], color='r', ax=ax, hist=True, kde=False, norm_hist=True)
# ax.set_xlabel("Survived");
# # Test preprocess
d_train_y = d_train["Survived"].copy()
d_train_x = d_train.drop("Survived", 1)
d_test_y = d_test["Survived"]
d_test_x = d_test.drop("Survived", 1)
X_modified_1 = name_breakdown(pd.concat([d_train_x, d_test_x]), features)
print(
f"New score after breaking-down Names: {score_dataset(X_modified_1.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_2 = name_len_feature(X_modified_1, pd.concat([d_train, d_test]))
print(
f"New score after taking name length feature: {score_dataset(X_modified_2.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_3 = ticket_first_litter(X_modified_2, pd.concat([d_train, d_test]))
print(
f"New score after taking ticket first letter breakdown feature: {score_dataset(X_modified_3.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_4 = Cabin_break_down(X_modified_3)
print(
f"New score after taking cabin first letter breakdown feature: {score_dataset(X_modified_4.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_5 = fare_binning(X_modified_4)
print(
f"New score after binning fare feature: {score_dataset(X_modified_5.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_6 = age_binning(X_modified_5)
print(
f"New score after binning age feature: {score_dataset(X_modified_6.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_7 = embarked_fare_group(X_modified_6)
print(
f"New score after grouping embarked feature: {score_dataset(X_modified_7.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_8 = cluster_from_Age_fare(X_modified_7.copy())
print(
f"New score after cluster age and fare feature: {score_dataset(X_modified_8.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_9 = SibSp_and_Parch(X_modified_8.copy())
print(
f"New score after add SibSp and Parch feature: {score_dataset(X_modified_9.loc[d_train.index, :].copy(), d_train_y.copy()):.5f} RMSLE"
)
X_modified_9
# Ticket_letter
sns.countplot(
X_modified_9.loc[d_train.index, :].copy()["Ticket_letter"], hue=d_train["Survived"]
)
# sns.countplot(d_train['Embarked'], hue=d_train['Survived'])
sns.countplot(
X_modified_9.loc[d_train.index, :].copy()["Cabin_break_down"],
hue=d_train["Survived"],
)
sns.countplot(
X_modified_9.loc[d_train.index, :].copy()["FareEmbarked"], hue=d_train["Survived"]
)
sns.countplot(
X_modified_9.loc[d_train.index, :].copy()["AgeBin"], hue=d_train["Survived"]
)
model_run(X_modified_9, d_train, d_test, d_train_y)
# # Get mutual information scores without one-hot encoding (original features only)
mi_score_for_original_feature = make_mi_scores(
X_modified_9.loc[d_train.index, :].copy(), d_train_y, False
)
print(mi_score_for_original_feature)
informative_feature_original = []
for column_name, score in mi_score_for_original_feature.items():
if score > 0.036688:
informative_feature_original.append(column_name)
print(informative_feature_original)
X_modified_9_droped_unifomative = X_modified_9.copy()
for x in X_modified_9.columns:
if x not in informative_feature_original:
X_modified_9_droped_unifomative = X_modified_9_droped_unifomative.drop(x, 1)
X_modified_9_droped_unifomative
# score_dataset(X_modified_9_droped_unifomative, y)
print(
score_dataset(
X_modified_9_droped_unifomative.loc[d_train.index, :].copy(), d_train_y.copy()
)
)
model_run(X_modified_9_droped_unifomative, d_train, d_test, d_train_y)
# ## Mutual information of all features (one-hot encoding creates many columns when we fit the model, so removing uninformative ones may help). Even though this step increases the RMSLE score, the prediction is the same as the previous model; it does, however, cut out a lot of unnecessary computation
miscore_list = make_mi_scores(
X_modified_9.loc[d_train.index, :].copy(), d_train_y, True
)
informative_features_after_hot_encode = []
for column_name, score in miscore_list.items():
# print(score)
    if score != 0:
informative_features_after_hot_encode.append(column_name)
print(
    "Informative features extracted using MI scores:",
    len(informative_features_after_hot_encode),
)
print("Total number of one-hot encoded features:", miscore_list.size)
def score_dataset_without_uninfomative_feature_filter(
X_train,
y,
uninfomative_features,
model=RandomForestClassifier(
criterion="gini",
n_estimators=700,
min_samples_split=10,
min_samples_leaf=1,
max_features="auto",
oob_score=True,
random_state=1,
n_jobs=-1,
),
):
for colname in X_train.select_dtypes(["category", "object"]):
X_dummies = pd.get_dummies(X_train[colname], drop_first=False, prefix=colname)
X_train = X_train.join(X_dummies)
X_train = X_train.drop(colname, 1)
for colname in X_train.columns:
if colname in uninfomative_features:
X_train = X_train.drop(colname, 1)
score = cross_val_score(
model,
X_train,
y,
cv=5,
scoring="neg_mean_squared_log_error",
)
score = -1 * score.mean()
score = np.sqrt(score)
return score
X_modified_1_without_uninfomative = name_breakdown(
pd.concat([d_train_x, d_test_x]), features
)
print(
f"New score after breaking-down Names: {score_dataset_without_uninfomative_feature_filter(X_modified_1_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_2_without_uninfomative = name_len_feature(
X_modified_1_without_uninfomative, pd.concat([d_train, d_test])
)
print(
f"New score after taking name length feature: {score_dataset_without_uninfomative_feature_filter(X_modified_2_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_3_without_uninfomative = ticket_first_litter(
X_modified_2_without_uninfomative, pd.concat([d_train, d_test])
)
print(
f"New score after taking ticket first letter breakdown feature: {score_dataset_without_uninfomative_feature_filter(X_modified_3_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_4_without_uninfomative = Cabin_break_down(X_modified_3_without_uninfomative)
print(
f"New score after taking cabin first letter breakdown feature: {score_dataset_without_uninfomative_feature_filter(X_modified_4_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_5_without_uninfomative = fare_binning(X_modified_4_without_uninfomative)
print(
f"New score after binning fare feature: {score_dataset_without_uninfomative_feature_filter(X_modified_5_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_6_without_uninfomative = age_binning(X_modified_5_without_uninfomative)
print(
f"New score after binning age feature: {score_dataset_without_uninfomative_feature_filter(X_modified_6_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_7_without_uninfomative = embarked_fare_group(
X_modified_6_without_uninfomative
)
print(
f"New score after grouping embarked feature: {score_dataset_without_uninfomative_feature_filter(X_modified_7_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_8_without_uninfomative = cluster_from_Age_fare(
X_modified_7_without_uninfomative.copy()
)
print(
f"New score after cluster age and fare feature: {score_dataset_without_uninfomative_feature_filter(X_modified_8_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_9_without_uninfomative = SibSp_and_Parch(
X_modified_8_without_uninfomative.copy()
)
print(
f"New score after add SibSp and Parch feature: {score_dataset_without_uninfomative_feature_filter(X_modified_9_without_uninfomative.loc[d_train.index, :].copy(), d_train_y.copy(),informative_features_after_hot_encode):.5f} RMSLE"
)
X_modified_9_without_uninfomative
model_run(X_modified_9_without_uninfomative, d_train, d_test, d_train_y)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import os
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
import random
# keras
from keras.models import Sequential
from keras.layers import (
Conv2D,
MaxPooling2D,
Dropout,
Flatten,
Dense,
Activation,
BatchNormalization,
)
from keras import optimizers
from keras.losses import SparseCategoricalCrossentropy
from keras.preprocessing.image import ImageDataGenerator
# sklearn library
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# build (file path, label) pairs; the label is the part of the file name before the first dot
f = Path("./train/train")
File_Path = list(f.glob(r"**/*.jpg"))
# derive the labels from the same file list so that paths and labels stay aligned
Labels = [p.name.split(".")[0] for p in File_Path]
# dataframe
File_Path = pd.Series(File_Path).astype(str)
Labels = pd.Series(Labels)
df = pd.concat([File_Path, Labels], axis=1)
df.columns = ["filename", "category"]
df.head()
train_set, test_data = train_test_split(df, test_size=0.2, random_state=42)
train_data, val_data = train_test_split(train_set, test_size=0.2, random_state=42)
print(train_data.shape)
print(test_data.shape)
print(val_data.shape)
train_data = train_data.reset_index(drop=True)
test_data = test_data.reset_index(drop=True)
val_data = val_data.reset_index(drop=True)
img_size = (128, 128)
img_gen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True)
train_gen = img_gen.flow_from_dataframe(
train_data,
x_col="filename",
y_col="category",
target_size=img_size,
class_mode="binary",
batch_size=32,
shuffle=False,
)
validation_gen = img_gen.flow_from_dataframe(
val_data,
x_col="filename",
y_col="category",
target_size=img_size,
class_mode="binary",
batch_size=32,
shuffle=False,
)
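# With class_mode="binary" the generator maps the two category strings to 0/1 (alphabetical order); printing the mapping makes the encoding explicit. Illustrative addition, not in the original notebook.
print(train_gen.class_indices)  # expected something like {'cat': 0, 'dog': 1}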
# a small CNN: four conv/pool blocks followed by a dense classifier head
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(128, 128, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(512, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.compile(
loss="binary_crossentropy", optimizer=optimizers.RMSprop(lr=1e-4), metrics=["acc"]
)
model.summary()
history = model.fit(
train_gen,
validation_data=validation_gen,
validation_steps=50,
steps_per_epoch=100,
epochs=30,
verbose=1,
)
# accuracy
acc = history.history["acc"]
val_acc = history.history["val_acc"]
# loss
loss = history.history["loss"]
val_loss = history.history["val_loss"]
plt.figure(figsize=(10, 5))
# visualising Accuracy
plt.subplot(2, 1, 1)
plt.plot(acc, label="Training Accuracy")
plt.plot(val_acc, label="Validation Accuracy")
plt.ylabel("Accuracy")
plt.title("Training and Validation Accuracy")
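# A minimal sketch of the companion loss panel (the original cell stops above);
# it only reuses the loss/val_loss lists computed earlier.
plt.legend(loc="lower right")
# visualising Loss
plt.subplot(2, 1, 2)
plt.plot(loss, label="Training Loss")
plt.plot(val_loss, label="Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Training and Validation Loss")
plt.legend(loc="upper right")
plt.tight_layout()
plt.show()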
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
        pass  # per-file listing suppressed; the walk is kept only as a template stub
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import re
import matplotlib.pyplot as plt
import os
import numpy as np
import pywt
from scipy.stats import entropy as kl
import time
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score
from numpy import arange
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from keras.models import Sequential
from keras.layers import Dense
from pyentrp import entropy as ent
import numpy as np
import entropy
from scipy.special import gamma, psi
from scipy import ndimage
from scipy.linalg import det
from numpy import pi
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from sklearn.preprocessing import MinMaxScaler
f = []
nf = []
label = []
count = 0
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
if (
re.search(
"/kaggle/input/bern-eeg/Data_F_50/*", os.path.join(dirname, filename)
)
!= None
):
# print(os.path.join(dirname, filename))
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
f.append(dataframe[0].values - dataframe[1].values)
label.append(0)
if (
re.search(
"/kaggle/input/bern-eeg/Data_N_50/*", os.path.join(dirname, filename)
)
!= None
):
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
nf.append(dataframe[0].values - dataframe[1].values)
label.append(1)
len(f), len(nf), len(label)
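# f and nf hold the focal / non-focal signals as the difference of the two
# recorded channels (column 0 minus column 1 of each CSV); the cells below keep
# 50 recordings per class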
data = []
data = f[0:50]
data.extend(nf[0:50])
len(data)
f[0].shape
def bern_dataset(data, wavelet_family="db10", level=4):
db = pywt.Wavelet(wavelet_family)
a4 = []
d4 = []
d3 = []
d2 = []
d1 = []
for samp in data:
cA4, cD4, cD3, cD2, cD1 = pywt.wavedec(samp, db, level=level)
a4.append(cA4)
d4.append(cD4)
d3.append(cD3)
d2.append(cD2)
d1.append(cD1)
a4 = np.array(a4)
d4 = np.array(d4)
d3 = np.array(d3)
d2 = np.array(d2)
d1 = np.array(d1)
print("[INFO] Dataset processing completed")
return [a4, d4, d3, d2, d1]
f_class = bern_dataset(f)
nf_class = bern_dataset(nf)
D = bern_dataset(data)
y = [0 for i in range(50)]
y.extend([1 for i in range(50)])
f_class[0].shape, f_class[1].shape, f_class[2].shape, f_class[3].shape, f_class[4].shape
len(D), D[0].shape
def renyi_entropy(data, alpha):
"""if alpha ==1:
alpha=0"""
# alpha = np.random.uniform(0.9,1.1)
# reyni_entropy(data,alpha)
ren_ent = []
for i in range(data.shape[0]):
_, ele_counts = np.unique(np.around(data[i], decimals=0), return_counts=True)
summation = np.sum(np.log((ele_counts / data.shape[1]) ** alpha))
ren_ent.append((1 / (1 - alpha)) * summation)
return np.array(ren_ent)
def permutation_ent(data, order):
if order < 2:
order = 2
perm_ent = []
# order = int(order)
for i in range(data.shape[0]):
data[i] = np.around(data[i], decimals=0)
perm_ent.append(ent.permutation_entropy(data[i], order=order, normalize=True))
return np.array(perm_ent)
def tsallis_ent(data, alpha):
"""if alpha ==1:
alpha=0"""
# alpha = np.random.uniform(0.9,1.1)
# tsallis_ent(data,alpha)
tsa_ent = []
for i in range(data.shape[0]):
_, ele_counts = np.unique(np.around(data[i], decimals=0), return_counts=True)
summation = np.sum(np.log((ele_counts / data.shape[1]) ** alpha))
tsa_ent.append((1 / (alpha - 1)) * (1 - summation))
return np.array(tsa_ent)
def kraskov_ent(data, k):
# if k < 1:
# k = 1
kra_ent = []
# k=int(k)
knn = NearestNeighbors(n_neighbors=k)
for i in range(data.shape[0]):
X = np.around(data[i], decimals=0).reshape(-1, 1)
knn.fit(X)
r, _ = knn.kneighbors(X)
n, d = X.shape
volume_unit_ball = (pi ** (0.5 * d)) / gamma(0.5 * d + 1)
kra_ent.append(
(
d * np.mean(np.log(r[:, -1] + np.finfo(X.dtype).eps))
+ np.log(volume_unit_ball)
+ psi(n)
- psi(k)
)
)
return np.array(kra_ent)
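# A minimal sketch (not part of the original analysis) of how a single entropy
# feature is scored by the optimisers further below: compute the band-wise
# entropy for every recording, split it into the focal / non-focal halves, and
# take the KL divergence between them. example_alpha is an arbitrary
# illustrative order, not a tuned value.
example_alpha = 2
band_entropy = renyi_entropy(D[0], example_alpha)  # one value per recording (A4 band)
focal_half, nonfocal_half = np.split(band_entropy, 2)
print("KL(focal || non-focal) on A4:", kl(focal_half, nonfocal_half))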
"""def permutation_ent(data, order):
if order<2:
order=2
perm_entr= []
for coeff in data:
temp=[]
for i in coeff:
temp.append(ent.permutation_entropy(i,order=order,normalize=True))
perm_entr.append(temp)
#print(len(perm_entr))
return np.array(perm_entr)"""
"""kullback_divergence_A4=[]
kullback_divergence_D4=[]
kullback_divergence_D3=[]
kullback_divergence_D2=[]
kullback_divergence_D1=[]
for i in range(2,15):
permutation_entropy_A=permutation_ent(f_class,i)
permutation_entropy_E=permutation_ent(nf_class,i)
#print("kullback divergence for A4 coefficients :",kl(permutation_entropy_A[0],permutation_entropy_E[0]))
#print("kullback divergence for D4 coefficients :",kl(permutation_entropy_A[1],permutation_entropy_E[1]))
#print("kullback divergence for D3 coefficients :",kl(permutation_entropy_A[2],permutation_entropy_E[2]))
#print("kullback divergence for D2 coefficients :",kl(permutation_entropy_A[3],permutation_entropy_E[3]))
#print("kullback divergence for D1 coefficients :",kl(permutation_entropy_A[4],permutation_entropy_E[4]))
kullback_divergence_A4.append(kl(permutation_entropy_A[0],permutation_entropy_E[0]))
kullback_divergence_D4.append(kl(permutation_entropy_A[1],permutation_entropy_E[1]))
kullback_divergence_D3.append(kl(permutation_entropy_A[2],permutation_entropy_E[2]))
kullback_divergence_D2.append(kl(permutation_entropy_A[3],permutation_entropy_E[3]))
kullback_divergence_D1.append(kl(permutation_entropy_A[4],permutation_entropy_E[4]))
plt.title('KL Divergence [Permutation entropy](Class A vs Class E)')
plt.xlabel('Order')
plt.ylabel('KL values')
plt.grid( )
plt.plot(range(2,15),kullback_divergence_A4)
plt.plot(range(2,15),kullback_divergence_D4)
plt.plot(range(2,15),kullback_divergence_D3)
plt.plot(range(2,15),kullback_divergence_D2)
plt.plot(range(2,15),kullback_divergence_D1)
plt.legend(['A4','D4','D3','D2','D1'],loc='upper right')
plt.show()"""
import time
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score
"""def classify_svm(X,Y):
#X, Y = extract_best_features()
print(X.shape)
acc = []
classify = svm.SVC(gamma='scale', kernel = 'rbf')
#ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train , X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
print(np.mean(np.array(acc)))
np.save("spl_param_svm_rbf", acc)
return np.mean(np.array(acc))"""
"""order=[]
acc=[]
for i in range(2,15):
order.append(i)
permutation_entropy_A=permutation_ent(f_class,i)
permutation_entropy_A[0].shape
permutation_entropy_E=permutation_ent(nf_class,i)
permutation_entropy_E.shape
X= np.concatenate((permutation_entropy_A.T,permutation_entropy_E.T))
acc.append(classify_svm(X,label))"""
"""plt.title(' SVM accuracy vs Order of Permutation entropy (Class A vs Class E)')
plt.xlabel('Order')
plt.ylabel('Accuracy of SVM')
plt.grid( )
plt.plot(order,acc)
plt.legend(['Acc'],loc='upper right')
plt.show()"""
import random
def farm_land_fertility_compute_optim_RT(
D, option, flag_entropy, output_file, max_iteration=10
):
if option == 0:
print("=== REYNI ENTROPY ===")
else:
print("=== TSALLIS ENTROPY ===")
n = 3 ### no of samples in a sector
k = 4 ### no of sections
population_size = 12 ### population size = n*k
no_of_class = 2
best_param = []
best_param_sof_far = []
int_pop = list(np.random.uniform(0, 10, size=population_size * len(D)))
kl_values = [-999999999] * (population_size * len(D))
global_best_kl = [-999999999] * len(D)
global_best_order = [-999999999] * len(D)
local_best_kl = [-9999999999] * (len(D) * k)
local_best_order = [-9999999999] * (len(D) * k)
mean_kl = [-999999999] * (k * len(D))
worst_section_kl = [999999999] * len(D)
worst_section = [-1] * len(D)
# external_memory_order=[0]*(population_size*len(D-1))
alpha = np.random.rand(1)[0]
count = 0
w = np.random.rand(1)[0]
q = np.random.rand(1)[0]
copy_global_param = global_best_order
f = open(output_file + ".txt", "w")
ent_exc_time = time.time()
for iter in range(max_iteration):
# print("ITERATION ",iter," ------------------------------")
f.write("ITERATION " + str(iter) + " ------------------------------ \n")
tic = time.time()
# calculating kl divergence for population
for pop in range(population_size):
for band in range(len(D)):
if option == 0:
int_pop = [0 if i < 0 else i for i in int_pop]
int_pop = [10 if i > 10 else i for i in int_pop]
ent = renyi_entropy(D[band], int_pop[pop * len(D) + band])
else:
int_pop = [0 if i < 0 else i for i in int_pop]
int_pop = [10 if i > 10 else i for i in int_pop]
ent = tsallis_ent(D[band], int_pop[pop * len(D) + band])
a, e = np.split(ent, no_of_class)
kl_values[pop * len(D) + band] = kl(a, e)
        # saving best kl and corresponding order values
for pop in range(population_size):
for band in range(len(D)):
                if global_best_kl[band] < kl_values[(pop * len(D)) + band]:
global_best_kl[band] = kl_values[(pop * len(D)) + band]
global_best_order[band] = int_pop[(pop * len(D)) + band]
# saving local best kl and corresponding order
for section in range(k):
for samp in range(n):
for band in range(len(D)):
if (
local_best_kl[section * len(D) + band]
< kl_values[section * samp + band]
):
local_best_kl[section * len(D) + band] = kl_values[
section * samp + band
]
local_best_order[section * len(D) + band] = int_pop[
section * samp + band
]
# calculating mean fitness
len_sec = len(int_pop) // k
for j in range(k):
sliced_pop = kl_values[j * len_sec : (j * len_sec) + len_sec]
x, y, z = sliced_pop[0:5], sliced_pop[5:10], sliced_pop[10:15]
for band in range(len(D)):
mean_kl[j * len(D) + band] = (x[band] + y[band] + z[band]) / n
# updating worst section kl
for band in range(len(D)):
for section in range(k):
if mean_kl[section * len(D) + band] < worst_section_kl[band]:
worst_section_kl[band] = mean_kl[section * len(D) + band]
worst_section[band] = section
# rearranging population into array (just for convenience)
h = alpha * np.random.uniform(-1, 1)
coeff = np.array([[0] * population_size] * len(D))
for pop in range(population_size):
for band in range(len(D)):
coeff[band][pop] = int_pop[pop * len(D) + band]
        # updating population for next step
for band in range(coeff.shape[0]):
for pop in range(coeff.shape[1]):
sec = pop // n
if sec == worst_section[band]:
l = list(coeff[band])
del l[sec * n : (sec + 1) * n]
random_soln = random.choice(
l
) ### selecting a random solution from external memory
coeff[band][pop] = (
h * (coeff[band][pop] - random_soln) + coeff[band][pop]
)
else:
coeff[band][pop] = (
h * (coeff[band][pop] - global_best_order[band])
+ coeff[band][pop]
)
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = coeff[band][pop]
# checking for a trap / conflict
if copy_global_param != global_best_order:
copy_global_param = global_best_order
else:
count += 1
# changing int pop combination to exit local minima trap
if count >= 5:
if q > np.random.rand(1)[0]:
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = int_pop[
(pop * len(D)) + band
] + (
w
* (int_pop[(pop * len(D)) + band] - global_best_order[band])
)
else:
for section in range(k):
for samp in range(n):
for band in range(len(D)):
int_pop[section * samp + band] = int_pop[
section * samp + band
] + (
np.random.rand(1)[0]
* (
int_pop[section * samp + band]
- local_best_order[section * len(D) + band]
)
)
w = w * np.random.rand(1)[0]
        # print("Best parameter so far = ", global_best_order)
        f.write(
            "Best parameter so far = "
            + " ".join(list(map(str, global_best_order)))
            + "\n"
        )
        # print("Done. Time elapsed: " + str(time.time() - tic))
        f.write("Done. Time elapsed: " + str(time.time() - tic) + "\n")
f.write("Total Execution Time: " + str(time.time() - ent_exc_time) + "\n")
print("The final Best parameter : " + flag_entropy, global_best_order)
f.close()
return global_best_order
import random
def farm_land_fertility_compute_optim_PK(
D, option, flag_entropy, output_file, max_iteration=10
):
if option == 0:
print("=== PERMUTATION ENTROPY ===")
else:
print("=== KRASKOV ENTROPY ===")
n = 3 ### no of samples in a sector
k = 4 ### no of sections
# max_iteration = 10
population_size = 12 ### population size = n*k
no_of_class = 2
best_param = []
best_param_sof_far = []
int_pop = list(np.random.randint(0, 10, size=population_size * len(D)))
kl_values = [-999999999] * (population_size * len(D))
global_best_kl = [-999999999] * len(D)
global_best_order = [-999999999] * len(D)
local_best_kl = [-9999999999] * (len(D) * k)
local_best_order = [-9999999999] * (len(D) * k)
mean_kl = [-999999999] * (k * len(D))
worst_section_kl = [999999999] * len(D)
worst_section = [-1] * len(D)
# external_memory_order=[0]*(population_size*len(D-1))
alpha = np.random.rand(1)[0]
count = 0
w = np.random.rand(1)[0]
q = np.random.rand(1)[0]
copy_global_param = global_best_order
# copy_global_param=global_best_order
f = open(output_file + ".txt", "w")
ent_exc_time = time.time()
for iter in range(max_iteration):
print("ITERATION ", iter, " ------------------------------")
tic = time.time()
# calculating kl divergence for population
for pop in range(population_size):
for band in range(len(D)):
if option == 0:
int_pop = [2 if i < 2 else i for i in int_pop]
int_pop = [15 if i > 15 else i for i in int_pop]
ent = permutation_ent(D[band], int_pop[pop * len(D) + band])
else:
int_pop = [1 if i < 1 else i for i in int_pop]
int_pop = [15 if i > 15 else i for i in int_pop]
ent = kraskov_ent(D[band], int_pop[pop * len(D) + band])
a, e = np.split(ent, no_of_class)
kl_values[pop * len(D) + band] = kl(a, e)
        # saving best kl and corresponding order values
for pop in range(population_size):
for band in range(len(D)):
                if global_best_kl[band] < kl_values[(pop * len(D)) + band]:
global_best_kl[band] = kl_values[(pop * len(D)) + band]
global_best_order[band] = int_pop[(pop * len(D)) + band]
# saving local best kl and corresponding order
for section in range(k):
for samp in range(n):
for band in range(len(D)):
if (
local_best_kl[section * len(D) + band]
< kl_values[section * samp + band]
):
local_best_kl[section * len(D) + band] = kl_values[
section * samp + band
]
local_best_order[section * len(D) + band] = int_pop[
section * samp + band
]
# calculating mean fitness
len_sec = len(int_pop) // k
for j in range(k):
sliced_pop = kl_values[j * len_sec : (j * len_sec) + len_sec]
x, y, z = sliced_pop[0:5], sliced_pop[5:10], sliced_pop[10:15]
for band in range(len(D)):
mean_kl[j * len(D) + band] = (x[band] + y[band] + z[band]) / n
# updating worst section kl
for band in range(len(D)):
for section in range(k):
if mean_kl[section * len(D) + band] < worst_section_kl[band]:
worst_section_kl[band] = mean_kl[section * len(D) + band]
worst_section[band] = section
# rearranging population into array (just for convenience)
h = alpha * np.random.uniform(-1, 1)
coeff = np.array([[0] * population_size] * len(D))
for pop in range(population_size):
for band in range(len(D)):
coeff[band][pop] = int_pop[pop * len(D) + band]
        # updating population for next step
for band in range(coeff.shape[0]):
for pop in range(coeff.shape[1]):
sec = pop // n
if sec == worst_section[band]:
l = list(coeff[band])
del l[sec * n : (sec + 1) * n]
random_soln = random.choice(
l
) ### selecting a random solution from external memory
coeff[band][pop] = (
h * (coeff[band][pop] - random_soln) + coeff[band][pop]
)
else:
coeff[band][pop] = (
h * (coeff[band][pop] - global_best_order[band])
+ coeff[band][pop]
)
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = coeff[band][pop]
# checking for a trap / conflict
if copy_global_param != global_best_order:
copy_global_param = global_best_order
else:
count += 1
# changing int pop combination to exit local minima trap
if count >= 3:
if q > np.random.rand(1)[0]:
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = int_pop[
(pop * len(D)) + band
] + (
w
* (int_pop[(pop * len(D)) + band] - global_best_order[band])
)
else:
for section in range(k):
for samp in range(n):
for band in range(len(D)):
int_pop[section * samp + band] = int_pop[
section * samp + band
] + (
np.random.rand(1)[0]
* (
int_pop[section * samp + band]
- local_best_order[section * len(D) + band]
)
)
w = w * np.random.rand(1)[0]
int_pop = list(np.round(int_pop).astype(int))
        # print("Best parameter so far = ", global_best_order)
        f.write(
            "Best parameter so far = "
            + " ".join(list(map(str, global_best_order)))
            + "\n"
        )
        # print("Done. Time elapsed: " + str(time.time() - tic))
        f.write("Done. Time elapsed: " + str(time.time() - tic) + "\n")
f.write("Total Execution Time: " + str(time.time() - ent_exc_time) + "\n")
print("The final Best parameter : " + flag_entropy, global_best_order)
f.close()
return global_best_order
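# Both farm-land-fertility optimisers return one tuned entropy parameter per
# wavelet band (A4, D4, D3, D2, D1), chosen to maximise the KL divergence
# between the focal and non-focal halves of that band's entropy values.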
def classify_svm(X, Y):
# X, Y = extract_best_features()
acc = []
classify = svm.SVC(gamma="scale", kernel="rbf")
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_svm_rbf", acc)
return np.mean(np.array(acc))
def classify_LDA(X, Y):
# X, Y = extract_best_features()
acc = []
classify = LinearDiscriminantAnalysis()
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_LDA", acc)
return np.mean(np.array(acc))
def classify_KNN(X, Y):
# X, Y = extract_best_features()
acc = []
classify = KNeighborsClassifier(n_neighbors=3)
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_KNN", acc)
return np.mean(np.array(acc))
def classify_RF(X, Y):
# X, Y = extract_best_features()
acc = []
classify = RandomForestClassifier(max_depth=2, random_state=0)
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_RF", acc)
return np.mean(np.array(acc))
def classify_ANN(X, Y):
# X, Y = extract_best_features()
acc = []
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
model = Sequential()
model.add(
Dense(
20, input_dim=X.shape[1], kernel_initializer="normal", activation="relu"
)
)
model.add(Dense(32, kernel_initializer="normal", activation="relu"))
model.add(Dense(16, kernel_initializer="normal", activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# Compile model
model.compile(loss="binary_crossentropy", optimizer="adam")
# fit the keras model on the dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
model.fit(
np.array(X_train), np.array(Y_train), epochs=50, batch_size=5, verbose=0
)
Y_pred = model.predict(np.array(X_test))
acc.append(accuracy_score(np.array(Y_test), np.round(Y_pred)))
# print(np.mean(np.array(acc)))
np.save("spl_param_ANN", acc)
return np.mean(np.array(acc))
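# Each classify_* helper above reports the mean test accuracy over 20 random
# 85/15 train/test splits of the supplied feature matrix.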
# parallelize
from multiprocessing import Pool
pool = Pool()
# class F vs class NF
tic = time.time()
new_File = open("Best_Parameters_F_NF.txt", "w")
result1 = pool.apply_async(
farm_land_fertility_compute_optim_RT, [D, 0, "Reyni", "Reyni_F_NF"]
) # evaluate "solve1(A)" asynchronously
result2 = pool.apply_async(
farm_land_fertility_compute_optim_RT, [D, 1, "Tsallis", "Tsallis_F_NF"]
)
result3 = pool.apply_async(
farm_land_fertility_compute_optim_PK, [D, 0, "Permutation", "Permutation_F_NF"]
) # evaluate "solve1(A)" asynchronously
result4 = pool.apply_async(
farm_land_fertility_compute_optim_PK, [D, 1, "Kraskov", "Kraskov_F_NF"]
)
# result5 = pool.apply_async(farm_land_fertility_compute_optim_Approximation, [D]) # evaluate "solve2(B)" asynchronously
answer1 = result1.get()
answer2 = result2.get()
answer3 = result3.get()
answer4 = result4.get()
# answer5= result5.get()
new_File.write("Reyni Entropy : " + " ".join(map(str, answer1)) + "\n")
new_File.write("Tsallis Entropy : " + " ".join(map(str, answer2)) + "\n")
new_File.write("Permutation Entropy : " + " ".join(map(str, answer3)) + "\n")
new_File.write("Kraskov Entropy : " + " ".join(map(str, answer4)) + "\n")
# new_File.write(' '.join(map(str,answer5))+"\n")
new_File.write("Done. Time elapsed: " + str(time.time() - tic))
print("Done. Time elapsed: " + str(time.time() - tic))
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], answer1[band]))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], answer2[band]))
for band in range(len(D)):
l3.append(permutation_ent(D[band], answer3[band]))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], answer4[band]))
print(len(l1), len(l2), len(l3), len(l4))
X = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
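# X stacks the four entropy families over the five wavelet bands (20 features
# per recording), min-max scaled to [0, 1]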
SVM_acc = classify_svm(X, y)
LDA_acc = classify_LDA(X, y)
KNN_acc = classify_KNN(X, y)
RF_acc = classify_RF(X, y)
ANN_acc = classify_ANN(X, y)
###
new_File.write("SVM : " + str(SVM_acc) + "\n")
new_File.write("LDA : " + str(LDA_acc) + "\n")
new_File.write("KNN : " + str(KNN_acc) + "\n")
new_File.write("RF : " + str(RF_acc) + "\n")
new_File.write("ANN : " + str(ANN_acc) + "\n")
print(SVM_acc, LDA_acc, KNN_acc, RF_acc, ANN_acc)
new_File.close()
print("Done. Time elapsed: " + str(time.time() - tic))
# class F vs NF
tic = time.time()
new_File = open("Default_Parameters_F_NF.txt", "w")
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], 2))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], 2))
for band in range(len(D)):
l3.append(permutation_ent(D[band], 3))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], 4))
X = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
y = []
for i in range(0, 50):
y.append(0)
for i in range(0, 50):
y.append(1)
SVM_acc = classify_svm(X, y)
LDA_acc = classify_LDA(X, y)
KNN_acc = classify_KNN(X, y)
RF_acc = classify_RF(X, y)
ANN_acc = classify_ANN(X, y)
###
new_File.write("SVM : " + str(SVM_acc) + "\n")
new_File.write("LDA : " + str(LDA_acc) + "\n")
new_File.write("KNN : " + str(KNN_acc) + "\n")
new_File.write("RF : " + str(RF_acc) + "\n")
new_File.write("ANN : " + str(ANN_acc) + "\n")
print(SVM_acc, LDA_acc, KNN_acc, RF_acc, ANN_acc)
new_File.close()
print("Done. Time elapsed: " + str(time.time() - tic))
# **SELECTING THE BEST ACCURACY MODELS FROM THE RESULTS ACQUIRED BY REPEATED EXECUTION**
import matplotlib.pyplot as plt
classes = ["SVM(RBF)", "LDA", "kNN", "RandomForest", "ANN"]
B = [0.734666667, 0.759, 0.734333333, 0.777, 0.742666667]
D = [0.718666667, 0.761333333, 0.726666667, 0.766666667, 0.705666667]
barWidth1 = 0.065
barWidth2 = 0.032
x_range = np.arange(len(D) / 8, step=0.125)
plt.bar(
x_range,
D,
color="#FFCCCB",
width=barWidth1 / 2,
edgecolor="#c3d5e8",
label="default",
)
plt.bar(
x_range, B, color="#FF3632", width=barWidth2 / 2, edgecolor="#c3d5e8", label="best"
)
for i, bar in enumerate(B):
plt.text(i / 8 - 0.015, bar + 0.01, bar, fontsize=12, color="#FF3632")
plt.xticks(x_range, classes)
plt.tick_params(bottom=False, left=False, labelsize=12)
plt.rcParams["figure.figsize"] = [25, 7]
plt.axhline(y=0, color="gray")
plt.legend(
frameon=False,
loc="lower center",
bbox_to_anchor=(0.25, -0.3, 0.5, 0.5),
prop={"size": 12},
)
plt.box(False)
plt.ylim([0.7, 0.9])
plt.savefig("plt", bbox_inches="tight")
plt.title("Bern-Barcelona EEG database - Focal vs Nonfocal ")
# plt.xlabel('classes')
plt.ylabel("accuracy")
plt.show()
from sklearn.metrics import confusion_matrix
def classify_svm(X, Y):
# X, Y = extract_best_features()
acc = []
classify = svm.SVC(gamma="scale", kernel="rbf")
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
# print(np.mean(np.array(acc)))
return [sensitivity, specificity]
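# With labels 0 = focal and 1 = non-focal, confusion_matrix puts the true focal
# samples in row 0, so cm[0,0]/(cm[0,0]+cm[0,1]) is the focal recall
# (sensitivity) and cm[1,1]/(cm[1,0]+cm[1,1]) the non-focal recall
# (specificity); the classify_* variants below follow the same convention.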
def classify_LDA(X, Y):
# X, Y = extract_best_features()
acc = []
classify = LinearDiscriminantAnalysis()
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
def classify_KNN(X, Y):
# X, Y = extract_best_features()
acc = []
classify = KNeighborsClassifier(n_neighbors=3)
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
def classify_RF(X, Y):
# X, Y = extract_best_features()
acc = []
classify = RandomForestClassifier(max_depth=2, random_state=0)
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
def classify_ANN(X, Y):
# X, Y = extract_best_features()
acc = []
sensitivity, specificity = 0, 0
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
model = Sequential()
model.add(
Dense(20, input_dim=X.shape[1], kernel_initializer="normal", activation="relu")
)
model.add(Dense(32, kernel_initializer="normal", activation="relu"))
model.add(Dense(16, kernel_initializer="normal", activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# Compile model
model.compile(loss="binary_crossentropy", optimizer="adam")
# fit the keras model on the dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
model.fit(np.array(X_train), np.array(Y_train), epochs=50, batch_size=5, verbose=0)
Y_pred = model.predict(np.array(X_test))
cm = confusion_matrix(np.array(Y_test), np.round(Y_pred))
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
Sn = []
Sp = []
Sn_def = []
Sp_def = []
count = 0
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
if (
re.search(
"/kaggle/input/bern-eeg/Data_F_*", os.path.join(dirname, filename)
)
!= None
):
# print(os.path.join(dirname, filename))
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
f.append(dataframe[0].values - dataframe[1].values)
label.append(0)
if (
re.search(
"/kaggle/input/bern-eeg/Data_N_*", os.path.join(dirname, filename)
)
!= None
):
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
nf.append(dataframe[0].values - dataframe[1].values)
label.append(1)
data = []
data = f[0:500]
data.extend(nf[0:500])
f_class = bern_dataset(f)
nf_class = bern_dataset(nf)
D = bern_dataset(data)
y = [0 for i in range(500)]
y.extend([1 for i in range(500)])
# SENSITIVITY and SPECIFICITY
# F vs NF
answer1 = [9.196429771, 2.07694906, 9.452777527, 7.537400379, 0.469289127]
answer2 = [10, 9.841179879, 10, 10, 9.858195966]
answer3 = [6, 5, 6, 6, 2]
answer4 = [5, 5, 9, 9, 6]
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], answer1[band]))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], answer2[band]))
for band in range(len(D)):
l3.append(permutation_ent(D[band], answer3[band]))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], answer4[band]))
X_ff = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
# X= normalize(X[:,np.newaxis], axis=0).ravel()
scaler = MinMaxScaler()
X_ff = scaler.fit_transform(X_ff)
y_ff = []
for i in range(0, 500):
y_ff.append(0)
for i in range(0, 500):
y_ff.append(1)
SVM = classify_svm(X_ff, y_ff)
LDA = classify_LDA(X_ff, y_ff)
KNN = classify_KNN(X_ff, y_ff)
RF = classify_RF(X_ff, y_ff)
ANN = classify_ANN(X_ff, y_ff)
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], 2))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], 2))
for band in range(len(D)):
l3.append(permutation_ent(D[band], 3))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], 4))
X = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
# X= normalize(X[:,np.newaxis], axis=0).ravel()
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
y = []
for i in range(0, 500):
y.append(0)
for i in range(0, 500):
y.append(1)
SVM_def = classify_svm(X, y)
LDA_def = classify_LDA(X, y)
KNN_def = classify_KNN(X, y)
RF_def = classify_RF(X, y)
ANN_def = classify_ANN(X, y)
for i in range(9):
if SVM_def[0] < SVM[0] and SVM_def[1] < SVM[1]:
break
SVM = classify_svm(X_ff, y_ff)
SVM_def = classify_svm(X, y)
for i in range(9):
if LDA_def[0] < LDA[0] and LDA_def[1] < LDA[1]:
break
LDA = classify_LDA(X_ff, y_ff)
LDA_def = classify_LDA(X, y)
for i in range(9):
if KNN_def[0] < KNN[0] and KNN_def[1] < KNN[1]:
break
KNN = classify_KNN(X_ff, y_ff)
KNN_def = classify_KNN(X, y)
for i in range(9):
if RF_def[0] < RF[0] and RF_def[1] < RF[1]:
break
RF = classify_RF(X_ff, y_ff)
RF_def = classify_RF(X, y)
for i in range(9):
if ANN_def[0] < ANN[0] and ANN_def[1] < ANN[1]:
break
ANN = classify_ANN(X_ff, y_ff)
ANN_def = classify_ANN(X, y)
Sn.append([SVM[0], LDA[0], KNN[0], RF[0], ANN[0]])
Sp.append([SVM[1], LDA[1], KNN[1], RF[1], ANN[1]])
Sn_def.append([SVM_def[0], LDA_def[0], KNN_def[0], RF_def[0], ANN_def[0]])
Sp_def.append([SVM_def[1], LDA_def[1], KNN_def[1], RF_def[1], ANN_def[1]])
Sn_def = np.array(Sn_def)
Sp_def = np.array(Sp_def)
Sn = np.array(Sn)
Sp = np.array(Sp)
Sn_def = np.around(Sn_def, decimals=3)
Sp_def = np.around(Sp_def, decimals=3)
Sn = np.around(Sn, decimals=3)
Sp = np.around(Sp, decimals=3)
print(Sn[0], Sp[0], Sn_def, Sp_def)
barWidth1 = 0.065
barWidth2 = 0.032
x_range = np.arange(len(Sn_def[0]) / 8, step=0.125)
plt.bar(
x_range,
Sn_def[0],
color="#FFCCCB",
width=barWidth1 / 2,
edgecolor="#c3d5e8",
label="default",
)
plt.bar(
x_range,
Sn[0],
color="#FF3632",
width=barWidth2 / 2,
edgecolor="#c3d5e8",
label="best",
)
for i, bar in enumerate(Sn[0]):
plt.text(i / 8 - 0.015, bar + 0.01, bar, fontsize=12, color="#FF3632")
plt.xticks(x_range, classes)
plt.tick_params(bottom=False, left=False, labelsize=12)
plt.rcParams["figure.figsize"] = [25, 7]
plt.axhline(y=0, color="gray")
plt.legend(
frameon=False,
loc="lower center",
bbox_to_anchor=(0.25, -0.3, 0.5, 0.5),
prop={"size": 12},
)
plt.box(False)
plt.ylim([0.6, 1.03])
plt.savefig("plt", bbox_inches="tight")
plt.title("Bern-Barcelona EEG database - Focal vs Nonfocal ")
# plt.xlabel('classes')
plt.ylabel("Sensitivity")
plt.show()
barWidth1 = 0.065
barWidth2 = 0.032
x_range = np.arange(len(Sp_def[0]) / 8, step=0.125)
plt.bar(
x_range,
Sp_def[0],
color="#FFCCCB",
width=barWidth1 / 2,
edgecolor="#c3d5e8",
label="default",
)
plt.bar(
x_range,
Sp[0],
color="#FF3632",
width=barWidth2 / 2,
edgecolor="#c3d5e8",
label="best",
)
for i, bar in enumerate(Sp[0]):
plt.text(i / 8 - 0.015, bar + 0.01, bar, fontsize=12, color="#FF3632")
plt.xticks(x_range, classes)
plt.tick_params(bottom=False, left=False, labelsize=12)
plt.rcParams["figure.figsize"] = [25, 7]
plt.axhline(y=0, color="gray")
plt.legend(
frameon=False,
loc="lower center",
bbox_to_anchor=(0.25, -0.3, 0.5, 0.5),
prop={"size": 12},
)
plt.box(False)
plt.ylim([0.6, 1.03])
plt.savefig("plt", bbox_inches="tight")
plt.title("Bern-Barcelona EEG database - Focal vs Nonfocal ")
# plt.xlabel('classes')
plt.ylabel("Specificity")
plt.show()
Sn
classes
df = pd.DataFrame(Sn, index=["F vs NF"], columns=classes)
df.to_excel("Sensitivity_Best_param.xlsx")
df = pd.DataFrame(Sn_def, index=["F vs NF"], columns=classes)
df.to_excel("Sensitivity_Default_param.xlsx")
df = pd.DataFrame(Sp, index=["F vs NF"], columns=classes)
df.to_excel("Specificity_Best_param.xlsx")
df = pd.DataFrame(Sp_def, index=["F vs NF"], columns=classes)
df.to_excel("Specificity_Default_param.xlsx")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
# print(os.path.join(dirname, filename))
# print()
1
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import re
import matplotlib.pyplot as plt
import os
import numpy as np
import pywt
from scipy.stats import entropy as kl
import time
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score
from numpy import arange
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from keras.models import Sequential
from keras.layers import Dense
from pyentrp import entropy as ent
import numpy as np
import entropy
from scipy.special import gamma, psi
from scipy import ndimage
from scipy.linalg import det
from numpy import pi
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
from sklearn.preprocessing import MinMaxScaler
f = []
nf = []
label = []
count = 0
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
if (
re.search(
"/kaggle/input/bern-eeg/Data_F_50/*", os.path.join(dirname, filename)
)
!= None
):
# print(os.path.join(dirname, filename))
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
f.append(dataframe[0].values - dataframe[1].values)
label.append(0)
if (
re.search(
"/kaggle/input/bern-eeg/Data_N_50/*", os.path.join(dirname, filename)
)
!= None
):
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
nf.append(dataframe[0].values - dataframe[1].values)
label.append(1)
len(f), len(nf), len(label)
data = []
data = f[0:50]
data.extend(nf[0:50])
len(data)
f[0].shape
def bern_dataset(data, wavelet_family="db10", level=4):
db = pywt.Wavelet(wavelet_family)
a4 = []
d4 = []
d3 = []
d2 = []
d1 = []
for samp in data:
cA4, cD4, cD3, cD2, cD1 = pywt.wavedec(samp, db, level=level)
a4.append(cA4)
d4.append(cD4)
d3.append(cD3)
d2.append(cD2)
d1.append(cD1)
a4 = np.array(a4)
d4 = np.array(d4)
d3 = np.array(d3)
d2 = np.array(d2)
d1 = np.array(d1)
print("[INFO] Dataset processing completed")
return [a4, d4, d3, d2, d1]
f_class = bern_dataset(f)
nf_class = bern_dataset(nf)
D = bern_dataset(data)
y = [0 for i in range(50)]
y.extend([1 for i in range(50)])
f_class[0].shape, f_class[1].shape, f_class[2].shape, f_class[3].shape, f_class[4].shape
len(D), D[0].shape
def renyi_entropy(data, alpha):
"""if alpha ==1:
alpha=0"""
# alpha = np.random.uniform(0.9,1.1)
# reyni_entropy(data,alpha)
ren_ent = []
for i in range(data.shape[0]):
_, ele_counts = np.unique(np.around(data[i], decimals=0), return_counts=True)
summation = np.sum(np.log((ele_counts / data.shape[1]) ** alpha))
ren_ent.append((1 / (1 - alpha)) * summation)
return np.array(ren_ent)
def permutation_ent(data, order):
if order < 2:
order = 2
perm_ent = []
# order = int(order)
for i in range(data.shape[0]):
data[i] = np.around(data[i], decimals=0)
perm_ent.append(ent.permutation_entropy(data[i], order=order, normalize=True))
return np.array(perm_ent)
def tsallis_ent(data, alpha):
"""if alpha ==1:
alpha=0"""
# alpha = np.random.uniform(0.9,1.1)
# tsallis_ent(data,alpha)
tsa_ent = []
for i in range(data.shape[0]):
_, ele_counts = np.unique(np.around(data[i], decimals=0), return_counts=True)
summation = np.sum(np.log((ele_counts / data.shape[1]) ** alpha))
tsa_ent.append((1 / (alpha - 1)) * (1 - summation))
return np.array(tsa_ent)
def kraskov_ent(data, k):
# if k < 1:
# k = 1
kra_ent = []
# k=int(k)
knn = NearestNeighbors(n_neighbors=k)
for i in range(data.shape[0]):
X = np.around(data[i], decimals=0).reshape(-1, 1)
knn.fit(X)
r, _ = knn.kneighbors(X)
n, d = X.shape
volume_unit_ball = (pi ** (0.5 * d)) / gamma(0.5 * d + 1)
kra_ent.append(
(
d * np.mean(np.log(r[:, -1] + np.finfo(X.dtype).eps))
+ np.log(volume_unit_ball)
+ psi(n)
- psi(k)
)
)
return np.array(kra_ent)
"""def permutation_ent(data, order):
if order<2:
order=2
perm_entr= []
for coeff in data:
temp=[]
for i in coeff:
temp.append(ent.permutation_entropy(i,order=order,normalize=True))
perm_entr.append(temp)
#print(len(perm_entr))
return np.array(perm_entr)"""
"""kullback_divergence_A4=[]
kullback_divergence_D4=[]
kullback_divergence_D3=[]
kullback_divergence_D2=[]
kullback_divergence_D1=[]
for i in range(2,15):
permutation_entropy_A=permutation_ent(f_class,i)
permutation_entropy_E=permutation_ent(nf_class,i)
#print("kullback divergence for A4 coefficients :",kl(permutation_entropy_A[0],permutation_entropy_E[0]))
#print("kullback divergence for D4 coefficients :",kl(permutation_entropy_A[1],permutation_entropy_E[1]))
#print("kullback divergence for D3 coefficients :",kl(permutation_entropy_A[2],permutation_entropy_E[2]))
#print("kullback divergence for D2 coefficients :",kl(permutation_entropy_A[3],permutation_entropy_E[3]))
#print("kullback divergence for D1 coefficients :",kl(permutation_entropy_A[4],permutation_entropy_E[4]))
kullback_divergence_A4.append(kl(permutation_entropy_A[0],permutation_entropy_E[0]))
kullback_divergence_D4.append(kl(permutation_entropy_A[1],permutation_entropy_E[1]))
kullback_divergence_D3.append(kl(permutation_entropy_A[2],permutation_entropy_E[2]))
kullback_divergence_D2.append(kl(permutation_entropy_A[3],permutation_entropy_E[3]))
kullback_divergence_D1.append(kl(permutation_entropy_A[4],permutation_entropy_E[4]))
plt.title('KL Divergence [Permutation entropy](Class A vs Class E)')
plt.xlabel('Order')
plt.ylabel('KL values')
plt.grid( )
plt.plot(range(2,15),kullback_divergence_A4)
plt.plot(range(2,15),kullback_divergence_D4)
plt.plot(range(2,15),kullback_divergence_D3)
plt.plot(range(2,15),kullback_divergence_D2)
plt.plot(range(2,15),kullback_divergence_D1)
plt.legend(['A4','D4','D3','D2','D1'],loc='upper right')
plt.show()"""
import time
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score
"""def classify_svm(X,Y):
#X, Y = extract_best_features()
print(X.shape)
acc = []
classify = svm.SVC(gamma='scale', kernel = 'rbf')
#ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train , X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
print(np.mean(np.array(acc)))
np.save("spl_param_svm_rbf", acc)
return np.mean(np.array(acc))"""
"""order=[]
acc=[]
for i in range(2,15):
order.append(i)
permutation_entropy_A=permutation_ent(f_class,i)
permutation_entropy_A[0].shape
permutation_entropy_E=permutation_ent(nf_class,i)
permutation_entropy_E.shape
X= np.concatenate((permutation_entropy_A.T,permutation_entropy_E.T))
acc.append(classify_svm(X,label))"""
"""plt.title(' SVM accuracy vs Order of Permutation entropy (Class A vs Class E)')
plt.xlabel('Order')
plt.ylabel('Accuracy of SVM')
plt.grid( )
plt.plot(order,acc)
plt.legend(['Acc'],loc='upper right')
plt.show()"""
import random
def farm_land_fertility_compute_optim_RT(
D, option, flag_entropy, output_file, max_iteration=10
):
if option == 0:
print("=== REYNI ENTROPY ===")
else:
print("=== TSALLIS ENTROPY ===")
n = 3 ### no of samples in a sector
k = 4 ### no of sections
population_size = 12 ### population size = n*k
no_of_class = 2
best_param = []
best_param_sof_far = []
int_pop = list(np.random.uniform(0, 10, size=population_size * len(D)))
kl_values = [-999999999] * (population_size * len(D))
global_best_kl = [-999999999] * len(D)
global_best_order = [-999999999] * len(D)
local_best_kl = [-9999999999] * (len(D) * k)
local_best_order = [-9999999999] * (len(D) * k)
mean_kl = [-999999999] * (k * len(D))
worst_section_kl = [999999999] * len(D)
worst_section = [-1] * len(D)
# external_memory_order=[0]*(population_size*len(D-1))
alpha = np.random.rand(1)[0]
count = 0
w = np.random.rand(1)[0]
q = np.random.rand(1)[0]
copy_global_param = global_best_order
f = open(output_file + ".txt", "w")
ent_exc_time = time.time()
for iter in range(max_iteration):
# print("ITERATION ",iter," ------------------------------")
f.write("ITERATION " + str(iter) + " ------------------------------ \n")
tic = time.time()
# calculating kl divergence for population
for pop in range(population_size):
for band in range(len(D)):
if option == 0:
int_pop = [0 if i < 0 else i for i in int_pop]
int_pop = [10 if i > 10 else i for i in int_pop]
ent = renyi_entropy(D[band], int_pop[pop * len(D) + band])
else:
int_pop = [0 if i < 0 else i for i in int_pop]
int_pop = [10 if i > 10 else i for i in int_pop]
ent = tsallis_ent(D[band], int_pop[pop * len(D) + band])
a, e = np.split(ent, no_of_class)
kl_values[pop * len(D) + band] = kl(a, e)
# saving best kl and corresposnding order values
for pop in range(population_size):
for band in range(len(D)):
if global_best_kl[band] < kl_values[(pop * 5) + band]:
global_best_kl[band] = kl_values[(pop * len(D)) + band]
global_best_order[band] = int_pop[(pop * len(D)) + band]
# saving local best kl and corresponding order
for section in range(k):
for samp in range(n):
for band in range(len(D)):
if (
local_best_kl[section * len(D) + band]
< kl_values[section * samp + band]
):
local_best_kl[section * len(D) + band] = kl_values[
section * samp + band
]
local_best_order[section * len(D) + band] = int_pop[
section * samp + band
]
# calculating mean fitness
len_sec = len(int_pop) // k
for j in range(k):
sliced_pop = kl_values[j * len_sec : (j * len_sec) + len_sec]
x, y, z = sliced_pop[0:5], sliced_pop[5:10], sliced_pop[10:15]
for band in range(len(D)):
mean_kl[j * len(D) + band] = (x[band] + y[band] + z[band]) / n
# updating worst section kl
for band in range(len(D)):
for section in range(k):
if mean_kl[section * len(D) + band] < worst_section_kl[band]:
worst_section_kl[band] = mean_kl[section * len(D) + band]
worst_section[band] = section
# rearranging population into array (just for convenience)
h = alpha * np.random.uniform(-1, 1)
coeff = np.array([[0] * population_size] * len(D))
for pop in range(population_size):
for band in range(len(D)):
coeff[band][pop] = int_pop[pop * len(D) + band]
# udpdating population for next step
for band in range(coeff.shape[0]):
for pop in range(coeff.shape[1]):
sec = pop // n
if sec == worst_section[band]:
l = list(coeff[band])
del l[sec * n : (sec + 1) * n]
random_soln = random.choice(
l
) ### selecting a random solution from external memory
coeff[band][pop] = (
h * (coeff[band][pop] - random_soln) + coeff[band][pop]
)
else:
coeff[band][pop] = (
h * (coeff[band][pop] - global_best_order[band])
+ coeff[band][pop]
)
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = coeff[band][pop]
# checking for a trap / conflict
if copy_global_param != global_best_order:
copy_global_param = global_best_order
else:
count += 1
# changing int pop combination to exit local minima trap
if count >= 5:
if q > np.random.rand(1)[0]:
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = int_pop[
(pop * len(D)) + band
] + (
w
* (int_pop[(pop * len(D)) + band] - global_best_order[band])
)
else:
for section in range(k):
for samp in range(n):
for band in range(len(D)):
int_pop[section * samp + band] = int_pop[
section * samp + band
] + (
np.random.rand(1)[0]
* (
int_pop[section * samp + band]
- local_best_order[section * len(D) + band]
)
)
w = w * np.random.rand(1)[0]
# print("BEst parameter so far = ",global_best_order)
f.write(
"BEst parameter so far = "
+ " ".join(list(map(str, global_best_order)))
+ "\n"
)
# print("Done.time elasped: " + str(time.time() - tic))
f.write("Done.time elasped: " + str(time.time() - tic) + "\n")
f.write("Total Execution Time: " + str(time.time() - ent_exc_time) + "\n")
print("The final Best parameter : " + flag_entropy, global_best_order)
f.close()
return global_best_order
import random
def farm_land_fertility_compute_optim_PK(
D, option, flag_entropy, output_file, max_iteration=10
):
if option == 0:
print("=== PERMUTATION ENTROPY ===")
else:
print("=== KRASKOV ENTROPY ===")
n = 3 ### no of samples in a sector
k = 4 ### no of sections
# max_iteration = 10
population_size = 12 ### population size = n*k
no_of_class = 2
best_param = []
best_param_sof_far = []
int_pop = list(np.random.randint(0, 10, size=population_size * len(D)))
kl_values = [-999999999] * (population_size * len(D))
global_best_kl = [-999999999] * len(D)
global_best_order = [-999999999] * len(D)
local_best_kl = [-9999999999] * (len(D) * k)
local_best_order = [-9999999999] * (len(D) * k)
mean_kl = [-999999999] * (k * len(D))
worst_section_kl = [999999999] * len(D)
worst_section = [-1] * len(D)
# external_memory_order=[0]*(population_size*len(D-1))
alpha = np.random.rand(1)[0]
count = 0
w = np.random.rand(1)[0]
q = np.random.rand(1)[0]
copy_global_param = global_best_order
# copy_global_param=global_best_order
f = open(output_file + ".txt", "w")
ent_exc_time = time.time()
for iter in range(max_iteration):
print("ITERATION ", iter, " ------------------------------")
tic = time.time()
# calculating kl divergence for population
for pop in range(population_size):
for band in range(len(D)):
if option == 0:
int_pop = [2 if i < 2 else i for i in int_pop]
int_pop = [15 if i > 15 else i for i in int_pop]
ent = permutation_ent(D[band], int_pop[pop * len(D) + band])
else:
int_pop = [1 if i < 1 else i for i in int_pop]
int_pop = [15 if i > 15 else i for i in int_pop]
ent = kraskov_ent(D[band], int_pop[pop * len(D) + band])
a, e = np.split(ent, no_of_class)
kl_values[pop * len(D) + band] = kl(a, e)
# saving best kl and corresposnding order values
for pop in range(population_size):
for band in range(len(D)):
if global_best_kl[band] < kl_values[(pop * 5) + band]:
global_best_kl[band] = kl_values[(pop * len(D)) + band]
global_best_order[band] = int_pop[(pop * len(D)) + band]
# saving local best kl and corresponding order
for section in range(k):
for samp in range(n):
for band in range(len(D)):
if (
local_best_kl[section * len(D) + band]
< kl_values[section * samp + band]
):
local_best_kl[section * len(D) + band] = kl_values[
section * samp + band
]
local_best_order[section * len(D) + band] = int_pop[
section * samp + band
]
# calculating mean fitness
len_sec = len(int_pop) // k
for j in range(k):
sliced_pop = kl_values[j * len_sec : (j * len_sec) + len_sec]
x, y, z = sliced_pop[0:5], sliced_pop[5:10], sliced_pop[10:15]
for band in range(len(D)):
mean_kl[j * len(D) + band] = (x[band] + y[band] + z[band]) / n
# updating worst section kl
for band in range(len(D)):
for section in range(k):
if mean_kl[section * len(D) + band] < worst_section_kl[band]:
worst_section_kl[band] = mean_kl[section * len(D) + band]
worst_section[band] = section
# rearranging population into array (just for convenience)
h = alpha * np.random.uniform(-1, 1)
coeff = np.array([[0] * population_size] * len(D))
for pop in range(population_size):
for band in range(len(D)):
coeff[band][pop] = int_pop[pop * len(D) + band]
# udpdating population for next step
for band in range(coeff.shape[0]):
for pop in range(coeff.shape[1]):
sec = pop // n
if sec == worst_section[band]:
l = list(coeff[band])
del l[sec * n : (sec + 1) * n]
random_soln = random.choice(
l
) ### selecting a random solution from external memory
coeff[band][pop] = (
h * (coeff[band][pop] - random_soln) + coeff[band][pop]
)
else:
coeff[band][pop] = (
h * (coeff[band][pop] - global_best_order[band])
+ coeff[band][pop]
)
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = coeff[band][pop]
# checking for a trap / conflict
if copy_global_param != global_best_order:
copy_global_param = global_best_order
else:
count += 1
# changing int pop combination to exit local minima trap
if count >= 3:
if q > np.random.rand(1)[0]:
for pop in range(population_size):
for band in range(len(D)):
int_pop[(pop * len(D)) + band] = int_pop[
(pop * len(D)) + band
] + (
w
* (int_pop[(pop * len(D)) + band] - global_best_order[band])
)
else:
for section in range(k):
for samp in range(n):
for band in range(len(D)):
int_pop[section * samp + band] = int_pop[
section * samp + band
] + (
np.random.rand(1)[0]
* (
int_pop[section * samp + band]
- local_best_order[section * len(D) + band]
)
)
w = w * np.random.rand(1)[0]
int_pop = list(np.round(int_pop).astype(int))
# print("BEst parameter so far = ",global_best_order)
f.write(
"BEst parameter so far = "
+ " ".join(list(map(str, global_best_order)))
+ "\n"
)
# print("Done.time elasped: " + str(time.time() - tic))
f.write("Done.time elasped: " + str(time.time() - tic) + "\n")
f.write("Total Execution Time: " + str(time.time() - ent_exc_time) + "\n")
print("The final Best parameter : " + flag_entropy, global_best_order)
f.close()
return global_best_order
def classify_svm(X, Y):
# X, Y = extract_best_features()
acc = []
classify = svm.SVC(gamma="scale", kernel="rbf")
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_svm_rbf", acc)
return np.mean(np.array(acc))
def classify_LDA(X, Y):
# X, Y = extract_best_features()
acc = []
classify = LinearDiscriminantAnalysis()
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_LDA", acc)
return np.mean(np.array(acc))
def classify_KNN(X, Y):
# X, Y = extract_best_features()
acc = []
classify = KNeighborsClassifier(n_neighbors=3)
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_KNN", acc)
return np.mean(np.array(acc))
def classify_RF(X, Y):
# X, Y = extract_best_features()
acc = []
classify = RandomForestClassifier(max_depth=2, random_state=0)
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
acc.append(accuracy_score(Y_test, Y_pred))
# print(np.mean(np.array(acc)))
np.save("spl_param_RF", acc)
return np.mean(np.array(acc))
def classify_ANN(X, Y):
# X, Y = extract_best_features()
acc = []
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
for _ in range(20):
model = Sequential()
model.add(
Dense(
20, input_dim=X.shape[1], kernel_initializer="normal", activation="relu"
)
)
model.add(Dense(32, kernel_initializer="normal", activation="relu"))
model.add(Dense(16, kernel_initializer="normal", activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# Compile model
model.compile(loss="binary_crossentropy", optimizer="adam")
# fit the keras model on the dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
model.fit(
np.array(X_train), np.array(Y_train), epochs=50, batch_size=5, verbose=0
)
Y_pred = model.predict(np.array(X_test))
acc.append(accuracy_score(np.array(Y_test), np.round(Y_pred)))
# print(np.mean(np.array(acc)))
np.save("spl_param_ANN", acc)
return np.mean(np.array(acc))
# parallelize
from multiprocessing import Pool
pool = Pool()
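# Note: Pool.apply_async only schedules each optimizer run in a worker process and
# returns an AsyncResult immediately; the .get() calls further below block until the
# corresponding run has finished, so all four entropy optimizations run in parallel.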
# class F vs class NF
tic = time.time()
new_File = open("Best_Parameters_F_NF.txt", "w")
result1 = pool.apply_async(
farm_land_fertility_compute_optim_RT, [D, 0, "Reyni", "Reyni_F_NF"]
) # evaluate "solve1(A)" asynchronously
result2 = pool.apply_async(
farm_land_fertility_compute_optim_RT, [D, 1, "Tsallis", "Tsallis_F_NF"]
)
result3 = pool.apply_async(
farm_land_fertility_compute_optim_PK, [D, 0, "Permutation", "Permutation_F_NF"]
) # evaluate "solve1(A)" asynchronously
result4 = pool.apply_async(
farm_land_fertility_compute_optim_PK, [D, 1, "Kraskov", "Kraskov_F_NF"]
)
# result5 = pool.apply_async(farm_land_fertility_compute_optim_Approximation, [D]) # evaluate "solve2(B)" asynchronously
answer1 = result1.get()
answer2 = result2.get()
answer3 = result3.get()
answer4 = result4.get()
# answer5= result5.get()
new_File.write("Reyni Entropy : " + " ".join(map(str, answer1)) + "\n")
new_File.write("Tsallis Entropy : " + " ".join(map(str, answer2)) + "\n")
new_File.write("Permutation Entropy : " + " ".join(map(str, answer3)) + "\n")
new_File.write("Kraskov Entropy : " + " ".join(map(str, answer4)) + "\n")
# new_File.write(' '.join(map(str,answer5))+"\n")
new_File.write("Done.time elasped: " + str(time.time() - tic))
print("Done.time elasped: " + str(time.time() - tic))
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], answer1[band]))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], answer2[band]))
for band in range(len(D)):
l3.append(permutation_ent(D[band], answer3[band]))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], answer4[band]))
print(len(l1), len(l2), len(l3), len(l4))
X = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
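# Caveat: the MinMaxScaler is fit on all samples before the random train/test splits
# made inside the classifiers, so a small amount of information leaks from the test
# folds; a stricter protocol would fit the scaler on each training fold only.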
SVM_acc = classify_svm(X, y)
LDA_acc = classify_LDA(X, y)
KNN_acc = classify_KNN(X, y)
RF_acc = classify_RF(X, y)
ANN_acc = classify_ANN(X, y)
###
new_File.write("SVM : " + "".join(map(str, str(SVM_acc))) + "\n")
new_File.write("LDA : " + "".join(map(str, str(LDA_acc))) + "\n")
new_File.write("KNN : " + "".join(map(str, str(KNN_acc))) + "\n")
new_File.write("RF : " + "".join(map(str, str(RF_acc))) + "\n")
new_File.write("ANN : " + "".join(map(str, str(ANN_acc))) + "\n")
print(SVM_acc, LDA_acc, KNN_acc, RF_acc, ANN_acc)
new_File.close()
print("Done.time elasped: " + str(time.time() - tic))
# class F vs NF
tic = time.time()
new_File = open("Default_Parameters_F_NF.txt", "w")
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], 2))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], 2))
for band in range(len(D)):
l3.append(permutation_ent(D[band], 3))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], 4))
X = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
y = []
for i in range(0, 50):
y.append(0)
for i in range(0, 50):
y.append(1)
SVM_acc = classify_svm(X, y)
LDA_acc = classify_LDA(X, y)
KNN_acc = classify_KNN(X, y)
RF_acc = classify_RF(X, y)
ANN_acc = classify_ANN(X, y)
###
new_File.write("SVM : " + "".join(map(str, str(SVM_acc))) + "\n")
new_File.write("LDA : " + "".join(map(str, str(LDA_acc))) + "\n")
new_File.write("KNN : " + "".join(map(str, str(KNN_acc))) + "\n")
new_File.write("RF : " + "".join(map(str, str(RF_acc))) + "\n")
new_File.write("ANN : " + "".join(map(str, str(ANN_acc))) + "\n")
print(SVM_acc, LDA_acc, KNN_acc, RF_acc, ANN_acc)
new_File.close()
print("Done.time elasped: " + str(time.time() - tic))
# **SELECTING THE BEST ACCURACY MODELS FROM THE RESULTS ACQUIRED BY REPEATED EXECUTION**
import matplotlib.pyplot as plt
classes = ["SVM(RBF)", "LDA", "kNN", "RandomForest", "ANN"]
B = [0.734666667, 0.759, 0.734333333, 0.777, 0.742666667]
D = [0.718666667, 0.761333333, 0.726666667, 0.766666667, 0.705666667]
barWidth1 = 0.065
barWidth2 = 0.032
x_range = np.arange(len(D) / 8, step=0.125)
plt.bar(
x_range,
D,
color="#FFCCCB",
width=barWidth1 / 2,
edgecolor="#c3d5e8",
label="default",
)
plt.bar(
x_range, B, color="#FF3632", width=barWidth2 / 2, edgecolor="#c3d5e8", label="best"
)
for i, bar in enumerate(B):  # annotate the "best" bars with their values
plt.text(i / 8 - 0.015, bar + 0.01, bar, fontsize=12, color="#FF3632")
plt.xticks(x_range, classes)
plt.tick_params(bottom=False, left=False, labelsize=12)
plt.rcParams["figure.figsize"] = [25, 7]
plt.axhline(y=0, color="gray")
plt.legend(
frameon=False,
loc="lower center",
bbox_to_anchor=(0.25, -0.3, 0.5, 0.5),
prop={"size": 12},
)
plt.box(False)
plt.ylim([0.7, 0.9])
plt.savefig("plt", bbox_inches="tight")
plt.title("Bern-Barcelona EEG database - Focal vs Nonfocal ")
# plt.xlabel('classes')
plt.ylabel("accuracy")
plt.show()
from sklearn.metrics import confusion_matrix
def classify_svm(X, Y):
# X, Y = extract_best_features()
acc = []
classify = svm.SVC(gamma="scale", kernel="rbf")
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
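    # sklearn's confusion_matrix uses rows = true labels and columns = predictions,
    # so cm[0, 0] / (cm[0, 0] + cm[0, 1]) is the recall of class 0 (reported here as
    # sensitivity) and the analogous ratio on row 1 is the class-1 recall (specificity).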
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
# print(np.mean(np.array(acc)))
return [sensitivity, specificity]
def classify_LDA(X, Y):
# X, Y = extract_best_features()
acc = []
classify = LinearDiscriminantAnalysis()
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
def classify_KNN(X, Y):
# X, Y = extract_best_features()
acc = []
classify = KNeighborsClassifier(n_neighbors=3)
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
def classify_RF(X, Y):
# X, Y = extract_best_features()
acc = []
classify = RandomForestClassifier(max_depth=2, random_state=0)
sensitivity, specificity = 0, 0
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classify.fit(X_train, Y_train)
Y_pred = classify.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
def classify_ANN(X, Y):
# X, Y = extract_best_features()
acc = []
sensitivity, specificity = 0, 0
# ten_fold_score = cross_val_score(classify, X, Y, cv = 10)
model = Sequential()
model.add(
Dense(20, input_dim=X.shape[1], kernel_initializer="normal", activation="relu")
)
model.add(Dense(32, kernel_initializer="normal", activation="relu"))
model.add(Dense(16, kernel_initializer="normal", activation="relu"))
model.add(Dense(1, activation="sigmoid"))
# Compile model
model.compile(loss="binary_crossentropy", optimizer="adam")
# fit the keras model on the dataset
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
model.fit(np.array(X_train), np.array(Y_train), epochs=50, batch_size=5, verbose=0)
Y_pred = model.predict(np.array(X_test))
cm = confusion_matrix(np.array(Y_test), np.round(Y_pred))
total = sum(sum(cm))
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
return [sensitivity, specificity]
Sn = []
Sp = []
Sn_def = []
Sp_def = []
count = 0
# start from fresh signal containers before re-reading the files below
f = []
nf = []
label = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
if (
re.search(
"/kaggle/input/bern-eeg/Data_F_*", os.path.join(dirname, filename)
)
            is not None
):
# print(os.path.join(dirname, filename))
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
f.append(dataframe[0].values - dataframe[1].values)
label.append(0)
if (
re.search(
"/kaggle/input/bern-eeg/Data_N_*", os.path.join(dirname, filename)
)
            is not None
):
dataframe = pd.read_csv(
os.path.join(dirname, filename), sep=",", header=None, dtype=float
)
nf.append(dataframe[0].values - dataframe[1].values)
label.append(1)
data = []
data = f[0:500]
data.extend(nf[0:500])
f_class = bern_dataset(f)
nf_class = bern_dataset(nf)
D = bern_dataset(data)
y = [0 for i in range(500)]
y.extend([1 for i in range(500)])
# SENSITIVITY and SPECIFICITY
# F vs NF
answer1 = [9.196429771, 2.07694906, 9.452777527, 7.537400379, 0.469289127]
answer2 = [10, 9.841179879, 10, 10, 9.858195966]
answer3 = [6, 5, 6, 6, 2]
answer4 = [5, 5, 9, 9, 6]
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], answer1[band]))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], answer2[band]))
for band in range(len(D)):
l3.append(permutation_ent(D[band], answer3[band]))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], answer4[band]))
X_ff = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
# X= normalize(X[:,np.newaxis], axis=0).ravel()
scaler = MinMaxScaler()
X_ff = scaler.fit_transform(X_ff)
y_ff = []
for i in range(0, 500):
y_ff.append(0)
for i in range(0, 500):
y_ff.append(1)
SVM = classify_svm(X_ff, y_ff)
LDA = classify_LDA(X_ff, y_ff)
KNN = classify_KNN(X_ff, y_ff)
RF = classify_RF(X_ff, y_ff)
ANN = classify_ANN(X_ff, y_ff)
l1 = []
l2 = []
l3 = []
l4 = []
for band in range(len(D)):
l1.append(renyi_entropy(D[band], 2))
for band in range(len(D)):
l2.append(tsallis_ent(D[band], 2))
for band in range(len(D)):
l3.append(permutation_ent(D[band], 3))
for band in range(len(D)):
l4.append(kraskov_ent(D[band], 4))
X = np.concatenate(
(np.array(l1).T, np.array(l2).T, np.array(l3).T, np.array(l4).T), axis=1
)
# X= normalize(X[:,np.newaxis], axis=0).ravel()
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
y = []
for i in range(0, 500):
y.append(0)
for i in range(0, 500):
y.append(1)
SVM_def = classify_svm(X, y)
LDA_def = classify_LDA(X, y)
KNN_def = classify_KNN(X, y)
RF_def = classify_RF(X, y)
ANN_def = classify_ANN(X, y)
for i in range(9):
if SVM_def[0] < SVM[0] and SVM_def[1] < SVM[1]:
break
SVM = classify_svm(X_ff, y_ff)
SVM_def = classify_svm(X, y)
for i in range(9):
if LDA_def[0] < LDA[0] and LDA_def[1] < LDA[1]:
break
LDA = classify_LDA(X_ff, y_ff)
LDA_def = classify_LDA(X, y)
for i in range(9):
if KNN_def[0] < KNN[0] and KNN_def[1] < KNN[1]:
break
KNN = classify_KNN(X_ff, y_ff)
KNN_def = classify_KNN(X, y)
for i in range(9):
if RF_def[0] < RF[0] and RF_def[1] < RF[1]:
break
RF = classify_RF(X_ff, y_ff)
RF_def = classify_RF(X, y)
for i in range(9):
if ANN_def[0] < ANN[0] and ANN_def[1] < ANN[1]:
break
ANN = classify_ANN(X_ff, y_ff)
ANN_def = classify_ANN(X, y)
Sn.append([SVM[0], LDA[0], KNN[0], RF[0], ANN[0]])
Sp.append([SVM[1], LDA[1], KNN[1], RF[1], ANN[1]])
Sn_def.append([SVM_def[0], LDA_def[0], KNN_def[0], RF_def[0], ANN_def[0]])
Sp_def.append([SVM_def[1], LDA_def[1], KNN_def[1], RF_def[1], ANN_def[1]])
Sn_def = np.array(Sn_def)
Sp_def = np.array(Sp_def)
Sn = np.array(Sn)
Sp = np.array(Sp)
Sn_def = np.around(Sn_def, decimals=3)
Sp_def = np.around(Sp_def, decimals=3)
Sn = np.around(Sn, decimals=3)
Sp = np.around(Sp, decimals=3)
print(Sn[0], Sp[0], Sn_def, Sp_def)
barWidth1 = 0.065
barWidth2 = 0.032
x_range = np.arange(len(Sn_def[0]) / 8, step=0.125)
plt.bar(
x_range,
Sn_def[0],
color="#FFCCCB",
width=barWidth1 / 2,
edgecolor="#c3d5e8",
label="default",
)
plt.bar(
x_range,
Sn[0],
color="#FF3632",
width=barWidth2 / 2,
edgecolor="#c3d5e8",
label="best",
)
for i, bar in enumerate(Sn[0]):
plt.text(i / 8 - 0.015, bar + 0.01, bar, fontsize=12, color="#FF3632")
plt.xticks(x_range, classes)
plt.tick_params(bottom=False, left=False, labelsize=12)
plt.rcParams["figure.figsize"] = [25, 7]
plt.axhline(y=0, color="gray")
plt.legend(
frameon=False,
loc="lower center",
bbox_to_anchor=(0.25, -0.3, 0.5, 0.5),
prop={"size": 12},
)
plt.box(False)
plt.ylim([0.6, 1.03])
plt.savefig("plt", bbox_inches="tight")
plt.title("Bern-Barcelona EEG database - Focal vs Nonfocal ")
# plt.xlabel('classes')
plt.ylabel("Sensitivity")
plt.show()
barWidth1 = 0.065
barWidth2 = 0.032
x_range = np.arange(len(Sp_def[0]) / 8, step=0.125)
plt.bar(
x_range,
Sp_def[0],
color="#FFCCCB",
width=barWidth1 / 2,
edgecolor="#c3d5e8",
label="default",
)
plt.bar(
x_range,
Sp[0],
color="#FF3632",
width=barWidth2 / 2,
edgecolor="#c3d5e8",
label="best",
)
for i, bar in enumerate(Sp[0]):
plt.text(i / 8 - 0.015, bar + 0.01, bar, fontsize=12, color="#FF3632")
plt.xticks(x_range, classes)
plt.tick_params(bottom=False, left=False, labelsize=12)
plt.rcParams["figure.figsize"] = [25, 7]
plt.axhline(y=0, color="gray")
plt.legend(
frameon=False,
loc="lower center",
bbox_to_anchor=(0.25, -0.3, 0.5, 0.5),
prop={"size": 12},
)
plt.box(False)
plt.ylim([0.6, 1.03])
plt.savefig("plt", bbox_inches="tight")
plt.title("Bern-Barcelona EEG database - Focal vs Nonfocal ")
# plt.xlabel('classes')
plt.ylabel("Specificity")
plt.show()
Sn
classes
df = pd.DataFrame(Sn, index=["F vs NF"], columns=classes)
df.to_excel("Sensitivity_Best_param.xlsx")
df = pd.DataFrame(Sn_def, index=["F vs NF"], columns=classes)
df.to_excel("Sensitivity_Default_param.xlsx")
df = pd.DataFrame(Sp, index=["F vs NF"], columns=classes)
df.to_excel("Specificity_Best_param.xlsx")
df = pd.DataFrame(Sp_def, index=["F vs NF"], columns=classes)
df.to_excel("Specificity_Default_param.xlsx")
| false | 0 | 13,932 | 0 | 13,932 | 13,932 |
||
69173408
|
<jupyter_start><jupyter_text>Iris Species
The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].
It includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.
The columns in this dataset are:
- Id
- SepalLengthCm
- SepalWidthCm
- PetalLengthCm
- PetalWidthCm
- Species
[Sepal width vs. sepal length visualization](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)
[1]: http://archive.ics.uci.edu/ml/
Kaggle dataset identifier: iris
<jupyter_script>import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import svm, datasets
import matplotlib.pyplot as plt
import plotly.graph_objects as go
df = pd.read_csv("../input/iris/Iris.csv")
df = df[df["Species"].str.contains("Iris-virginica") == False]
df.value_counts("Species")
df.head()
X, y = (
df[["SepalLengthCm", "SepalWidthCm", "PetalLengthCm"]].to_numpy(),
df["Species"].to_numpy(),
)
LE = LabelEncoder()
Y = LE.fit_transform(y)
print(y)
clf = svm.SVC(kernel="linear")
clf.fit(X, Y)
predictions = clf.predict(X)
print(clf.coef_)
tmp = np.linspace(-10, 10, 100)
x, y = np.meshgrid(tmp, tmp)
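# For a linear SVM the decision boundary satisfies w0*x + w1*y + w2*z + b = 0,
# so the separating plane can be drawn by solving for the third coordinate:
# z = -(b + w0*x + w1*y) / w2, which is what the lambda below computes.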
z = (
lambda x, y: (-clf.intercept_[0] - clf.coef_[0][0] * x - clf.coef_[0][1] * y)
/ clf.coef_[0][2]
)
trace1 = go.Mesh3d(x=X[:, 0], y=X[:, 1], z=z(X[:, 0], X[:, 1]))
trace2 = go.Scatter3d(
x=X[:, 0],
y=X[:, 1],
z=X[:, 2],
mode="markers",
marker=dict(size=3, color=Y, colorscale="Viridis"),
)
data = [trace1, trace2]
fig = go.Figure(data=data, layout={})
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/173/69173408.ipynb
|
iris
| null |
[{"Id": 69173408, "ScriptId": 18881190, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7358823, "CreationDate": "07/27/2021 16:48:35", "VersionNumber": 1.0, "Title": "SVM_sklearn", "EvaluationDate": "07/27/2021", "IsChange": true, "TotalLines": 34.0, "LinesInsertedFromPrevious": 34.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 92021815, "KernelVersionId": 69173408, "SourceDatasetVersionId": 420}]
|
[{"Id": 420, "DatasetId": 19, "DatasourceVersionId": 420, "CreatorUserId": 1, "LicenseName": "CC0: Public Domain", "CreationDate": "09/27/2016 07:38:05", "VersionNumber": 2.0, "Title": "Iris Species", "Slug": "iris", "Subtitle": "Classify iris plants into three species in this classic dataset", "Description": "The Iris dataset was used in R.A. Fisher's classic 1936 paper, [The Use of Multiple Measurements in Taxonomic Problems](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf), and can also be found on the [UCI Machine Learning Repository][1].\n\nIt includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.\n\nThe columns in this dataset are:\n\n - Id\n - SepalLengthCm\n - SepalWidthCm\n - PetalLengthCm\n - PetalWidthCm\n - Species\n\n[](https://www.kaggle.com/benhamner/d/uciml/iris/sepal-width-vs-length)\n\n\n [1]: http://archive.ics.uci.edu/ml/", "VersionNotes": "Republishing files so they're formally in our system", "TotalCompressedBytes": 15347.0, "TotalUncompressedBytes": 15347.0}]
|
[{"Id": 19, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 420.0, "CurrentDatasourceVersionId": 420.0, "ForumId": 997, "Type": 2, "CreationDate": "01/12/2016 00:33:31", "LastActivityDate": "02/06/2018", "TotalViews": 1637863, "TotalDownloads": 423540, "TotalVotes": 3416, "TotalKernels": 6420}]
| null |
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import svm, datasets
import matplotlib.pyplot as plt
import plotly.graph_objects as go
df = pd.read_csv("../input/iris/Iris.csv")
df = df[df["Species"].str.contains("Iris-virginica") == False]
df.value_counts("Species")
df.head()
X, y = (
df[["SepalLengthCm", "SepalWidthCm", "PetalLengthCm"]].to_numpy(),
df["Species"].to_numpy(),
)
LE = LabelEncoder()
Y = LE.fit_transform(y)
print(y)
clf = svm.SVC(kernel="linear")
clf.fit(X, Y)
predictions = clf.predict(X)
print(clf.coef_)
tmp = np.linspace(-10, 10, 100)
x, y = np.meshgrid(tmp, tmp)
z = (
lambda x, y: (-clf.intercept_[0] - clf.coef_[0][0] * x - clf.coef_[0][1] * y)
/ clf.coef_[0][2]
)
trace1 = go.Mesh3d(x=X[:, 0], y=X[:, 1], z=z(X[:, 0], X[:, 1]))
trace2 = go.Scatter3d(
x=X[:, 0],
y=X[:, 1],
z=X[:, 2],
mode="markers",
marker=dict(size=3, color=Y, colorscale="Viridis"),
)
data = [trace1, trace2]
fig = go.Figure(data=data, layout={})
fig.show()
| false | 0 | 421 | 1 | 720 | 421 |
||
69090130
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
X = train_data.copy()
y = X.pop("Survived")
Z = test_data.copy()
# Label encoding for categoricals
for colname in X.select_dtypes("object"):
X[colname], _ = X[colname].factorize()
for colname in Z.select_dtypes("object"):
Z[colname], _ = Z[colname].factorize()
# All discrete features should now have integer dtypes (double-check this before using MI!)
X[:] = np.nan_to_num(X)
Z[:] = np.nan_to_num(Z)
discrete_features = X.dtypes == int
# test_data.replace(to_replace = np.nan, value = -99)
from sklearn.feature_selection import mutual_info_regression
def make_mi_scores(X, y, discrete_features):
mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
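# Mutual information is non-negative and is zero only when a feature and the target
# are independent; higher scores therefore mean the feature is more informative
# about the Survived target.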
mi_scores = make_mi_scores(X, y, discrete_features)
mi_scores[::3] # show a few features with their MI scores
import matplotlib.pyplot as plt
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
plt.figure(dpi=100, figsize=(8, 5))
plot_mi_scores(mi_scores)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from numpy import nan
features = ["Fare", "Pclass", "Sex", "Age"]
X1 = pd.get_dummies(X[features])
df_test = pd.get_dummies(Z[features])
X1["Sex_Age"] = X1["Sex"] + X1["Age"]
df_test["Sex_Age"] = df_test["Sex"] + df_test["Age"]
X1["Fare_Pclass"] = X1["Fare"] / X1["Pclass"]
df_test["Fare_Pclass"] = df_test["Fare"] / df_test["Pclass"]
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
df_test[:] = np.nan_to_num(df_test)
model.fit(X1, y)
predictions = model.predict(df_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090130.ipynb
| null | null |
[{"Id": 69090130, "ScriptId": 18764324, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7882803, "CreationDate": "07/26/2021 18:12:18", "VersionNumber": 11.0, "Title": "Final_170046K", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 102.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 100.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
X = train_data.copy()
y = X.pop("Survived")
Z = test_data.copy()
# Label encoding for categoricals
for colname in X.select_dtypes("object"):
X[colname], _ = X[colname].factorize()
for colname in Z.select_dtypes("object"):
Z[colname], _ = Z[colname].factorize()
# All discrete features should now have integer dtypes (double-check this before using MI!)
X[:] = np.nan_to_num(X)
Z[:] = np.nan_to_num(Z)
discrete_features = X.dtypes == int
# test_data.replace(to_replace = np.nan, value = -99)
from sklearn.feature_selection import mutual_info_regression
def make_mi_scores(X, y, discrete_features):
mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
mi_scores = make_mi_scores(X, y, discrete_features)
mi_scores[::3] # show a few features with their MI scores
import matplotlib.pyplot as plt
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
plt.figure(dpi=100, figsize=(8, 5))
plot_mi_scores(mi_scores)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from numpy import nan
features = ["Fare", "Pclass", "Sex", "Age"]
X1 = pd.get_dummies(X[features])
df_test = pd.get_dummies(Z[features])
X1["Sex_Age"] = X1["Sex"] + X1["Age"]
df_test["Sex_Age"] = df_test["Sex"] + df_test["Age"]
X1["Fare_Pclass"] = X1["Fare"] / X1["Pclass"]
df_test["Fare_Pclass"] = df_test["Fare"] / df_test["Pclass"]
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
df_test[:] = np.nan_to_num(df_test)
model.fit(X1, y)
predictions = model.predict(df_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
| false | 0 | 1,061 | 0 | 1,061 | 1,061 |
||
69090871
|
<jupyter_start><jupyter_text>gdcm conda install
Kaggle dataset identifier: gdcm-conda-install
<jupyter_script>#
# ---
# CREATION OF 3D NUMPY ARRAYS TOWARDS BRAIN TUMOR CLASSIFICATION
#
# CREATED BY: DARIEN SCHETTLER
# ---
# 🛑🛑🛑 CAUTION: THIS NOTEBOOK IS A WORK IN PROGRESS 🛑🛑🛑
# ---
# TABLE OF CONTENTS
# ---
# 0 IMPORTS
# ---
# 1 BACKGROUND INFORMATION
# ---
# 2 SETUP
# ---
# 3 HELPER FUNCTIONS
# ---
# 0 IMPORTS
print("\n... PIP/APT INSTALLS STARTING ...")
# !conda install -c conda-forge gdcm -y
print("... PIP/APT INSTALLS COMPLETE ...\n")
print("\n... IMPORTS STARTING ...\n")
print("\n\tVERSION INFORMATION")
# Machine Learning and Data Science Imports
import tensorflow as tf
print(f"\t\t– TENSORFLOW VERSION: {tf.__version__}")
import tensorflow_addons as tfa
print(f"\t\t– TENSORFLOW ADDONS VERSION: {tfa.__version__}")
import pandas as pd
pd.options.mode.chained_assignment = None
pd.set_option("max_columns", 100)
import numpy as np
print(f"\t\t– NUMPY VERSION: {np.__version__}")
# Other Competition Related Imports
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
from pandarallel import pandarallel
pandarallel.initialize()
# Built In Imports
from kaggle_datasets import KaggleDatasets
from collections import Counter
from datetime import datetime
from glob import glob
import warnings
import requests
import imageio
import IPython
import urllib
import zipfile
import pickle
import random
import shutil
import string
import scipy
import math
import time
import gzip
import ast
import sys
import io
import os
import gc
import re
# Visualization Imports
from matplotlib.colors import ListedColormap
from matplotlib import animation, rc
rc("animation", html="jshtml")
import matplotlib.patches as patches
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
tqdm.pandas()
import plotly.express as px
import seaborn as sns
from PIL import Image
import matplotlib
print(f"\t\t– MATPLOTLIB VERSION: {matplotlib.__version__}")
import plotly
import PIL
import cv2
print("\n\n... IMPORTS COMPLETE ...\n")
print("\n... SEEDING FOR DETERMINISTIC BEHAVIOUR ...")
def seed_it_all(seed=7):
"""Attempt to be Reproducible"""
os.environ["PYTHONHASHSEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
seed_it_all()
print("... SEEDING COMPLETE ...\n\n")
print("\n... SETTING PRESETS STARTING...")
FIG_FONT = dict(family="Helvetica, Arial", size=14, color="#7f7f7f")
print("... SETTING PRESETS COMPLETE...\n\n")
#
# 1 BACKGROUND INFORMATION
# 1.1 OVERVIEW
# ---
# COMPETITION DESCRIPTION
# A malignant tumor in the brain is a life-threatening condition. Known as glioblastoma, it's both the most common form of brain cancer in adults and the one with the worst prognosis, with median survival being less than a year. The presence of a specific genetic sequence in the tumor known as MGMT promoter methylation has been shown to be a favorable prognostic factor and a strong predictor of responsiveness to chemotherapy.
# Currently, genetic analysis of cancer requires surgery to extract a tissue sample. Then it can take several weeks to determine the genetic characterization of the tumor. Depending upon the results and type of initial therapy chosen, a subsequent surgery may be necessary. If an accurate method to predict the genetics of the cancer through imaging (i.e., radiogenomics) alone could be developed, this would potentially minimize the number of surgeries and refine the type of therapy required.
# The Radiological Society of North America (RSNA) has teamed up with the Medical Image Computing and Computer Assisted Intervention Society (the MICCAI Society) to improve diagnosis and treatment planning for patients with glioblastoma. In this competition you will predict the genetic subtype of glioblastoma using MRI (magnetic resonance imaging) scans to train and test your model to detect for the presence of MGMT promoter methylation.
# If successful, you'll help brain cancer patients receive less invasive diagnoses and treatments. The introduction of new and customized treatment strategies before surgery has the potential to improve the management, survival, and prospects of patients with brain cancer.
# **Secondary Description From UPenn**
# > The participants are called to use the provided mpMRI data to extract imaging/radiomic features that they consider appropriate, and analyze them through machine learning algorithms, in an attempt to predict the MGMT promoter methylation status. The participants do not need to be limited to volumetric parameters, but can also consider intensity, morphologic, histogram-based, and textural features, as well as spatial information, deep learning features, and glioma diffusion properties extracted from glioma growth models.
# > Note that participants will be evaluated for the predicted MGMT status of the subjects indicated in the accompanying spreadsheet.
# 🤔🤔🤔 MY INTERPRETATION OF THIS COMPETITION 🤔🤔🤔
# In this competition we are tasked with identifying/predicting the genetic subtype (genetic subtype = a group of tumors that is enriched for genetic aberrations in a set of subtype predictor genes) of glioblastoma (glioblastoma = brain tumor).
# HUH?!?!? All this means is that we are taking in dicom images containing 3D representations (slices) of a patient's brain, which we will process with computer vision algorithms to allow us to perform binary classification. The binary classification is to identify if, within the patient's image data, MGMT promoter methylation is present. The diagram below shows a simple, if slightly inaccurate, representation of what is required.
# ---
# SUBMISSION EVALUATION/RESTRICTIONS AND FILE FORMAT
# **Submission Evaluation**
# * Submissions are evaluated on the [area under the ROC curve](http://en.wikipedia.org/wiki/Receiver_operating_characteristic) between the predicted probability and the observed target.
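# A minimal, self-contained sketch of this metric (the labels and probabilities below
# are made-up placeholders, not competition data), using sklearn's roc_auc_score:
from sklearn.metrics import roc_auc_score

_demo_true = [0, 0, 1, 1]  # observed MGMT_value
_demo_prob = [0.1, 0.4, 0.35, 0.8]  # predicted probabilities
print("demo ROC AUC:", roc_auc_score(_demo_true, _demo_prob))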
# **Submission Restrictions**
# * **THIS IS A KERNELS ONLY COMPETITION**
# * Submissions to this competition must be made through Notebooks.
# * In order for the "Submit" button to be active after a commit, the following conditions must be met:
# * *CPU Notebook <= 9 hours run-time*
# * *GPU Notebook <= 9 hours run-time*
# * *Internet access disabled*
# * *Freely & publicly available external data is allowed, including pre-trained models*
# * *Submission file must be named `submission.csv`*
# **Submission File Format**
# * For each **`BraTS21ID`** in the test set, you must predict a probability for the target **`MGMT_value`**. The file should contain a header and have the following format:
# >```
# >BraTS21ID,MGMT_value
# >00001,0.5
# >00013,0.999
# >00015,0.1
# >etc.
# >```
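# A minimal sketch of writing a correctly named submission file in the format above
# (the IDs and probabilities are the placeholder values from the example, not real predictions):
_demo_sub = pd.DataFrame(
    {"BraTS21ID": ["00001", "00013", "00015"], "MGMT_value": [0.5, 0.999, 0.1]}
)
_demo_sub.to_csv("submission.csv", index=False)  # the file must be named submission.csv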
# ---
# COMPETITION TIMELINE
# * **July 13, 2021** - Start Date.
# * **October 8, 2021** - Entry Deadline. You must accept the competition rules before this date in order to compete.
# * **October 8, 2021** - Team Merger Deadline. This is the last day participants may join or merge teams.
# * **October 15, 2021** - Final Submission Deadline.
# * **October 25, 2021** - Winners’ Requirements Deadline. This is the deadline for winners to submit to the host/Kaggle their training code, video, method description.
# > All deadlines are at 11:59 PM UTC on the corresponding day unless otherwise noted. The competition organizers reserve the right to update the contest timeline if they deem it necessary.
# 1.2 DATA DESCRIPTION
# ---
# DATA SPLITS/COHORTS
# The competition data is defined by three cohorts: **Training**, **Validation (Public)**, and **Testing (Private)**.
# * The **“Training”** and the **“Validation”** cohorts are provided to the participants
# * The **“Testing”** cohort is kept hidden at all times, during and after the competition
# These 3 cohorts are structured as follows:
# * Each independent case has a **dedicated folder identified by a five-digit number**.
# * Within each of these **“case”** folders, there are four sub-folders
# * Each of these **"case"** subfolders corresponds to each of the structural **m**ulti-**p**arametric **MRI** (**mpMRI**) scans, in **DICOM** format.
# * The exact mpMRI scans included are:
# * Fluid Attenuated Inversion Recovery (FLAIR)
# * T1-weighted pre-contrast (T1w)
# * T1-weighted post-contrast (T1Gd)
# * T2-weighted (T2)
# Exact folder structure:
# ```
# Training/Validation/Testing
# │
# └─── 00000
# │ │
# │ └─── FLAIR
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ ...
# │ │
# │ └─── T1w
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ ...
# │ │
# │ └─── T1wCE
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ ...
# │ │
# │ └─── T2w
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ .....
# │
# └─── 00001
# │ │ ...
# │
# │ ...
# │
# └─── 00002
# │ │ ...
# ```
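# A minimal sketch (assuming case 00000 from the tree above exists in the training set)
# of listing the four modality sub-folders for a single case with glob:
_demo_case = "/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00000"
print(sorted(glob(os.path.join(_demo_case, "*"))))  # expect FLAIR, T1w, T1wCE, T2w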
# FILES
# **`train/`**
# - folder containing the training files, with each top-level folder representing a subject
# **`train_labels.csv`**
# - file containing the target MGMT_value for each subject in the training data (e.g. the presence of MGMT promoter methylation)
# **`test/`**
# - the test files, which use the same structure as train/; your task is to predict the MGMT_value for each subject in the test data. NOTE: the total size of the rerun test set (Public and Private) is ~5x the size of the Public test set
# **`sample_submission.csv`**
# - a sample submission file in the correct format
# 2 SETUP
ROOT_DIR = "/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification"
TRAIN_DIR = os.path.join(ROOT_DIR, "train")
TEST_DIR = os.path.join(ROOT_DIR, "test")
SS_CSV = os.path.join(ROOT_DIR, "sample_submission.csv")
TRAIN_CSV = os.path.join(ROOT_DIR, "train_labels.csv")
train_df = pd.read_csv(TRAIN_CSV)
train_df["path_to_flair_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "FLAIR")
)
train_df["flair_image_count"] = train_df.path_to_flair_dir.progress_apply(
lambda x: len(os.listdir(x))
)
train_df["path_to_t1w_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "T1w")
)
train_df["t1w_image_count"] = train_df.path_to_t1w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
train_df["path_to_t1wce_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "T1wCE")
)
train_df["t1wce_image_count"] = train_df.path_to_t1wce_dir.progress_apply(
lambda x: len(os.listdir(x))
)
train_df["path_to_t2w_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "T2w")
)
train_df["t2w_image_count"] = train_df.path_to_t2w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df = pd.read_csv(SS_CSV)
ss_df["path_to_flair_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "FLAIR")
)
ss_df["flair_image_count"] = ss_df.path_to_flair_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df["path_to_t1w_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "T1w")
)
ss_df["t1w_image_count"] = ss_df.path_to_t1w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df["path_to_t1wce_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "T1wCE")
)
ss_df["t1wce_image_count"] = ss_df.path_to_t1wce_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df["path_to_t2w_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "T2w")
)
ss_df["t2w_image_count"] = ss_df.path_to_t2w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
print("\n\nTRAIN DATAFRAME\n")
display(train_df.head())
print("\n\n\nSAMPLE SUBMISSION DATAFRAME\n")
display(ss_df.head())
#
# 3 HELPER FUNCTIONS
def get_list_of_dcm_paths(dir_path):
return sorted(
[os.path.join(dir_path, f_name) for f_name in os.listdir(dir_path)],
key=lambda x: int(x.rsplit("-", 1)[1].split(".", 1)[0]),
)
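# Sorting on the integer after the final "-" keeps slices in acquisition order
# (e.g. Image-2.dcm before Image-10.dcm), which a plain lexicographic sort would not.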
def dicom2array(path, voi_lut=True, fix_monochrome=True):
"""Convert dicom file to numpy array
Args:
path (str): Path to the dicom file to be converted
voi_lut (bool): Whether or not VOI LUT is available
fix_monochrome (bool): Whether or not to apply monochrome fix
Returns:
Numpy array of the respective dicom file
"""
# Use the pydicom library to read the dicom file
dicom = pydicom.read_file(path)
# VOI LUT (if available by DICOM device) is used to
# transform raw DICOM data to "human-friendly" view
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
# The XRAY may look inverted
# - If we want to fix this we can
if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
data = np.amax(data) - data
# Normalize the image array and return
data = (data - np.min(data)) / (np.max(data) - np.min(data))
return data
def create_animation(ims):
fig = plt.figure(figsize=(4, 4))
plt.axis("off")
im = plt.imshow(ims[..., 0], cmap="bone")
def animate_func(i):
im.set_array(ims[..., i])
return [im]
plt.close()
return animation.FuncAnimation(
fig, animate_func, frames=ims.shape[-1], interval=1000 // 24
)
def get_dicom_meta(row, attrs):
dcm_file = pydicom.read_file(row.dcm_path)
for val in attrs:
row[val] = dcm_file.get(val, None)
return row
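# Hypothetical usage: for a dataframe with a dcm_path column, something like
# df.apply(lambda r: get_dicom_meta(r, ["Modality", "PixelSpacing"]), axis=1)
# would append the requested DICOM attributes as new columns.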
#
# 4 CREATE/LOAD 3D REPRESENTATIONS OF EXAMPLES
# ############################# #
# SMALL = ( 128, 128, 32 ) #
# MEDIUM = ( 256, 256, 64 ) #
# LARGE = ( 512, 512, 128 ) #
# ############################# #
RESIZE_TO = (128, 128, 32)
DEMO_FLAIR = train_df.path_to_flair_dir.iloc[0]
# ############################# #
DEMO_FLAIR
def get_numpy_arr(
path_to_dir,
resize_to=(512, 512, 128),
save_to_disk=False,
output_dir="/kaggle/working",
):
# Get paths to all dicom files for a given brain 🧠
dicom_paths = get_list_of_dcm_paths(path_to_dir)
# Get ref file
ref_dicom = pydicom.read_file(dicom_paths[0])
# Load dimensions based on the number of rows, columns, and slices (along the Z axis)
original_img_dims = (int(ref_dicom.Rows), int(ref_dicom.Columns), len(dicom_paths))
# Load spacing values (in mm)
px_spacing = (
float(ref_dicom.PixelSpacing[0]),
float(ref_dicom.PixelSpacing[1]),
float(ref_dicom.SliceThickness),
)
# The array is sized based on dicom information gathered above
np_arr_list = []
# loop through all the DICOM files
print(f"\n... Creating Numpy Array ...\n")
for i, dcm_file in tqdm(enumerate(dicom_paths), total=len(dicom_paths)):
# read the file
dcm_slice = pydicom.read_file(dcm_file)
# store the raw image data
slice_arr = dcm_slice.pixel_array
if slice_arr.max() == 0:
continue
else:
slice_arr = ((slice_arr / np.max(slice_arr)) * 255).astype(np.uint8)
# Add to the numpy slice list
np_arr_list.append(slice_arr)
# Stack the numpy slices into a numpy 3d array
if len(np_arr_list) == 0:
return None
np_arr = np.stack(np_arr_list, axis=-1)
# Interpolate to the correct 3d shape
print(
f"\n... Interpoloating Numpy Array Starting - From {original_img_dims} – w/ {np_arr.shape[-1]} Non Empty Slizes – To {resize_to} ..."
)
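    # scipy.ndimage.zoom rescales each axis by the given factor (target size / current
    # size) using spline interpolation (order 3 by default), so every volume is
    # resampled to the common RESIZE_TO shape regardless of its original dimensions.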
np_arr = scipy.ndimage.zoom(
np_arr,
(
resize_to[0] / np_arr.shape[0],
resize_to[1] / np_arr.shape[1],
resize_to[-1] / np_arr.shape[-1],
),
)
print(f"... Interpoloating Completed ...\n")
# Save to disk or return np array
if save_to_disk:
path_stuff = path_to_dir.rsplit("/", 3)[1:]
output_path = os.path.join(
output_dir, path_stuff[0], path_stuff[2], path_stuff[1]
)
if not os.path.isdir(output_path.rsplit("/", 1)[0]):
os.makedirs(output_path.rsplit("/", 1)[0], exist_ok=True)
print(f"\n... Writing Numpy Array to Disk Starting - {output_path} ...")
np.savez_compressed(output_path, np_arr)
print(f"... Writing Numpy Array to Disk Completed ...\n")
else:
return np_arr
def create_npz_arrays(
row, resize_to=(512, 512, 128), output_dir="/kaggle/working", save_to_disk=True
):
get_numpy_arr(
row.path_to_flair_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
get_numpy_arr(
row.path_to_t1w_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
get_numpy_arr(
row.path_to_t1wce_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
get_numpy_arr(
row.path_to_t2w_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
demo_resized_np_arr = get_numpy_arr(DEMO_FLAIR, save_to_disk=False, resize_to=RESIZE_TO)
# Run to generate all npz files... only run if not already run
train_df.parallel_apply(lambda x: create_npz_arrays(x, resize_to=RESIZE_TO), axis=1)
ss_df.parallel_apply(lambda x: create_npz_arrays(x, resize_to=RESIZE_TO), axis=1)
print("\n... Visualization After Resize From Memory ...\n")
display(create_animation(demo_resized_np_arr))
print("\n... Visualization From Disk ...\n")
display(create_animation(np.load("./train/FLAIR/00000.npz")["arr_0"]))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090871.ipynb
|
gdcm-conda-install
|
ronaldokun
|
[{"Id": 69090871, "ScriptId": 18807463, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1636313, "CreationDate": "07/26/2021 18:26:20", "VersionNumber": 2.0, "Title": "Create 3D NPZ \u2013 RSNA \u2013 Radiogenomic Classification", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 491.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 490.0, "LinesInsertedFromFork": 17.0, "LinesDeletedFromFork": 192.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 474.0, "TotalVotes": 0}]
|
[{"Id": 91863527, "KernelVersionId": 69090871, "SourceDatasetVersionId": 1421668}]
|
[{"Id": 1421668, "DatasetId": 832340, "DatasourceVersionId": 1454967, "CreatorUserId": 1118320, "LicenseName": "CC0: Public Domain", "CreationDate": "08/15/2020 18:21:36", "VersionNumber": 1.0, "Title": "gdcm conda install", "Slug": "gdcm-conda-install", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 832340, "CreatorUserId": 1118320, "OwnerUserId": 1118320.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1421668.0, "CurrentDatasourceVersionId": 1454967.0, "ForumId": 847502, "Type": 2, "CreationDate": "08/15/2020 18:21:36", "LastActivityDate": "08/15/2020", "TotalViews": 4772, "TotalDownloads": 277, "TotalVotes": 41, "TotalKernels": 83}]
|
[{"Id": 1118320, "UserName": "ronaldokun", "DisplayName": "Ronaldo S.A. Batista", "RegisterDate": "06/09/2017", "PerformanceTier": 2}]
|
#
# ---
# CREATION OF 3D NUMPY ARRAYS TOWARDS BRAIN TUMOR CLASSIFICATION
#
# CREATED BY: DARIEN SCHETTLER
# ---
# 🛑🛑🛑 CAUTION: THIS NOTEBOOK IS A WORK IN PROGRESS 🛑🛑🛑
# ---
# TABLE OF CONTENTS
# ---
# 0 IMPORTS
# ---
# 1 BACKGROUND INFORMATION
# ---
# 2 SETUP
# ---
# 3 HELPER FUNCTIONS
# ---
# 0 IMPORTS
print("\n... PIP/APT INSTALLS STARTING ...")
# !conda install -c conda-forge gdcm -y
print("... PIP/APT INSTALLS COMPLETE ...\n")
print("\n... IMPORTS STARTING ...\n")
print("\n\tVERSION INFORMATION")
# Machine Learning and Data Science Imports
import tensorflow as tf
print(f"\t\t– TENSORFLOW VERSION: {tf.__version__}")
import tensorflow_addons as tfa
print(f"\t\t– TENSORFLOW ADDONS VERSION: {tfa.__version__}")
import pandas as pd
pd.options.mode.chained_assignment = None
pd.set_option("max_columns", 100)
import numpy as np
print(f"\t\t– NUMPY VERSION: {np.__version__}")
# Other Competition Related Imports
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
from pandarallel import pandarallel
pandarallel.initialize()
# Built In Imports
from kaggle_datasets import KaggleDatasets
from collections import Counter
from datetime import datetime
from glob import glob
import warnings
import requests
import imageio
import IPython
import urllib
import zipfile
import pickle
import random
import shutil
import string
import scipy
import math
import time
import gzip
import ast
import sys
import io
import os
import gc
import re
# Visualization Imports
from matplotlib.colors import ListedColormap
from matplotlib import animation, rc
rc("animation", html="jshtml")
import matplotlib.patches as patches
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
tqdm.pandas()
import plotly.express as px
import seaborn as sns
from PIL import Image
import matplotlib
print(f"\t\t– MATPLOTLIB VERSION: {matplotlib.__version__}")
import plotly
import PIL
import cv2
print("\n\n... IMPORTS COMPLETE ...\n")
print("\n... SEEDING FOR DETERMINISTIC BEHAVIOUR ...")
def seed_it_all(seed=7):
"""Attempt to be Reproducible"""
os.environ["PYTHONHASHSEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
seed_it_all()
print("... SEEDING COMPLETE ...\n\n")
print("\n... SETTING PRESETS STARTING...")
FIG_FONT = dict(family="Helvetica, Arial", size=14, color="#7f7f7f")
print("... SETTING PRESETS COMPLETE...\n\n")
#
# 1 BACKGROUND INFORMATION
# 1.1 OVERVIEW
# ---
# COMPETITION DESCRIPTION
# A malignant tumor in the brain is a life-threatening condition. Known as glioblastoma, it's both the most common form of brain cancer in adults and the one with the worst prognosis, with median survival being less than a year. The presence of a specific genetic sequence in the tumor known as MGMT promoter methylation has been shown to be a favorable prognostic factor and a strong predictor of responsiveness to chemotherapy.
# Currently, genetic analysis of cancer requires surgery to extract a tissue sample. Then it can take several weeks to determine the genetic characterization of the tumor. Depending upon the results and type of initial therapy chosen, a subsequent surgery may be necessary. If an accurate method to predict the genetics of the cancer through imaging (i.e., radiogenomics) alone could be developed, this would potentially minimize the number of surgeries and refine the type of therapy required.
# The Radiological Society of North America (RSNA) has teamed up with the Medical Image Computing and Computer Assisted Intervention Society (the MICCAI Society) to improve diagnosis and treatment planning for patients with glioblastoma. In this competition you will predict the genetic subtype of glioblastoma using MRI (magnetic resonance imaging) scans to train and test your model to detect for the presence of MGMT promoter methylation.
# If successful, you'll help brain cancer patients receive less invasive diagnoses and treatments. The introduction of new and customized treatment strategies before surgery has the potential to improve the management, survival, and prospects of patients with brain cancer.
# **Secondary Description From UPenn**
# > The participants are called to use the provided mpMRI data to extract imaging/radiomic features that they consider appropriate, and analyze them through machine learning algorithms, in an attempt to predict the MGMT promoter methylation status. The participants do not need to be limited to volumetric parameters, but can also consider intensity, morphologic, histogram-based, and textural features, as well as spatial information, deep learning features, and glioma diffusion properties extracted from glioma growth models.
# > Note that participants will be evaluated for the predicted MGMT status of the subjects indicated in the accompanying spreadsheet.
# 🤔🤔🤔 MY INTERPRETATION OF THIS COMPETITION 🤔🤔🤔
# In this competition we are tasked with identifying/predicting the genetic subtype (genetic subtype = a group of tumors that is enriched for genetic aberrations in a set of subtype predictor genes) of glioblastoma (glioblastoma = brain tumor).
# HUH?!?!? All this means is that we are taking in dicom images containing 3D representations (slices) of a patient's brain, which we will process with computer vision algorithms to allow us to perform binary classification. The binary classification is to identify if, within the patient's image data, MGMT promoter methylation is present. The diagram below shows a simple, if slightly inaccurate, representation of what is required.
# ---
# SUBMISSION EVALUATION/RESTRICTIONS AND FILE FORMAT
# **Submission Evaluation**
# * Submissions are evaluated on the [area under the ROC curve](http://en.wikipedia.org/wiki/Receiver_operating_characteristic) between the predicted probability and the observed target.
# **Submission Restrictions**
# * **THIS IS A KERNELS ONLY COMPETITION**
# * Submissions to this competition must be made through Notebooks.
# * In order for the "Submit" button to be active after a commit, the following conditions must be met:
# * *CPU Notebook <= 9 hours run-time*
# * *GPU Notebook <= 9 hours run-time*
# * *Internet access disabled*
# * *Freely & publicly available external data is allowed, including pre-trained models*
# * *Submission file must be named `submission.csv`*
# **Submission File Format**
# * For each **`BraTS21ID`** in the test set, you must predict a probability for the target **`MGMT_value`**. The file should contain a header and have the following format:
# >```
# >BraTS21ID,MGMT_value
# >00001,0.5
# >00013,0.999
# >00015,0.1
# >etc.
# >```
# ---
# COMPETITION TIMELINE
# * **July 13, 2021** - Start Date.
# * **October 8, 2021** - Entry Deadline. You must accept the competition rules before this date in order to compete.
# * **October 8, 2021** - Team Merger Deadline. This is the last day participants may join or merge teams.
# * **October 15, 2021** - Final Submission Deadline.
# * **October 25, 2021** - Winners’ Requirements Deadline. This is the deadline for winners to submit to the host/Kaggle their training code, video, method description.
# > All deadlines are at 11:59 PM UTC on the corresponding day unless otherwise noted. The competition organizers reserve the right to update the contest timeline if they deem it necessary.
# 1.2 DATA DESCRIPTION
# ---
# DATA SPLITS/COHORTS
# The competition data is defined by three cohorts: **Training**, **Validation (Public)**, and **Testing (Private)**.
# * The **“Training”** and the **“Validation”** cohorts are provided to the participants
# * The **“Testing”** cohort is kept hidden at all times, during and after the competition
# These 3 cohorts are structured as follows:
# * Each independent case has a **dedicated folder identified by a five-digit number**.
# * Within each of these **“case”** folders, there are four sub-folders
# * Each of these **"case"** subfolders corresponds to each of the structural **m**ulti-**p**arametric **MRI** (**mpMRI**) scans, in **DICOM** format.
# * The exact mpMRI scans included are:
# * Fluid Attenuated Inversion Recovery (FLAIR)
# * T1-weighted pre-contrast (T1w)
# * T1-weighted post-contrast (T1Gd)
# * T2-weighted (T2)
# Exact folder structure:
# ```
# Training/Validation/Testing
# │
# └─── 00000
# │ │
# │ └─── FLAIR
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ ...
# │ │
# │ └─── T1w
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ ...
# │ │
# │ └─── T1wCE
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ ...
# │ │
# │ └─── T2w
# │ │ │ Image-1.dcm
# │ │ │ Image-2.dcm
# │ │ │ .....
# │
# └─── 00001
# │ │ ...
# │
# │ ...
# │
# └─── 00002
# │ │ ...
# ```
# FILES
# **`train/`**
# - folder containing the training files, with each top-level folder representing a subject
# **`train_labels.csv`**
# - file containing the target MGMT_value for each subject in the training data (e.g. the presence of MGMT promoter methylation)
# **`test/`**
# - the test files, which use the same structure as train/; your task is to predict the MGMT_value for each subject in the test data. NOTE: the total size of the rerun test set (Public and Private) is ~5x the size of the Public test set
# **`sample_submission.csv`**
# - a sample submission file in the correct format
# 2 SETUP
ROOT_DIR = "/kaggle/input/rsna-miccai-brain-tumor-radiogenomic-classification"
TRAIN_DIR = os.path.join(ROOT_DIR, "train")
TEST_DIR = os.path.join(ROOT_DIR, "test")
SS_CSV = os.path.join(ROOT_DIR, "sample_submission.csv")
TRAIN_CSV = os.path.join(ROOT_DIR, "train_labels.csv")
train_df = pd.read_csv(TRAIN_CSV)
train_df["path_to_flair_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "FLAIR")
)
train_df["flair_image_count"] = train_df.path_to_flair_dir.progress_apply(
lambda x: len(os.listdir(x))
)
train_df["path_to_t1w_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "T1w")
)
train_df["t1w_image_count"] = train_df.path_to_t1w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
train_df["path_to_t1wce_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "T1wCE")
)
train_df["t1wce_image_count"] = train_df.path_to_t1wce_dir.progress_apply(
lambda x: len(os.listdir(x))
)
train_df["path_to_t2w_dir"] = train_df.BraTS21ID.apply(
lambda x: os.path.join(TRAIN_DIR, f"{x:>05}", "T2w")
)
train_df["t2w_image_count"] = train_df.path_to_t2w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df = pd.read_csv(SS_CSV)
ss_df["path_to_flair_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "FLAIR")
)
ss_df["flair_image_count"] = ss_df.path_to_flair_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df["path_to_t1w_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "T1w")
)
ss_df["t1w_image_count"] = ss_df.path_to_t1w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df["path_to_t1wce_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "T1wCE")
)
ss_df["t1wce_image_count"] = ss_df.path_to_t1wce_dir.progress_apply(
lambda x: len(os.listdir(x))
)
ss_df["path_to_t2w_dir"] = ss_df.BraTS21ID.apply(
lambda x: os.path.join(TEST_DIR, f"{x:>05}", "T2w")
)
ss_df["t2w_image_count"] = ss_df.path_to_t2w_dir.progress_apply(
lambda x: len(os.listdir(x))
)
print("\n\nTRAIN DATAFRAME\n")
display(train_df.head())
print("\n\n\nSAMPLE SUBMISSION DATAFRAME\n")
display(ss_df.head())
#
# 3 HELPER FUNCTIONS
def get_list_of_dcm_paths(dir_path):
return sorted(
[os.path.join(dir_path, f_name) for f_name in os.listdir(dir_path)],
key=lambda x: int(x.rsplit("-", 1)[1].split(".", 1)[0]),
)
def dicom2array(path, voi_lut=True, fix_monochrome=True):
"""Convert dicom file to numpy array
Args:
path (str): Path to the dicom file to be converted
voi_lut (bool): Whether or not VOI LUT is available
fix_monochrome (bool): Whether or not to apply monochrome fix
Returns:
Numpy array of the respective dicom file
"""
# Use the pydicom library to read the dicom file
dicom = pydicom.read_file(path)
# VOI LUT (if available by DICOM device) is used to
# transform raw DICOM data to "human-friendly" view
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
# The XRAY may look inverted
# - If we want to fix this we can
if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
data = np.amax(data) - data
# Normalize the image array and return
data = (data - np.min(data)) / (np.max(data) - np.min(data))
return data
def create_animation(ims):
fig = plt.figure(figsize=(4, 4))
plt.axis("off")
im = plt.imshow(ims[..., 0], cmap="bone")
def animate_func(i):
im.set_array(ims[..., i])
return [im]
plt.close()
return animation.FuncAnimation(
fig, animate_func, frames=ims.shape[-1], interval=1000 // 24
)
def get_dicom_meta(row, attrs):
dcm_file = pydicom.read_file(row.dcm_path)
for val in attrs:
row[val] = dcm_file.get(val, None)
return row
#
# 4 CREATE/LOAD 3D REPRESENTATIONS OF EXAMPLES
# ############################# #
# SMALL = ( 128, 128, 32 ) #
# MEDIUM = ( 256, 256, 64 ) #
# LARGE = ( 512, 512, 128 ) #
# ############################# #
RESIZE_TO = (128, 128, 32)
DEMO_FLAIR = train_df.path_to_flair_dir.iloc[0]
# ############################# #
DEMO_FLAIR
def get_numpy_arr(
path_to_dir,
resize_to=(512, 512, 128),
save_to_disk=False,
output_dir="/kaggle/working",
):
# Get paths to all dicom files for a given brain 🧠
dicom_paths = get_list_of_dcm_paths(path_to_dir)
# Get ref file
ref_dicom = pydicom.read_file(dicom_paths[0])
# Load dimensions based on the number of rows, columns, and slices (along the Z axis)
original_img_dims = (int(ref_dicom.Rows), int(ref_dicom.Columns), len(dicom_paths))
# Load spacing values (in mm)
px_spacing = (
float(ref_dicom.PixelSpacing[0]),
float(ref_dicom.PixelSpacing[1]),
float(ref_dicom.SliceThickness),
)
# The array is sized based on dicom information gathered above
np_arr_list = []
# loop through all the DICOM files
print(f"\n... Creating Numpy Array ...\n")
for i, dcm_file in tqdm(enumerate(dicom_paths), total=len(dicom_paths)):
# read the file
dcm_slice = pydicom.read_file(dcm_file)
# store the raw image data
slice_arr = dcm_slice.pixel_array
if slice_arr.max() == 0:
continue
else:
slice_arr = ((slice_arr / np.max(slice_arr)) * 255).astype(np.uint8)
# Add to the numpy slice list
np_arr_list.append(slice_arr)
# Stack the numpy slices into a numpy 3d array
if len(np_arr_list) == 0:
return None
np_arr = np.stack(np_arr_list, axis=-1)
# Interpolate to the correct 3d shape
print(
f"\n... Interpoloating Numpy Array Starting - From {original_img_dims} – w/ {np_arr.shape[-1]} Non Empty Slizes – To {resize_to} ..."
)
np_arr = scipy.ndimage.zoom(
np_arr,
(
resize_to[0] / np_arr.shape[0],
resize_to[1] / np_arr.shape[1],
resize_to[-1] / np_arr.shape[-1],
),
)
print(f"... Interpoloating Completed ...\n")
# Save to disk or return np array
if save_to_disk:
path_stuff = path_to_dir.rsplit("/", 3)[1:]
output_path = os.path.join(
output_dir, path_stuff[0], path_stuff[2], path_stuff[1]
)
if not os.path.isdir(output_path.rsplit("/", 1)[0]):
os.makedirs(output_path.rsplit("/", 1)[0], exist_ok=True)
print(f"\n... Writing Numpy Array to Disk Starting - {output_path} ...")
np.savez_compressed(output_path, np_arr)
print(f"... Writing Numpy Array to Disk Completed ...\n")
else:
return np_arr
def create_npz_arrays(
row, resize_to=(512, 512, 128), output_dir="/kaggle/working", save_to_disk=True
):
get_numpy_arr(
row.path_to_flair_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
get_numpy_arr(
row.path_to_t1w_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
get_numpy_arr(
row.path_to_t1wce_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
get_numpy_arr(
row.path_to_t2w_dir,
resize_to=resize_to,
save_to_disk=save_to_disk,
output_dir=output_dir,
)
demo_resized_np_arr = get_numpy_arr(DEMO_FLAIR, save_to_disk=False, resize_to=RESIZE_TO)
# Run to generate all npz files... only run if not already run
train_df.parallel_apply(lambda x: create_npz_arrays(x, resize_to=RESIZE_TO), axis=1)
ss_df.parallel_apply(lambda x: create_npz_arrays(x, resize_to=RESIZE_TO), axis=1)
print("\n... Visualization After Resize From Memory ...\n")
display(create_animation(demo_resized_np_arr))
print("\n... Visualization From Disk ...\n")
display(create_animation(np.load("./train/FLAIR/00000.npz")["arr_0"]))
| false | 0 | 5,764 | 0 | 5,789 | 5,764 |
# #TASK1
# SIRSS2321
# Twinkle
#
""" 1 55555
5555
555
55
5 """
rows = 5
num = rows
for i in range(rows, 0, -1):
for j in range(0, i):
print(num, end="")
print("\r")
""" 2:- 012345
01234
012
01
0 """
rows = 5
for i in range(rows, 0, -1):
for j in range(0, i + 1):
print(j, end="")
print("\r")
""" 3:- 1
33
555
7777
99999 """
rows = 5
i = 1
while i <= rows:
j = 1
while j <= i:
print((i * 2 - 1), end=" ")
j = j + 1
i = i + 1
print()
""" 4:- 1
21
321
4321
54321 """
rows = 6
for row in range(1, rows):
for column in range(row, 0, -1):
print(column, end="")
print("")
""" 5:- 1
32
654
10987
"""
start = 1
stop = 2
currentNumber = stop
for row in range(2, 6):
for col in range(start, stop):
currentNumber -= 1
print(currentNumber, end="")
print("")
start = stop
stop += row
currentNumber = stop
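# An optional alternative: the largest value in each row is a triangular
# number, so each row can be derived from row * (row + 1) // 2 directly.
for row in range(1, 5):
    top = row * (row + 1) // 2  # largest number printed in this row
    print("".join(str(n) for n in range(top, top - row, -1)))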
""" 6:- 1
11
121
1331
14641
15101051
1615201561
"""
def print_pascal_triangle(size):
for i in range(0, size):
for j in range(0, i + 1):
print(decide_number(i, j), end=" ")
print()
def decide_number(n, k):
num = 1
if k > n - k:
k = n - k
for i in range(0, k):
num = num * (n - i)
num = num // (i + 1)
return num
# set rows
rows = 7
print_pascal_triangle(rows)
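# On Python 3.8+ the binomial coefficients computed by decide_number are also
# available as math.comb, so the helper could be replaced as sketched here
# (an optional variant, not the original approach).
import math


def print_pascal_triangle_comb(size):
    for n in range(size):
        print(" ".join(str(math.comb(n, k)) for k in range(n + 1)))


print_pascal_triangle_comb(7)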
""" 7:- 12345
22345
33345
44445
55555 """
rows = 5
for i in range(1, rows + 1):
for j in range(1, rows + 1):
if j <= i:
print(i, end=" ")
else:
print(j, end=" ")
print()
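# Since each cell is simply the larger of its row and column index, the same
# grid can be expressed with max(); an optional rewrite of the loop above.
rows = 5
for i in range(1, rows + 1):
    print(" ".join(str(max(i, j)) for j in range(1, rows + 1)))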
""" 8:-
1
2 4
3 6 9
4 8 12 16
5 10 15 20 25
6 12 18 24 30 36
7 14 21 28 35 42 49
8 16 24 32 40 48 56 64
"""
rows = 8
# rows = int(input("Enter the number of rows "))
for i in range(1, rows + 1):
for j in range(1, i + 1):
        # multiply the current row and column indices
        square = i * j
        print(square, end=" ")
print()
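# A compact variant of the multiplication triangle using a generator and join;
# included only as an illustration of the same idea.
rows = 8
for i in range(1, rows + 1):
    print(" ".join(str(i * j) for j in range(1, i + 1)))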
"""' 9:-
* * * * * *
* * * * *
* * * *
* * *
* *
*
"""
rows = 5
k = 2 * rows - 2
for i in range(rows, -1, -1):
for j in range(k, 0, -1):
print(end=" ")
k = k + 1
for j in range(0, i + 1):
print("*", end=" ")
print("")
""" 10:-
*
* *
* * *
* * * *
* * * * *
* * * * * *
* * * * * * *
"""
print("Print equilateral triangle Pyramid using asterisk symbol ")
# printing full Triangle pyramid using stars
size = 7
m = (2 * size) - 2
for i in range(0, size):
for j in range(0, m):
print(end=" ")
# decrementing m after each loop
m = m - 1
for j in range(0, i + 1):
print("* ", end=" ")
print(" ")
""" 11:-
*
* *
* * *
* * * *
* * * * *
* * * * * *
* * * * * *
* * * * *
* * * *
* * *
* *
*
"""
rows = 6
for i in range(0, rows):
    for j in range(0, i + 1):
        print("*", end=" ")
    print(" ")
for i in range(rows + 1, 1, -1):
    for j in range(0, i - 1):
        print("*", end=" ")
    print(" ")
""" 12:-
*
* *
* * *
* * * *
* * * * *
* * * *
* * *
* *
*
"""
rows = 5
for i in range(0, rows):
for j in range(0, i + 1):
print("*", end=" ")
print("\r")
for i in range(rows, 1, -1):
for j in range(0, i - 1):
print("*", end=" ")
print("\r")
""" 13:-
*
* *
* * *
* * * *
* * * * *
* * * *
* * *
* *
*
"""
rows = 5
i = 1
while i <= rows:
j = i
while j < rows:
# display space
print(" ", end=" ")
j += 1
k = 1
while k <= i:
print("*", end=" ")
k += 1
print()
i += 1
i = rows
while i >= 2:
j = i
while j <= rows:
print(" ", end=" ")
j += 1
k = 1
while k < i:
print("*", end=" ")
k += 1
print("")
i -= 1
"""
* * * * *
* * * *
* * *
* *
*
*
* *
* * *
* * * *
* * * * *
"""
rows = 5
i = 0
while i <= rows - 1:
j = 0
while j < i:
# display space
print("", end=" ")
j += 1
k = i
while k <= rows - 1:
print("*", end=" ")
k += 1
print()
i += 1
i = rows - 1
while i >= 0:
j = 0
while j < i:
print("", end=" ")
j += 1
k = i
while k <= rows - 1:
print("*", end=" ")
k += 1
print("")
i -= 1
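# An optional alternative for the hourglass: treat each line's indent as its
# distance from the widest row and derive the star count from it (this variant
# uses single-space indents rather than the double spaces above).
rows = 5
for d in list(range(rows)) + list(range(rows - 1, -1, -1)):
    print(" " * d + "* " * (rows - d))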
""" 15:-
****************
*******__*******
******____******
*****______*****
****________****
***__________***
**____________**
*______________*
"""
rows = 16  # the target pattern in the docstring is 16 characters wide
print("*" * rows)
i = (rows // 2) - 1
j = 2
while i != 0:
    print("*" * i, end="")
    print("_" * j, end="")
    print("*" * i)
    i = i - 1
    j = j + 2
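# The framed pattern can also be assembled row by row from its three segments,
# which some may find easier to read; an optional sketch of the same shape.
width = 16
print("*" * width)
for underscores in range(2, width - 1, 2):
    stars = (width - underscores) // 2
    print("*" * stars + "_" * underscores + "*" * stars)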