Dataset Viewer

Column schema (⌀ = column may be null):

| Column | Type | Range |
|---|---|---|
| file_id | string | lengths 5-9 |
| content | string | lengths 100-5.25M |
| local_path | string | lengths 66-70 |
| kaggle_dataset_name | string ⌀ | lengths 3-50 |
| kaggle_dataset_owner | string ⌀ | lengths 3-20 |
| kversion | string ⌀ | lengths 497-763 |
| kversion_datasetsources | string ⌀ | lengths 71-5.46k |
| dataset_versions | string ⌀ | lengths 338-235k |
| datasets | string ⌀ | lengths 334-371 |
| users | string ⌀ | lengths 111-264 |
| script | string | lengths 100-5.25M |
| df_info | string | lengths 0-4.87M |
| has_data_info | bool | 2 classes |
| nb_filenames | int64 | 0-370 |
| retreived_data_description | string | lengths 0-4.44M |
| script_nb_tokens | int64 | 25-663k |
| upvotes | int64 | 0-1.65k |
| tokens_description | int64 | 25-663k |
| tokens_script | int64 | 25-663k |
69046156
|
<jupyter_start><jupyter_text>Tomato Diseases Dataset (CSV+Images)
Kaggle dataset identifier: tomato-diseases-dataset-csvimages
<jupyter_code>import pandas as pd
df = pd.read_csv('tomato-diseases-dataset-csvimages/train.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 18160 entries, 0 to 18159
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 18160 non-null int64
1 path 18160 non-null object
2 img 18160 non-null object
3 label_text 18160 non-null object
4 label 18160 non-null int64
dtypes: int64(2), object(3)
memory usage: 709.5+ KB
<jupyter_text>Examples:
{
"Unnamed: 0": 0,
"path": "../input/plantvillage-dataset/color/Tomato___Late_blight/781e93a9-2059-42de-8075-658033a6abf7___RS_Late.B 6075.JPG",
"img": "781e93a9-2059-42de-8075-658033a6abf7___RS_Late.B 6075.JPG",
"label_text": "Tomato___Late_blight",
"label": 2
}
{
"Unnamed: 0": 1,
"path": "../input/plantvillage-dataset/color/Tomato___Late_blight/283ff0be-6e5e-4b4e-bf21-639780b77ffc___GHLB2 Leaf 8636.JPG",
"img": "283ff0be-6e5e-4b4e-bf21-639780b77ffc___GHLB2 Leaf 8636.JPG",
"label_text": "Tomato___Late_blight",
"label": 2
}
{
"Unnamed: 0": 2,
"path": "../input/plantvillage-dataset/color/Tomato___Late_blight/0db85707-41f9-42df-ba3b-842d14f00a68___GHLB2 Leaf 8909.JPG",
"img": "0db85707-41f9-42df-ba3b-842d14f00a68___GHLB2 Leaf 8909.JPG",
"label_text": "Tomato___Late_blight",
"label": 2
}
{
"Unnamed: 0": 3,
"path": "../input/plantvillage-dataset/color/Tomato___Late_blight/078a999d-6e6f-427e-a1e6-80b4d2df2bae___GHLB2 Leaf 9029.JPG",
"img": "078a999d-6e6f-427e-a1e6-80b4d2df2bae___GHLB2 Leaf 9029.JPG",
"label_text": "Tomato___Late_blight",
"label": 2
}
<jupyter_script># # Tomato Leaf Disease Detection 0.998 [inference]
# ### Hi kagglers, this is an `inference` notebook using `Keras`.
# >
# > [Tomato Leaf Disease Detection 0.998 [Training]](https://www.kaggle.com/ammarnassanalhajali/tomato-leaf-disease-detection-0-998-training)
# ### If you find this kernel useful, please upvote!
import os, cv2, json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras import models, layers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.optimizers import Adam
from PIL import Image
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Dropout,
Activation,
Input,
BatchNormalization,
GlobalAveragePooling2D,
)
train = pd.read_csv("../input/tomato-diseases-dataset-csvimages/train.csv")
from sklearn.model_selection import train_test_split
df_train, df_validate, y_train, y_test = train_test_split(
train, train.label, train_size=0.8, random_state=42, stratify=train.label
)
df_train = df_train.reset_index(drop=True)
df_validate = df_validate.reset_index(drop=True)
sample = df_train[df_train.label == 3].sample(3)
plt.figure(figsize=(15, 5))
for ind, (img, label) in enumerate(zip(sample.img, sample.label)):
plt.subplot(1, 3, ind + 1)
img = cv2.imread(
os.path.join(
"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images",
img,
)
)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.axis("off")
plt.show()
# Main parameters
BATCH_SIZE = 16
STEPS_PER_EPOCH = len(train) * 0.8 / BATCH_SIZE
VALIDATION_STEPS = len(train) * 0.2 / BATCH_SIZE
EPOCHS = 60 #
IMG_WIDTH = 256
IMG_HEIGHT = 256
train_dir = "../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images"
df_train.label = df_train.label.astype("str")
df_validate.label = df_validate.label.astype("str")
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
rotation_range=180,
vertical_flip=True,
horizontal_flip=True,
)
# train_datagen will apply the augmentation transformations defined above to the training images
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(
df_train,
train_dir,
target_size=(IMG_WIDTH, IMG_HEIGHT),
batch_size=BATCH_SIZE,
x_col="img",
y_col="label",
class_mode="categorical",
)
# generator = ImageDataGenerator(*args).flow_from_dataframe(dataframe, directory, target_size,
# batch_size, x_col, y_col, class_mode)
# the dataframe should be formatted so that x_col holds the image file names and y_col the class labels
# categorical class mode since there are 10 tomato disease classes (one-hot encoded targets)
validation_generator = validation_datagen.flow_from_dataframe(
df_validate,
train_dir,
target_size=(IMG_WIDTH, IMG_HEIGHT),
x_col="img",
y_col="label",
class_mode="categorical",
batch_size=BATCH_SIZE,
)
def create_model():
efficientnet_layers = InceptionV3(
weights="imagenet",
include_top=False,
input_shape=(IMG_WIDTH, IMG_HEIGHT, 3),
pooling="avg",
)
model = Sequential()
model.add(efficientnet_layers)
model.add(Dense(10, activation="softmax"))
model.compile(
optimizer=Adam(lr=0.001), loss="categorical_crossentropy", metrics=["acc"]
)
return model
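# Editorial note (hedged): despite the leftover name `efficientnet_layers`, the backbone
# above is InceptionV3 with include_top=False and pooling="avg", so it outputs one pooled
# 2048-d feature vector per image that feeds the 10-way softmax head. A clearer-named
# sketch of the same head, assuming the constants defined earlier:
#
#     backbone = InceptionV3(weights="imagenet", include_top=False,
#                            input_shape=(IMG_WIDTH, IMG_HEIGHT, 3), pooling="avg")
#     clf = Sequential([backbone, Dense(10, activation="softmax")])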
model = create_model()
model.summary()
model.load_weights("../input/tomatoleafdiseasedetection-weights/InceptionV3_256.h5")
# ss=df_validate.sample(n=20)
ss = df_validate
ss = ss[["img", "label"]]
preds = []
for image_id in ss.img:
image = Image.open(
os.path.join(
"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images/",
image_id,
)
)
array = tf.keras.preprocessing.image.img_to_array(image)
array = array / 255
image = np.expand_dims(array, axis=0)
preds.append(np.argmax(model.predict(image)))
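# Editorial note (hedged): this loop implicitly assumes the stored images already match the
# 256x256 input size the model was built with, since no resize is applied before prediction.
# A defensive variant would resize first, e.g.:
#
#     image = image.resize((IMG_WIDTH, IMG_HEIGHT))  # PIL resize before img_to_array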
ss["labelP"] = preds
ss
score = model.evaluate_generator(validation_generator)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
confusion_matrix = pd.crosstab(
ss.label, ss.labelP, rownames=["Actual"], colnames=["Predicted"]
)
print(confusion_matrix)
plt.figure(figsize=(10, 8))
# use seaborn to draw the heatmap
sns.heatmap(
    confusion_matrix,
    xticklabels=confusion_matrix.columns.values,  # predicted labels
    yticklabels=confusion_matrix.index.values,  # actual labels
cmap="YlGnBu",
annot=True,
fmt="d",
)
plt.show()
from imblearn.metrics import sensitivity_score, specificity_score
from sklearn.metrics import (
f1_score,
precision_score,
recall_score,
accuracy_score,
confusion_matrix,
)
y_test = ss.label.values.astype(int)
y_pred = ss.labelP.values.astype(int)
type(y_test)
# Print f1, precision, and recall scores
print("specificity:", specificity_score(y_test, y_pred, average="macro"))
print("sensitivity:", sensitivity_score(y_test, y_pred, average="macro"))
print("recall:", recall_score(y_test, y_pred, average="macro"))
print("precision::", precision_score(y_test, y_pred, average="macro"))
print("f1_score:", f1_score(y_test, y_pred, average="macro"))
print("accuracy_score:", accuracy_score(y_test, y_pred))
from sklearn.metrics import classification_report
import numpy as np
print(classification_report(y_test, y_pred))
y_true = y_test
y_prediction = y_pred
cnf_matrix = confusion_matrix(y_true, y_prediction)
print(cnf_matrix)
# [[1 1 3]
# [3 2 2]
# [1 3 1]]
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
FP = FP.astype(float)
FN = FN.astype(float)
TP = TP.astype(float)
TN = TN.astype(float)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP / (TP + FN)
# Specificity or true negative rate
TNR = TN / (TN + FP)
# Precision or positive predictive value
PPV = TP / (TP + FP)
# Negative predictive value
NPV = TN / (TN + FN)
# Fall out or false positive rate
FPR = FP / (FP + TN)
# False negative rate
FNR = FN / (TP + FN)
# False discovery rate
FDR = FP / (TP + FP)
# Overall accuracy
ACC = (TP + TN) / (TP + FP + FN + TN)
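# Worked example (editorial): for the small 3x3 matrix quoted in the comment above,
# [[1 1 3], [3 2 2], [1 3 1]], these formulas give, per class:
#   TP = diagonal                  = [1, 2, 1]
#   FP = column sums - TP          = [4, 4, 5]
#   FN = row sums - TP             = [4, 5, 4]
#   TN = total (17) - TP - FP - FN = [8, 6, 7]
# so, e.g., recall (TPR) for class 0 is 1 / (1 + 4) = 0.2.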
print("Sensitivity OR recall")
print(TPR)
print("-------------------")
print("Specificity")
print(TNR)
print("-------------------")
print("Precision")
print(PPV)
print("-------------------")
print("accuracy")
print(ACC)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046156.ipynb
|
tomato-diseases-dataset-csvimages
|
ammarnassanalhajali
|
[{"Id": 69046156, "ScriptId": 17307062, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5966695, "CreationDate": "07/26/2021 08:36:05", "VersionNumber": 5.0, "Title": "Tomato Leaf Disease Detection 0.998 [inference]", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 227.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 216.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 91775532, "KernelVersionId": 69046156, "SourceDatasetVersionId": 2222983}]
|
[{"Id": 2222983, "DatasetId": 1335181, "DatasourceVersionId": 2264687, "CreatorUserId": 5966695, "LicenseName": "Unknown", "CreationDate": "05/12/2021 00:09:31", "VersionNumber": 1.0, "Title": "Tomato Diseases Dataset (CSV+Images)", "Slug": "tomato-diseases-dataset-csvimages", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1335181, "CreatorUserId": 5966695, "OwnerUserId": 5966695.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2222983.0, "CurrentDatasourceVersionId": 2264687.0, "ForumId": 1354120, "Type": 2, "CreationDate": "05/12/2021 00:09:31", "LastActivityDate": "05/12/2021", "TotalViews": 5441, "TotalDownloads": 516, "TotalVotes": 14, "TotalKernels": 2}]
|
[{"Id": 5966695, "UserName": "ammarnassanalhajali", "DisplayName": "Ammar Alhaj Ali", "RegisterDate": "10/15/2020", "PerformanceTier": 4}]
|
# # Tomato Leaf Disease Detection 0.998 [inference]
# ### Hi kagglers, this is an `inference` notebook using `Keras`.
# >
# > [Tomato Leaf Disease Detection 0.998 [Training]](https://www.kaggle.com/ammarnassanalhajali/tomato-leaf-disease-detection-0-998-training)
# ### If you find this kernel useful, please upvote!
import os, cv2, json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras import models, layers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.optimizers import Adam
from PIL import Image
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Dropout,
Activation,
Input,
BatchNormalization,
GlobalAveragePooling2D,
)
train = pd.read_csv("../input/tomato-diseases-dataset-csvimages/train.csv")
from sklearn.model_selection import train_test_split
df_train, df_validate, y_train, y_test = train_test_split(
train, train.label, train_size=0.8, random_state=42, stratify=train.label
)
df_train = df_train.reset_index(drop=True)
df_validate = df_validate.reset_index(drop=True)
sample = df_train[df_train.label == 3].sample(3)
plt.figure(figsize=(15, 5))
for ind, (img, label) in enumerate(zip(sample.img, sample.label)):
plt.subplot(1, 3, ind + 1)
img = cv2.imread(
os.path.join(
"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images",
img,
)
)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.axis("off")
plt.show()
# Main parameters
BATCH_SIZE = 16
STEPS_PER_EPOCH = len(train) * 0.8 / BATCH_SIZE
VALIDATION_STEPS = len(train) * 0.2 / BATCH_SIZE
EPOCHS = 60 #
IMG_WIDTH = 256
IMG_HEIGHT = 256
train_dir = "../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images"
df_train.label = df_train.label.astype("str")
df_validate.label = df_validate.label.astype("str")
train_datagen = ImageDataGenerator(
rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
rotation_range=180,
vertical_flip=True,
horizontal_flip=True,
)
# train_datagen will apply the augmentation transformations defined above to the training images
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_dataframe(
df_train,
train_dir,
target_size=(IMG_WIDTH, IMG_HEIGHT),
batch_size=BATCH_SIZE,
x_col="img",
y_col="label",
class_mode="categorical",
)
# generator = ImageDataGenerator(*args).flow_from_dataframe(dataframe, directory, target_size,
# batch_size, x_col, y_col, class_mode)
# the dataframe should be formatted so that x_col holds the image file names and y_col the class labels
# categorical class mode since there are 10 tomato disease classes (one-hot encoded targets)
validation_generator = validation_datagen.flow_from_dataframe(
df_validate,
train_dir,
target_size=(IMG_WIDTH, IMG_HEIGHT),
x_col="img",
y_col="label",
class_mode="categorical",
batch_size=BATCH_SIZE,
)
def create_model():
efficientnet_layers = InceptionV3(
weights="imagenet",
include_top=False,
input_shape=(IMG_WIDTH, IMG_HEIGHT, 3),
pooling="avg",
)
model = Sequential()
model.add(efficientnet_layers)
model.add(Dense(10, activation="softmax"))
model.compile(
optimizer=Adam(lr=0.001), loss="categorical_crossentropy", metrics=["acc"]
)
return model
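# Editorial note (hedged): despite the leftover name `efficientnet_layers`, the backbone
# above is InceptionV3 with include_top=False and pooling="avg", so it outputs one pooled
# 2048-d feature vector per image that feeds the 10-way softmax head. A clearer-named
# sketch of the same head, assuming the constants defined earlier:
#
#     backbone = InceptionV3(weights="imagenet", include_top=False,
#                            input_shape=(IMG_WIDTH, IMG_HEIGHT, 3), pooling="avg")
#     clf = Sequential([backbone, Dense(10, activation="softmax")])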
model = create_model()
model.summary()
model.load_weights("../input/tomatoleafdiseasedetection-weights/InceptionV3_256.h5")
# ss=df_validate.sample(n=20)
ss = df_validate
ss = ss[["img", "label"]]
preds = []
for image_id in ss.img:
image = Image.open(
os.path.join(
"../input/tomato-diseases-dataset-csvimages/Tomato_images/Tomato_images/",
image_id,
)
)
array = tf.keras.preprocessing.image.img_to_array(image)
array = array / 255
image = np.expand_dims(array, axis=0)
preds.append(np.argmax(model.predict(image)))
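# Editorial note (hedged): this loop implicitly assumes the stored images already match the
# 256x256 input size the model was built with, since no resize is applied before prediction.
# A defensive variant would resize first, e.g.:
#
#     image = image.resize((IMG_WIDTH, IMG_HEIGHT))  # PIL resize before img_to_array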
ss["labelP"] = preds
ss
score = model.evaluate_generator(validation_generator)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
confusion_matrix = pd.crosstab(
ss.label, ss.labelP, rownames=["Actual"], colnames=["Predicted"]
)
print(confusion_matrix)
plt.figure(figsize=(10, 8))
# use seaborn to draw the heatmap
sns.heatmap(
    confusion_matrix,
    xticklabels=confusion_matrix.columns.values,  # predicted labels
    yticklabels=confusion_matrix.index.values,  # actual labels
cmap="YlGnBu",
annot=True,
fmt="d",
)
plt.show()
from imblearn.metrics import sensitivity_score, specificity_score
from sklearn.metrics import (
f1_score,
precision_score,
recall_score,
accuracy_score,
confusion_matrix,
)
y_test = ss.label.values.astype(int)
y_pred = ss.labelP.values.astype(int)
type(y_test)
# Print f1, precision, and recall scores
print("specificity:", specificity_score(y_test, y_pred, average="macro"))
print("sensitivity:", sensitivity_score(y_test, y_pred, average="macro"))
print("recall:", recall_score(y_test, y_pred, average="macro"))
print("precision::", precision_score(y_test, y_pred, average="macro"))
print("f1_score:", f1_score(y_test, y_pred, average="macro"))
print("accuracy_score:", accuracy_score(y_test, y_pred))
from sklearn.metrics import classification_report
import numpy as np
print(classification_report(y_test, y_pred))
y_true = y_test
y_prediction = y_pred
cnf_matrix = confusion_matrix(y_true, y_prediction)
print(cnf_matrix)
# [[1 1 3]
# [3 2 2]
# [1 3 1]]
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
FP = FP.astype(float)
FN = FN.astype(float)
TP = TP.astype(float)
TN = TN.astype(float)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP / (TP + FN)
# Specificity or true negative rate
TNR = TN / (TN + FP)
# Precision or positive predictive value
PPV = TP / (TP + FP)
# Negative predictive value
NPV = TN / (TN + FN)
# Fall out or false positive rate
FPR = FP / (FP + TN)
# False negative rate
FNR = FN / (TP + FN)
# False discovery rate
FDR = FP / (TP + FP)
# Overall accuracy
ACC = (TP + TN) / (TP + FP + FN + TN)
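# Worked example (editorial): for the small 3x3 matrix quoted in the comment above,
# [[1 1 3], [3 2 2], [1 3 1]], these formulas give, per class:
#   TP = diagonal                  = [1, 2, 1]
#   FP = column sums - TP          = [4, 4, 5]
#   FN = row sums - TP             = [4, 5, 4]
#   TN = total (17) - TP - FP - FN = [8, 6, 7]
# so, e.g., recall (TPR) for class 0 is 1 / (1 + 4) = 0.2.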
print("Sensitivity OR recall")
print(TPR)
print("-------------------")
print("Specificity")
print(TNR)
print("-------------------")
print("Precision")
print(PPV)
print("-------------------")
print("accuracy")
print(ACC)
|
[{"tomato-diseases-dataset-csvimages/train.csv": {"column_names": "[\"Unnamed: 0\", \"path\", \"img\", \"label_text\", \"label\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"path\": \"object\", \"img\": \"object\", \"label_text\": \"object\", \"label\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 18160 entries, 0 to 18159\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 18160 non-null int64 \n 1 path 18160 non-null object\n 2 img 18160 non-null object\n 3 label_text 18160 non-null object\n 4 label 18160 non-null int64 \ndtypes: int64(2), object(3)\nmemory usage: 709.5+ KB\n", "summary": "{\"Unnamed: 0\": {\"count\": 18160.0, \"mean\": 9079.5, \"std\": 5242.484779822128, \"min\": 0.0, \"25%\": 4539.75, \"50%\": 9079.5, \"75%\": 13619.25, \"max\": 18159.0}, \"label\": {\"count\": 18160.0, \"mean\": 4.755726872246696, \"std\": 2.801276569006158, \"min\": 0.0, \"25%\": 2.0, \"50%\": 5.0, \"75%\": 7.0, \"max\": 9.0}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"path\":{\"0\":\"..\\/input\\/plantvillage-dataset\\/color\\/Tomato___Late_blight\\/781e93a9-2059-42de-8075-658033a6abf7___RS_Late.B 6075.JPG\",\"1\":\"..\\/input\\/plantvillage-dataset\\/color\\/Tomato___Late_blight\\/283ff0be-6e5e-4b4e-bf21-639780b77ffc___GHLB2 Leaf 8636.JPG\",\"2\":\"..\\/input\\/plantvillage-dataset\\/color\\/Tomato___Late_blight\\/0db85707-41f9-42df-ba3b-842d14f00a68___GHLB2 Leaf 8909.JPG\",\"3\":\"..\\/input\\/plantvillage-dataset\\/color\\/Tomato___Late_blight\\/078a999d-6e6f-427e-a1e6-80b4d2df2bae___GHLB2 Leaf 9029.JPG\"},\"img\":{\"0\":\"781e93a9-2059-42de-8075-658033a6abf7___RS_Late.B 6075.JPG\",\"1\":\"283ff0be-6e5e-4b4e-bf21-639780b77ffc___GHLB2 Leaf 8636.JPG\",\"2\":\"0db85707-41f9-42df-ba3b-842d14f00a68___GHLB2 Leaf 8909.JPG\",\"3\":\"078a999d-6e6f-427e-a1e6-80b4d2df2bae___GHLB2 Leaf 9029.JPG\"},\"label_text\":{\"0\":\"Tomato___Late_blight\",\"1\":\"Tomato___Late_blight\",\"2\":\"Tomato___Late_blight\",\"3\":\"Tomato___Late_blight\"},\"label\":{\"0\":2,\"1\":2,\"2\":2,\"3\":2}}"}}]
| true | 1 |
<start_data_description><data_path>tomato-diseases-dataset-csvimages/train.csv:
<column_names>
['Unnamed: 0', 'path', 'img', 'label_text', 'label']
<column_types>
{'Unnamed: 0': 'int64', 'path': 'object', 'img': 'object', 'label_text': 'object', 'label': 'int64'}
<dataframe_Summary>
{'Unnamed: 0': {'count': 18160.0, 'mean': 9079.5, 'std': 5242.484779822128, 'min': 0.0, '25%': 4539.75, '50%': 9079.5, '75%': 13619.25, 'max': 18159.0}, 'label': {'count': 18160.0, 'mean': 4.755726872246696, 'std': 2.801276569006158, 'min': 0.0, '25%': 2.0, '50%': 5.0, '75%': 7.0, 'max': 9.0}}
<dataframe_info>
RangeIndex: 18160 entries, 0 to 18159
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 18160 non-null int64
1 path 18160 non-null object
2 img 18160 non-null object
3 label_text 18160 non-null object
4 label 18160 non-null int64
dtypes: int64(2), object(3)
memory usage: 709.5+ KB
<some_examples>
{'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'path': {'0': '../input/plantvillage-dataset/color/Tomato___Late_blight/781e93a9-2059-42de-8075-658033a6abf7___RS_Late.B 6075.JPG', '1': '../input/plantvillage-dataset/color/Tomato___Late_blight/283ff0be-6e5e-4b4e-bf21-639780b77ffc___GHLB2 Leaf 8636.JPG', '2': '../input/plantvillage-dataset/color/Tomato___Late_blight/0db85707-41f9-42df-ba3b-842d14f00a68___GHLB2 Leaf 8909.JPG', '3': '../input/plantvillage-dataset/color/Tomato___Late_blight/078a999d-6e6f-427e-a1e6-80b4d2df2bae___GHLB2 Leaf 9029.JPG'}, 'img': {'0': '781e93a9-2059-42de-8075-658033a6abf7___RS_Late.B 6075.JPG', '1': '283ff0be-6e5e-4b4e-bf21-639780b77ffc___GHLB2 Leaf 8636.JPG', '2': '0db85707-41f9-42df-ba3b-842d14f00a68___GHLB2 Leaf 8909.JPG', '3': '078a999d-6e6f-427e-a1e6-80b4d2df2bae___GHLB2 Leaf 9029.JPG'}, 'label_text': {'0': 'Tomato___Late_blight', '1': 'Tomato___Late_blight', '2': 'Tomato___Late_blight', '3': 'Tomato___Late_blight'}, 'label': {'0': 2, '1': 2, '2': 2, '3': 2}}
<end_description>
| 2,204 | 3 | 3,074 | 2,204 |
69046074
|
<jupyter_start><jupyter_text>MosMedData FullChestCT
Kaggle dataset identifier: mosmeddata-fullchestct
<jupyter_script>import numpy as np
import pandas as pd
import os
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import nibabel as nib
image_paths0 = []
labels0 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-0"
):
for filename in filenames:
image_paths0.append(os.path.join(dirname, filename))
labels0.append(0)
image_paths1 = []
labels1 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-1"
):
for filename in filenames:
image_paths1.append(os.path.join(dirname, filename))
labels1.append(1)
image_paths2 = []
labels2 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-2"
):
for filename in filenames:
image_paths2.append(os.path.join(dirname, filename))
labels2.append(2)
image_paths3 = []
labels3 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-3"
):
for filename in filenames:
image_paths3.append(os.path.join(dirname, filename))
labels3.append(3)
image_paths = []
image_paths.extend(image_paths0)
image_paths.extend(image_paths1)
image_paths.extend(image_paths2)
image_paths.extend(image_paths3)
labels = []
labels.extend(labels0)
labels.extend(labels1)
labels.extend(labels2)
labels.extend(labels3)
np.max(labels)
from sklearn.utils import shuffle
image_paths, labels = shuffle(image_paths, labels, random_state=10800)
# def parse_function(image_paths, labels):
# image_path = tf.compat.v1.data.make_one_shot_iterator(image_path)
# print(image_path)
image_names_tab = []
labels_tab = []
counter = 0
for image_path, label in zip(image_paths[:20], labels[:20]):
niimg = nib.load(image_path)
npimage = niimg.get_fdata()
s = npimage.shape
for j in range(20, 30):
img = np.zeros((s[0], s[1], 3))
img[:, :, 0] = npimage[:, :, j]
img[:, :, 1] = npimage[:, :, j]
img[:, :, 2] = npimage[:, :, j]
img = img / np.max(npimage[:, :, j])
# img = tf.cast(img, tf.float32)
img = cv2.resize(img, (224, 224))
image_names_tab.append(img)
labels_tab.append(label)
counter += 1
print(counter, end="\r")
np.shape(image_names_tab)
image_names = image_names_tab
labels = labels_tab
image_names1 = []
image_names2 = []
image_names3 = []
image_names4 = []
image_names5 = []
image_names6 = []
image_names7 = []
image_names8 = []
image_names9 = []
image_names10 = []
labels1 = []
labels2 = []
labels3 = []
labels4 = []
labels5 = []
labels6 = []
labels7 = []
labels8 = []
labels9 = []
labels10 = []
counter = 0
for i in range(0, len(image_names), 10):
image_names1.append(image_names[i])
image_names2.append(image_names[i + 1])
image_names3.append(image_names[i + 2])
image_names4.append(image_names[i + 3])
image_names5.append(image_names[i + 4])
image_names6.append(image_names[i + 5])
image_names7.append(image_names[i + 6])
image_names8.append(image_names[i + 7])
image_names9.append(image_names[i + 8])
image_names10.append(image_names[i + 9])
labels1.append(labels[i])
labels2.append(labels[i + 1])
labels3.append(labels[i + 2])
labels4.append(labels[i + 3])
labels5.append(labels[i + 4])
labels6.append(labels[i + 5])
labels7.append(labels[i + 6])
labels8.append(labels[i + 7])
labels9.append(labels[i + 8])
labels10.append(labels[i + 9])
counter += 1
print(counter, end="\r")
image_names1 = np.array(image_names1)
image_names2 = np.array(image_names2)
image_names3 = np.array(image_names3)
image_names4 = np.array(image_names4)
image_names5 = np.array(image_names5)
image_names6 = np.array(image_names6)
image_names7 = np.array(image_names7)
image_names8 = np.array(image_names8)
image_names9 = np.array(image_names9)
image_names10 = np.array(image_names10)
labels1 = np.array(labels1)
from sklearn.utils import shuffle
(
image_names1,
labels1,
image_names2,
labels2,
image_names3,
labels3,
image_names4,
labels4,
image_names5,
labels5,
image_names6,
labels6,
image_names7,
labels7,
image_names8,
labels8,
image_names9,
labels9,
image_names10,
labels10,
) = shuffle(
image_names1,
labels1,
image_names2,
labels2,
image_names3,
labels3,
image_names4,
labels4,
image_names5,
labels5,
image_names6,
labels6,
image_names7,
labels7,
image_names8,
labels8,
image_names9,
labels9,
image_names10,
labels10,
random_state=10000,
)
i = 100
print(labels1[i])
print(labels5[i])
print(labels7[i])
import tensorflow as tf
base_model = tf.keras.applications.ResNet50(
include_top=False,
weights="imagenet",
input_tensor=None,
input_shape=(224, 224, 3),
classes=1000,
)
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input, Model
inputA = Input(shape=(224, 224, 3))
inputB = Input(shape=(224, 224, 3))
inputC = Input(shape=(224, 224, 3))
inputD = Input(shape=(224, 224, 3))
inputE = Input(shape=(224, 224, 3))
inputF = Input(shape=(224, 224, 3))
inputG = Input(shape=(224, 224, 3))
inputH = Input(shape=(224, 224, 3))
inputI = Input(shape=(224, 224, 3))
inputJ = Input(shape=(224, 224, 3))
# defining parallel outputs
A = Model(inputs=inputA, outputs=base_model(inputA))
B = Model(inputs=inputB, outputs=base_model(inputB))
C = Model(inputs=inputC, outputs=base_model(inputC))
D = Model(inputs=inputD, outputs=base_model(inputD))
E = Model(inputs=inputE, outputs=base_model(inputE))
F = Model(inputs=inputF, outputs=base_model(inputF))
G = Model(inputs=inputG, outputs=base_model(inputG))
H = Model(inputs=inputH, outputs=base_model(inputH))
I = Model(inputs=inputI, outputs=base_model(inputI))
J = Model(inputs=inputJ, outputs=base_model(inputJ))
combined = layers.Add()(
[
A.output,
B.output,
C.output,
D.output,
E.output,
F.output,
G.output,
H.output,
I.output,
J.output,
]
)
# x = layers.Conv2D(512, 3, activation = 'relu', padding = 'same')(combined)
# fx = layers.Conv2D(512, 3, activation='relu', padding='same')(x)
# fx = layers.BatchNormalization()(fx)
# fx = layers.Conv2D(512, 3, padding='same')(fx)
# out = layers.Add()([x,fx])
# out = layers.MaxPooling2D()(out)
# out = layers.ReLU()(out)
# out = layers.BatchNormalization()(out)
z = layers.Flatten()(combined)
# z = layers.Dense(4096, activation="relu")(z)
# z = layers.Dropout(0.5)(z)
# z = layers.Dense(4096, activation='relu')(z)
# z = layers.Dropout(0.4)(z)
z = layers.Dense(4, activation="softmax")(z)
model = Model(
inputs=[
A.input,
B.input,
C.input,
D.input,
E.input,
F.input,
G.input,
H.input,
I.input,
J.input,
],
outputs=z,
)
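# Editorial note (hedged): every branch calls the *same* `base_model` object, so the
# ResNet50 weights are shared across all ten CT-slice inputs; only the input tensors differ.
# The Add layer then sums the ten 7x7x2048 feature maps before the Flatten + softmax head.
# Functionally this is equivalent to something like:
#
#     slice_features = [base_model(inp) for inp in (inputA, inputB, inputC, inputD, inputE,
#                                                   inputF, inputG, inputH, inputI, inputJ)]
#     combined = layers.Add()(slice_features)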
model.summary()
for layer in model.layers:
layer.trainable = True
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["acc"],
)
from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint(
"nohnohmosmed.h5", monitor="val_acc", verbose=1, save_best_only=True, mode="auto"
)
History = model.fit(
x=[
image_names1,
image_names2,
image_names3,
image_names4,
image_names5,
image_names6,
image_names7,
image_names8,
image_names9,
image_names10,
],
y=labels1,
validation_split=0.2,
epochs=50,
callbacks=[checkpoint],
)
model.summary()
model.save_weights("model_mri.h5")  # save_weights returns None, so keep the model reference
loss = History.history["loss"]  # training curves live on the History object returned by fit
val_loss = History.history["val_loss"]
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046074.ipynb
|
mosmeddata-fullchestct
|
ahmedamineafardas
|
[{"Id": 69046074, "ScriptId": 18591601, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7118898, "CreationDate": "07/26/2021 08:34:50", "VersionNumber": 1.0, "Title": "Biotech MosMed Dataset model", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 219.0, "LinesInsertedFromPrevious": 33.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 186.0, "LinesInsertedFromFork": 33.0, "LinesDeletedFromFork": 27.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 186.0, "TotalVotes": 0}]
|
[{"Id": 91775379, "KernelVersionId": 69046074, "SourceDatasetVersionId": 2076367}]
|
[{"Id": 2076367, "DatasetId": 1244618, "DatasourceVersionId": 2116675, "CreatorUserId": 7051386, "LicenseName": "Unknown", "CreationDate": "04/01/2021 04:22:08", "VersionNumber": 1.0, "Title": "MosMedData FullChestCT", "Slug": "mosmeddata-fullchestct", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1244618, "CreatorUserId": 7051386, "OwnerUserId": 7051386.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2076367.0, "CurrentDatasourceVersionId": 2116675.0, "ForumId": 1262902, "Type": 2, "CreationDate": "04/01/2021 04:22:08", "LastActivityDate": "04/01/2021", "TotalViews": 1017, "TotalDownloads": 52, "TotalVotes": 1, "TotalKernels": 4}]
|
[{"Id": 7051386, "UserName": "ahmedamineafardas", "DisplayName": "ahmed amine afardas", "RegisterDate": "03/28/2021", "PerformanceTier": 0}]
|
import numpy as np
import pandas as pd
import os
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import nibabel as nib
image_paths0 = []
labels0 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-0"
):
for filename in filenames:
image_paths0.append(os.path.join(dirname, filename))
labels0.append(0)
image_paths1 = []
labels1 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-1"
):
for filename in filenames:
image_paths1.append(os.path.join(dirname, filename))
labels1.append(1)
image_paths2 = []
labels2 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-2"
):
for filename in filenames:
image_paths2.append(os.path.join(dirname, filename))
labels2.append(2)
image_paths3 = []
labels3 = []
for dirname, _, filenames in os.walk(
"../input/mosmeddata-fullchestct/COVID19_1110/studies/CT-3"
):
for filename in filenames:
image_paths3.append(os.path.join(dirname, filename))
labels3.append(3)
image_paths = []
image_paths.extend(image_paths0)
image_paths.extend(image_paths1)
image_paths.extend(image_paths2)
image_paths.extend(image_paths3)
labels = []
labels.extend(labels0)
labels.extend(labels1)
labels.extend(labels2)
labels.extend(labels3)
np.max(labels)
from sklearn.utils import shuffle
image_paths, labels = shuffle(image_paths, labels, random_state=10800)
# def parse_function(image_paths, labels):
# image_path = tf.compat.v1.data.make_one_shot_iterator(image_path)
# print(image_path)
image_names_tab = []
labels_tab = []
counter = 0
for image_path, label in zip(image_paths[:20], labels[:20]):
niimg = nib.load(image_path)
npimage = niimg.get_fdata()
s = npimage.shape
for j in range(20, 30):
img = np.zeros((s[0], s[1], 3))
img[:, :, 0] = npimage[:, :, j]
img[:, :, 1] = npimage[:, :, j]
img[:, :, 2] = npimage[:, :, j]
img = img / np.max(npimage[:, :, j])
# img = tf.cast(img, tf.float32)
img = cv2.resize(img, (224, 224))
image_names_tab.append(img)
labels_tab.append(label)
counter += 1
print(counter, end="\r")
np.shape(image_names_tab)
image_names = image_names_tab
labels = labels_tab
image_names1 = []
image_names2 = []
image_names3 = []
image_names4 = []
image_names5 = []
image_names6 = []
image_names7 = []
image_names8 = []
image_names9 = []
image_names10 = []
labels1 = []
labels2 = []
labels3 = []
labels4 = []
labels5 = []
labels6 = []
labels7 = []
labels8 = []
labels9 = []
labels10 = []
counter = 0
for i in range(0, len(image_names), 10):
image_names1.append(image_names[i])
image_names2.append(image_names[i + 1])
image_names3.append(image_names[i + 2])
image_names4.append(image_names[i + 3])
image_names5.append(image_names[i + 4])
image_names6.append(image_names[i + 5])
image_names7.append(image_names[i + 6])
image_names8.append(image_names[i + 7])
image_names9.append(image_names[i + 8])
image_names10.append(image_names[i + 9])
labels1.append(labels[i])
labels2.append(labels[i + 1])
labels3.append(labels[i + 2])
labels4.append(labels[i + 3])
labels5.append(labels[i + 4])
labels6.append(labels[i + 5])
labels7.append(labels[i + 6])
labels8.append(labels[i + 7])
labels9.append(labels[i + 8])
labels10.append(labels[i + 9])
counter += 1
print(counter, end="\r")
image_names1 = np.array(image_names1)
image_names2 = np.array(image_names2)
image_names3 = np.array(image_names3)
image_names4 = np.array(image_names4)
image_names5 = np.array(image_names5)
image_names6 = np.array(image_names6)
image_names7 = np.array(image_names7)
image_names8 = np.array(image_names8)
image_names9 = np.array(image_names9)
image_names10 = np.array(image_names10)
labels1 = np.array(labels1)
from sklearn.utils import shuffle
(
image_names1,
labels1,
image_names2,
labels2,
image_names3,
labels3,
image_names4,
labels4,
image_names5,
labels5,
image_names6,
labels6,
image_names7,
labels7,
image_names8,
labels8,
image_names9,
labels9,
image_names10,
labels10,
) = shuffle(
image_names1,
labels1,
image_names2,
labels2,
image_names3,
labels3,
image_names4,
labels4,
image_names5,
labels5,
image_names6,
labels6,
image_names7,
labels7,
image_names8,
labels8,
image_names9,
labels9,
image_names10,
labels10,
random_state=10000,
)
i = 100
print(labels1[i])
print(labels5[i])
print(labels7[i])
import tensorflow as tf
base_model = tf.keras.applications.ResNet50(
include_top=False,
weights="imagenet",
input_tensor=None,
input_shape=(224, 224, 3),
classes=1000,
)
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Input, Model
inputA = Input(shape=(224, 224, 3))
inputB = Input(shape=(224, 224, 3))
inputC = Input(shape=(224, 224, 3))
inputD = Input(shape=(224, 224, 3))
inputE = Input(shape=(224, 224, 3))
inputF = Input(shape=(224, 224, 3))
inputG = Input(shape=(224, 224, 3))
inputH = Input(shape=(224, 224, 3))
inputI = Input(shape=(224, 224, 3))
inputJ = Input(shape=(224, 224, 3))
# defining parallel outputs
A = Model(inputs=inputA, outputs=base_model(inputA))
B = Model(inputs=inputB, outputs=base_model(inputB))
C = Model(inputs=inputC, outputs=base_model(inputC))
D = Model(inputs=inputD, outputs=base_model(inputD))
E = Model(inputs=inputE, outputs=base_model(inputE))
F = Model(inputs=inputF, outputs=base_model(inputF))
G = Model(inputs=inputG, outputs=base_model(inputG))
H = Model(inputs=inputH, outputs=base_model(inputH))
I = Model(inputs=inputI, outputs=base_model(inputI))
J = Model(inputs=inputJ, outputs=base_model(inputJ))
combined = layers.Add()(
[
A.output,
B.output,
C.output,
D.output,
E.output,
F.output,
G.output,
H.output,
I.output,
J.output,
]
)
# x = layers.Conv2D(512, 3, activation = 'relu', padding = 'same')(combined)
# fx = layers.Conv2D(512, 3, activation='relu', padding='same')(x)
# fx = layers.BatchNormalization()(fx)
# fx = layers.Conv2D(512, 3, padding='same')(fx)
# out = layers.Add()([x,fx])
# out = layers.MaxPooling2D()(out)
# out = layers.ReLU()(out)
# out = layers.BatchNormalization()(out)
z = layers.Flatten()(combined)
# z = layers.Dense(4096, activation="relu")(z)
# z = layers.Dropout(0.5)(z)
# z = layers.Dense(4096, activation='relu')(z)
# z = layers.Dropout(0.4)(z)
z = layers.Dense(4, activation="softmax")(z)
model = Model(
inputs=[
A.input,
B.input,
C.input,
D.input,
E.input,
F.input,
G.input,
H.input,
I.input,
J.input,
],
outputs=z,
)
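# Editorial note (hedged): every branch calls the *same* `base_model` object, so the
# ResNet50 weights are shared across all ten CT-slice inputs; only the input tensors differ.
# The Add layer then sums the ten 7x7x2048 feature maps before the Flatten + softmax head.
# Functionally this is equivalent to something like:
#
#     slice_features = [base_model(inp) for inp in (inputA, inputB, inputC, inputD, inputE,
#                                                   inputF, inputG, inputH, inputI, inputJ)]
#     combined = layers.Add()(slice_features)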
model.summary()
for layer in model.layers:
layer.trainable = True
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["acc"],
)
from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint(
"nohnohmosmed.h5", monitor="val_acc", verbose=1, save_best_only=True, mode="auto"
)
History = model.fit(
x=[
image_names1,
image_names2,
image_names3,
image_names4,
image_names5,
image_names6,
image_names7,
image_names8,
image_names9,
image_names10,
],
y=labels1,
validation_split=0.2,
epochs=50,
callbacks=[checkpoint],
)
model.summary()
model.save_weights("model_mri.h5")  # save_weights returns None, so keep the model reference
loss = History.history["loss"]  # training curves live on the History object returned by fit
val_loss = History.history["val_loss"]
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
| false | 0 | 2,877 | 0 | 2,908 | 2,877 |
||
69046611
|
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputRegressor
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/train.csv")
test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv")
sub = pd.read_csv(
"/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv"
)
train = train.set_index("date_time").copy()
test = test.set_index("date_time").copy()
target_cols = [col for col in train.columns if col.startswith("target")]
feat_cols = [col for col in train.columns if col not in target_cols]
train, val = train_test_split(train, test_size=0.2, random_state=42)
fea_scaler = MinMaxScaler()
lab_scaler = MinMaxScaler()
Xtrain_scaled = fea_scaler.fit_transform(train.drop(target_cols[:], axis=1))
Xval_scaled = fea_scaler.transform(val.drop(target_cols[:], axis=1))
Ytrain_scaled = lab_scaler.fit_transform(train[target_cols[:]])
Yval_scaled = lab_scaler.transform(val[target_cols[:]])
Xtest_scaled = fea_scaler.transform(test)
other_params = {
"learning_rate": 0.1,
"n_estimators": 400,
"max_depth": 4,
"min_child_weight": 5,
"seed": 0,
"subsample": 0.8,
"colsample_bytree": 0.8,
"gamma": 0.1,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
}
model = xgb.XGBRegressor(**other_params)
multioutputregressor = MultiOutputRegressor(
xgb.XGBRegressor(objective="reg:squarederror", **other_params)
).fit(Xtrain_scaled, Ytrain_scaled)
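# Editorial note: MultiOutputRegressor fits one independent XGBRegressor per target column,
# so each of the three scaled targets gets its own booster. After fitting, the per-target
# models can be inspected, e.g.:
#
#     for i, est in enumerate(multioutputregressor.estimators_):
#         print(i, est.get_params()["n_estimators"])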
# cv_params = {'n_estimators': [400, 500, 600, 700, 800]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'max_depth': [3, 4, 5, 6, 7, 8, 9, 10]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'min_child_weight': [1, 2, 3, 4, 5, 6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'subsample': [0.6, 0.7, 0.8, 0.9], 'colsample_bytree': [0.6, 0.7, 0.8, 0.9]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# #evalute_result = optimized_GBM.grid_scores_
# #print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# #evalute_result = optimized_GBM.grid_scores_
# #print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'n_estimators': [400, 500, 600, 700, 800],
# 'max_depth': [3, 4, 5, 6, 7, 8, 9, 10],
# 'min_child_weight': [1, 2, 3, 4, 5, 6],
# 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
# 'subsample': [0.6, 0.7, 0.8, 0.9],
# 'colsample_bytree': [0.6, 0.7, 0.8, 0.9],
# 'reg_alpha': [0.05, 0.1, 1, 2, 3],
# 'reg_lambda': [0.05, 0.1, 1, 2, 3],
# 'learning_rate': [0.01, 0.05, 0.07, 0.1, 0.2]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 'reg_lambda': 0.1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=10)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
pred = multioutputregressor.predict(Xtest_scaled)
pred = lab_scaler.inverse_transform(pred)
pred = pred.reshape(2247, 3)
sub[target_cols[:]] = pred
sub.to_csv("sample_submission.csv", index=0)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046611.ipynb
| null | null |
[{"Id": 69046611, "ScriptId": 18816238, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7392108, "CreationDate": "07/26/2021 08:43:12", "VersionNumber": 3.0, "Title": "XGBoost", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 120.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 119.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputRegressor
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/train.csv")
test = pd.read_csv("/kaggle/input/tabular-playground-series-jul-2021/test.csv")
sub = pd.read_csv(
"/kaggle/input/tabular-playground-series-jul-2021/sample_submission.csv"
)
train = train.set_index("date_time").copy()
test = test.set_index("date_time").copy()
target_cols = [col for col in train.columns if col.startswith("target")]
feat_cols = [col for col in train.columns if col not in target_cols]
train, val = train_test_split(train, test_size=0.2, random_state=42)
fea_scaler = MinMaxScaler()
lab_scaler = MinMaxScaler()
Xtrain_scaled = fea_scaler.fit_transform(train.drop(target_cols[:], axis=1))
Xval_scaled = fea_scaler.transform(val.drop(target_cols[:], axis=1))
Ytrain_scaled = lab_scaler.fit_transform(train[target_cols[:]])
Yval_scaled = lab_scaler.transform(val[target_cols[:]])
Xtest_scaled = fea_scaler.transform(test)
other_params = {
"learning_rate": 0.1,
"n_estimators": 400,
"max_depth": 4,
"min_child_weight": 5,
"seed": 0,
"subsample": 0.8,
"colsample_bytree": 0.8,
"gamma": 0.1,
"reg_alpha": 0.1,
"reg_lambda": 0.1,
}
model = xgb.XGBRegressor(**other_params)
multioutputregressor = MultiOutputRegressor(
xgb.XGBRegressor(objective="reg:squarederror", **other_params)
).fit(Xtrain_scaled, Ytrain_scaled)
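# Editorial note: MultiOutputRegressor fits one independent XGBRegressor per target column,
# so each of the three scaled targets gets its own booster. After fitting, the per-target
# models can be inspected, e.g.:
#
#     for i, est in enumerate(multioutputregressor.estimators_):
#         print(i, est.get_params()["n_estimators"])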
# cv_params = {'n_estimators': [400, 500, 600, 700, 800]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'max_depth': [3, 4, 5, 6, 7, 8, 9, 10]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'min_child_weight': [1, 2, 3, 4, 5, 6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'subsample': [0.6, 0.7, 0.8, 0.9], 'colsample_bytree': [0.6, 0.7, 0.8, 0.9]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# #evalute_result = optimized_GBM.grid_scores_
# #print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
#                 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0, 'reg_lambda': 1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=4)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# #evalute_result = optimized_GBM.grid_scores_
# #print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
# cv_params = {'n_estimators': [400, 500, 600, 700, 800],
# 'max_depth': [3, 4, 5, 6, 7, 8, 9, 10],
# 'min_child_weight': [1, 2, 3, 4, 5, 6],
# 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
# 'subsample': [0.6, 0.7, 0.8, 0.9],
# 'colsample_bytree': [0.6, 0.7, 0.8, 0.9],
# 'reg_alpha': [0.05, 0.1, 1, 2, 3],
# 'reg_lambda': [0.05, 0.1, 1, 2, 3],
# 'learning_rate': [0.01, 0.05, 0.07, 0.1, 0.2]}
# other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 4, 'min_child_weight': 5, 'seed': 0,
# 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.1, 'reg_alpha': 0.1, 'reg_lambda': 0.1}
# optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=10)
# optimized_GBM.fit(Xtrain_scaled, Ytrain_scaled[:, 1])
# evalute_result = optimized_GBM.cv_results_
# print('Results of each iteration: {0}'.format(evalute_result))
# print('Best parameter values: {0}'.format(optimized_GBM.best_params_))
# print('Best model score: {0}'.format(optimized_GBM.best_score_))
pred = multioutputregressor.predict(Xtest_scaled)
pred = lab_scaler.inverse_transform(pred)
pred = pred.reshape(2247, 3)
sub[target_cols[:]] = pred
sub.to_csv("sample_submission.csv", index=0)
| false | 0 | 2,940 | 1 | 2,940 | 2,940 |
||
69046436
|
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.ensemble import (
RandomForestRegressor,
GradientBoostingRegressor,
ExtraTreesRegressor,
)
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import seaborn as sns
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train = pd.read_csv("../input/house-prices-advanced-regression-techniques/train.csv")
test = pd.read_csv("../input/house-prices-advanced-regression-techniques/test.csv")
print(train.shape)
print(test.shape)
df = train.append(test).reset_index(drop=True)
print(df.shape)
df.columns
def check_df(dataframe, head=5):
print("##################### Shape #####################")
print(dataframe.shape)
print("##################### Types #####################")
print(dataframe.dtypes)
print("##################### Head #####################")
print(dataframe.head(head))
print("##################### Tail #####################")
print(dataframe.tail(head))
print("##################### NA #####################")
print(dataframe.isnull().sum())
print("##################### Quantiles #####################")
print(dataframe.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
check_df(df)
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df)
df["Neighborhood"].value_counts()
# Categorical variable analysis
def cat_summary(dataframe, col_name, plot=False):
print(
pd.DataFrame(
{
col_name: dataframe[col_name].value_counts(),
"Ratio": 100 * dataframe[col_name].value_counts() / len(dataframe),
}
)
)
print("##########################################")
if plot:
sns.countplot(x=dataframe[col_name], data=dataframe)
plt.show()
for col in cat_cols:
cat_summary(df, col)
for col in cat_but_car:
cat_summary(df, col)
# Numerical variable analysis
df[num_cols].describe([0.10, 0.30, 0.50, 0.70, 0.80, 0.99]).T
# Target analysis
df["SalePrice"].describe([0.05, 0.10, 0.25, 0.50, 0.75, 0.80, 0.90, 0.95, 0.99]).T
def find_correlation(dataframe, numeric_cols, corr_limit=0.60):
high_correlations = []
low_correlations = []
for col in numeric_cols:
if col == "SalePrice":
pass
else:
correlation = dataframe[[col, "SalePrice"]].corr().loc[col, "SalePrice"]
print(col, correlation)
if abs(correlation) > corr_limit:
high_correlations.append(col + ": " + str(correlation))
else:
low_correlations.append(col + ": " + str(correlation))
return low_correlations, high_correlations
low_corrs, high_corrs = find_correlation(df, num_cols)
# correlation across all variables
corr_matrix = df.corr()
sns.clustermap(corr_matrix, annot=True, figsize=(20, 15), fmt=".2f")
plt.title("Correlation Between Features")
plt.show()
threshold = 0.60
filter = np.abs(corr_matrix["SalePrice"]) > threshold
corr_features = corr_matrix.columns[filter].tolist()
sns.clustermap(df[corr_features].corr(), annot=True, fmt=".2f")
plt.title("Correlation Between Features w/ Corr Threshold 0.60)")
plt.show()
def high_correlated_cols(dataframe, plot=False, corr_th=0.60):
corr = dataframe.corr()
cor_matrix = corr.abs()
upper_triangle_matrix = cor_matrix.where(
        np.triu(np.ones(cor_matrix.shape), k=1).astype(bool)
)
drop_list = [
col
for col in upper_triangle_matrix.columns
if any(upper_triangle_matrix[col] > corr_th)
]
if plot:
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(rc={"figure.figsize": (15, 15)})
sns.heatmap(corr, cmap="RdBu")
plt.show()
return drop_list
high_correlated_cols(df)
# FEATURE ENGINEERING
df["SqFtPerRoom"] = df["GrLivArea"] / (
df["TotRmsAbvGrd"] + df["FullBath"] + df["HalfBath"] + df["KitchenAbvGr"]
)
df["Total_Home_Quality"] = df["OverallQual"] + df["OverallCond"]
df["Total_Bathrooms"] = (
df["FullBath"]
+ (0.5 * df["HalfBath"])
+ df["BsmtFullBath"]
+ (0.5 * df["BsmtHalfBath"])
)
df["HighQualSF"] = df["1stFlrSF"] + df["2ndFlrSF"]
# Converting non-numeric predictors stored as numbers into string
df["MSSubClass"] = df["MSSubClass"].apply(str)
df["YrSold"] = df["YrSold"].apply(str)
df["MoSold"] = df["MoSold"].apply(str)
# RARE ENCODING
def rare_encoder(dataframe, rare_perc, cat_cols):
rare_columns = [
col
for col in cat_cols
        if (dataframe[col].value_counts() / len(dataframe) < rare_perc).sum() > 1
]
for col in rare_columns:
tmp = dataframe[col].value_counts() / len(dataframe)
rare_labels = tmp[tmp < rare_perc].index
dataframe[col] = np.where(
dataframe[col].isin(rare_labels), "Rare", dataframe[col]
)
return dataframe
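# Minimal illustration (toy data, not used downstream): rare_encoder collapses labels whose
# frequency is below rare_perc into a single "Rare" category.
_toy = pd.DataFrame({"cat": ["A"] * 996 + ["B", "B", "C", "D"]})
print(rare_encoder(_toy, 0.01, ["cat"])["cat"].value_counts())  # A: 996, Rare: 4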
def rare_analyser(dataframe, target, cat_cols):
for col in cat_cols:
print(col, ":", len(dataframe[col].value_counts()))
print(
pd.DataFrame(
{
"COUNT": dataframe[col].value_counts(),
"RATIO": dataframe[col].value_counts() / len(dataframe),
"TARGET_MEAN": dataframe.groupby(col)[target].mean(),
}
),
end="\n\n\n",
)
rare_analyser(df, "SalePrice", cat_cols)
df = rare_encoder(df, 0.01, cat_cols)
drop_list = [
"Street",
"SaleCondition",
"Functional",
"Condition2",
"Utilities",
"SaleType",
"MiscVal",
"Alley",
"LandSlope",
"PoolQC",
"MiscFeature",
"Electrical",
"Fence",
"RoofStyle",
"RoofMatl",
"FireplaceQu",
]
cat_cols = [col for col in cat_cols if col not in drop_list]
for col in drop_list:
df.drop(col, axis=1, inplace=True)
rare_analyser(df, "SalePrice", cat_cols)
useless_cols = [
col
for col in cat_cols
if df[col].nunique() == 1
or (
df[col].nunique() == 2
and (df[col].value_counts() / len(df) <= 0.01).any(axis=None)
)
]
cat_cols = [col for col in cat_cols if col not in useless_cols]
for col in useless_cols:
df.drop(col, axis=1, inplace=True)
rare_analyser(df, "SalePrice", cat_cols)
# Label Encoding & ONE-HOT ENCODING
def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
cat_cols, num_cols, cat_but_car = grab_col_names(df)
cat_cols = cat_cols + cat_but_car
df = one_hot_encoder(df, cat_cols, drop_first=True)
check_df(df)
cat_cols, num_cols, cat_but_car = grab_col_names(df)
rare_analyser(df, "SalePrice", cat_cols)
useless_cols_new = [
col for col in cat_cols if (df[col].value_counts() / len(df) <= 0.01).any(axis=None)
]
df[useless_cols_new].head()
for col in useless_cols_new:
cat_summary(df, col)
rare_analyser(df, "SalePrice", useless_cols_new)
# Missing Values
def missing_values_table(dataframe, na_name=False):
na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
ratio = (
dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100
).sort_values(ascending=False)
missing_df = pd.concat(
[n_miss, np.round(ratio, 2)], axis=1, keys=["n_miss", "ratio"]
)
print(missing_df, end="\n")
if na_name:
return na_columns
missing_values_table(df)
test.shape
missing_values_table(train)
na_cols = [
col for col in df.columns if df[col].isnull().sum() > 0 and "SalePrice" not in col
]
df[na_cols] = df[na_cols].apply(lambda x: x.fillna(x.median()), axis=0)
# Outliers
def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquantile_range = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquantile_range
low_limit = quartile1 - 1.5 * interquantile_range
return low_limit, up_limit
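# Worked example (toy numbers): for the values 1..10, quantile(0.25) = 3.25 and quantile(0.75) = 7.75,
# so the IQR is 4.5 and the limits are 3.25 - 1.5 * 4.5 = -3.5 and 7.75 + 1.5 * 4.5 = 14.5.
print(outlier_thresholds(pd.DataFrame({"x": range(1, 11)}), "x"))  # (-3.5, 14.5)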
def check_outlier(dataframe, col_name, q1=0.25, q3=0.75):
low_limit, up_limit = outlier_thresholds(dataframe, col_name, q1, q3)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
print(col, check_outlier(df, col, q1=0.01, q3=0.99))
# Model
df.shape
train_df = df[df["SalePrice"].notnull()]
test_df = df[df["SalePrice"].isnull()].drop("SalePrice", axis=1)
train_df.shape
test_df.shape
y = np.log1p(train_df["SalePrice"])
X = train_df.drop(["Id", "SalePrice"], axis=1)
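# Note: the target is modeled on a log scale (np.log1p); predictions are mapped back with np.expm1
# at submission time, since expm1 is the exact inverse of log1p.
assert np.allclose(np.expm1(np.log1p([100000.0, 250000.0])), [100000.0, 250000.0])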
X.shape
# Base Models
##################
models = [
("LR", LinearRegression()),
("CART", DecisionTreeRegressor()),
("RF", RandomForestRegressor()),
("GBM", GradientBoostingRegressor()),
("XGBoost", XGBRegressor(objective="reg:squarederror")),
("LightGBM", LGBMRegressor()),
]
for name, regressor in models:
rmse = np.mean(
np.sqrt(
-cross_val_score(regressor, X, y, cv=3, scoring="neg_mean_squared_error")
)
)
print(f"RMSE: {round(rmse, 4)} ({name}) ")
# **Hyperparameter Optimization**
lgbm_model = LGBMRegressor(random_state=46)
# baseline error before tuning:
rmse = np.mean(
np.sqrt(-cross_val_score(lgbm_model, X, y, cv=10, scoring="neg_mean_squared_error"))
)
rmse
lgbm_params = {
"learning_rate": [0.01, 0.005],
"n_estimators": [15000, 20000],
"colsample_bytree": [0.5, 0.3],
}
lgbm_gs_best = GridSearchCV(
lgbm_model, lgbm_params, cv=10, n_jobs=-1, verbose=False
).fit(X, y)
final_model = lgbm_model.set_params(**lgbm_gs_best.best_params_).fit(X, y)
rmse = np.mean(
np.sqrt(
-cross_val_score(final_model, X, y, cv=10, scoring="neg_mean_squared_error")
)
)
print(rmse)
# with the default hyperparameter values the RMSE was 0.1305858.
# with the optimizations we brought it down to 0.12328
# Feature Selection
def plot_importance(model, features, num=len(X), save=False):
feature_imp = pd.DataFrame(
{"Value": model.feature_importances_, "Feature": features.columns}
)
plt.figure(figsize=(10, 10))
sns.set(font_scale=1)
sns.barplot(
x="Value",
y="Feature",
data=feature_imp.sort_values(by="Value", ascending=False)[0:num],
)
plt.title("Features")
    plt.tight_layout()
    if save:
        plt.savefig("importances.png")  # save before plt.show(), otherwise an empty figure may be written
    plt.show()
plot_importance(final_model, X, 20)
X.shape
feature_imp = pd.DataFrame(
{"Value": final_model.feature_importances_, "Feature": X.columns}
)
def num_summary(dataframe, numerical_col, plot=False):
    quantiles = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99]
    print(dataframe[numerical_col].describe(quantiles).T)
    if plot:  # draw a quick histogram of the column when requested
        dataframe[numerical_col].hist(bins=20); plt.show()
num_summary(feature_imp, "Value", True)
feature_imp[feature_imp["Value"] > 0].shape
feature_imp[feature_imp["Value"] < 1].shape
zero_imp_cols = feature_imp[feature_imp["Value"] < 1]["Feature"].values
selected_cols = [col for col in X.columns if col not in zero_imp_cols]
# Hyperparameter Optimization with Selected Features
lgbm_model = LGBMRegressor(random_state=46)
lgbm_params = {
"learning_rate": [0.01, 0.005],
"n_estimators": [15000, 20000],
"colsample_bytree": [0.5, 0.3],
}
lgbm_gs_best = GridSearchCV(
lgbm_model, lgbm_params, cv=10, n_jobs=-1, verbose=True
).fit(X[selected_cols], y)
y = np.log1p(train_df["SalePrice"])
X = train_df.drop(["Id", "SalePrice"], axis=1)
final_model = lgbm_model.set_params(**lgbm_gs_best.best_params_).fit(
X[selected_cols], y
)
rmse = np.mean(
np.sqrt(
-cross_val_score(
final_model, X[selected_cols], y, cv=10, scoring="neg_mean_squared_error"
)
)
)
print(rmse)
# GENERATING THE SUBMISSION FILE
#######################################
submission_df = pd.DataFrame()
submission_df["Id"] = test_df["Id"].astype("Int32")
submission_df.head()
y_pred_sub = final_model.predict(test_df[selected_cols])
test_df.head()
y_pred_sub = np.expm1(y_pred_sub)
submission_df["SalePrice"] = y_pred_sub
submission_df.to_csv("submission.csv", index=False)
submission_df
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046436.ipynb
| null | null |
[{"Id": 69046436, "ScriptId": 18841428, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6947038, "CreationDate": "07/26/2021 08:40:25", "VersionNumber": 1.0, "Title": "HousePricePrediction", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 411.0, "LinesInsertedFromPrevious": 265.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 146.0, "LinesInsertedFromFork": 265.0, "LinesDeletedFromFork": 632.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 146.0, "TotalVotes": 4}]
| null | null | null | null |
| false | 0 | 4,775 | 4 | 4,775 | 4,775 |
||
69046416
|
<jupyter_start><jupyter_text>House Sales in King County, USA
This dataset contains house sale prices for King County, which includes Seattle. It includes homes sold between May 2014 and May 2015.
It's a great dataset for evaluating simple regression models.
Kaggle dataset identifier: housesalesprediction
<jupyter_code>import pandas as pd
df = pd.read_csv('housesalesprediction/kc_house_data.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 21613 entries, 0 to 21612
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 21613 non-null int64
1 date 21613 non-null object
2 price 21613 non-null float64
3 bedrooms 21613 non-null int64
4 bathrooms 21613 non-null float64
5 sqft_living 21613 non-null int64
6 sqft_lot 21613 non-null int64
7 floors 21613 non-null float64
8 waterfront 21613 non-null int64
9 view 21613 non-null int64
10 condition 21613 non-null int64
11 grade 21613 non-null int64
12 sqft_above 21613 non-null int64
13 sqft_basement 21613 non-null int64
14 yr_built 21613 non-null int64
15 yr_renovated 21613 non-null int64
16 zipcode 21613 non-null int64
17 lat 21613 non-null float64
18 long 21613 non-null float64
19 sqft_living15 21613 non-null int64
20 sqft_lot15 21613 non-null int64
dtypes: float64(5), int64(15), object(1)
memory usage: 3.5+ MB
<jupyter_text>Examples:
{
"id": 7129300520,
"date": "2014-10-13 00:00:00",
"price": 221900,
"bedrooms": 3,
"bathrooms": 1.0,
"sqft_living": 1180,
"sqft_lot": 5650,
"floors": 1,
"waterfront": 0,
"view": 0,
"condition": 3,
"grade": 7,
"sqft_above": 1180,
"sqft_basement": 0,
"yr_built": 1955,
"yr_renovated": 0,
"zipcode": 98178,
"lat": 47.5112,
"long": -122.257,
"sqft_living15": 1340,
"...": "and 1 more columns"
}
{
"id": 6414100192,
"date": "2014-12-09 00:00:00",
"price": 538000,
"bedrooms": 3,
"bathrooms": 2.25,
"sqft_living": 2570,
"sqft_lot": 7242,
"floors": 2,
"waterfront": 0,
"view": 0,
"condition": 3,
"grade": 7,
"sqft_above": 2170,
"sqft_basement": 400,
"yr_built": 1951,
"yr_renovated": 1991,
"zipcode": 98125,
"lat": 47.721,
"long": -122.319,
"sqft_living15": 1690,
"...": "and 1 more columns"
}
{
"id": 5631500400,
"date": "2015-02-25 00:00:00",
"price": 180000,
"bedrooms": 2,
"bathrooms": 1.0,
"sqft_living": 770,
"sqft_lot": 10000,
"floors": 1,
"waterfront": 0,
"view": 0,
"condition": 3,
"grade": 6,
"sqft_above": 770,
"sqft_basement": 0,
"yr_built": 1933,
"yr_renovated": 0,
"zipcode": 98028,
"lat": 47.7379,
"long": -122.233,
"sqft_living15": 2720,
"...": "and 1 more columns"
}
{
"id": 2487200875,
"date": "2014-12-09 00:00:00",
"price": 604000,
"bedrooms": 4,
"bathrooms": 3.0,
"sqft_living": 1960,
"sqft_lot": 5000,
"floors": 1,
"waterfront": 0,
"view": 0,
"condition": 5,
"grade": 7,
"sqft_above": 1050,
"sqft_basement": 910,
"yr_built": 1965,
"yr_renovated": 0,
"zipcode": 98136,
"lat": 47.5208,
"long": -122.393,
"sqft_living15": 1360,
"...": "and 1 more columns"
}
<jupyter_script># # King County House Prices:
# ## Neighborhoods Classification
# In this notebook, I used another dataset (SEA Building Energy Benchmarking, source below) which gives, for each building, its GPS coordinates and its neighborhood (North, East, Ballard, Delridge, etc.).
# I cleaned that dataset as part of a data-science training project and got the idea of using it to assign a neighborhood to each King County house with a KNN classifier.
#
# This may help improve model performance when predicting house prices.
#
# Results are at the bottom of the notebook
# ### Imports
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
sns.set()
data = pd.read_csv("../input/housesalesprediction/kc_house_data.csv")
# ### Exploratory Functions
def describe_columns(df):
desc_df = pd.DataFrame(
index=df.columns,
columns=["NaN count", "NaN frequency (%)", "Number of unique values"],
)
desc_df["NaN count"] = df.isna().sum()
desc_df["NaN frequency (%)"] = desc_df["NaN count"] / df.shape[0] * 100
for column in df.columns:
desc_df["Number of unique values"][column] = len(df[column].dropna().unique())
return desc_df
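# Example usage (the helper is defined but not called elsewhere in this notebook): one row per column
# with its NaN count, NaN frequency (%) and number of unique values.
describe_columns(data).head()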
def move_column(df, column_name, column_place):
mvd_column = df.pop(column_name)
df.insert(column_place, column_name, mvd_column)
return df
def prop_nan(df):
return (df.isna()).sum().sum() / df.size
def nan_map(df, save=False, filename="nan_location"):
plt.figure(figsize=(20, 10))
sns.heatmap(df.isna())
if save:
plt.savefig(filename)
def corr_matrix(
df,
figsize=(30, 20),
maptype="heatmap",
absolute=False,
crit_value=None,
annot=True,
save=False,
filename="corr_matrix",
):
matrix_corr = df.corr()
if absolute:
matrix_corr = matrix_corr.abs()
    if crit_value is not None:
matrix_corr = matrix_corr >= crit_value
plt.figure(figsize=figsize)
if maptype == "heatmap":
sns.heatmap(matrix_corr, annot=annot)
elif maptype == "clustermap":
sns.clustermap(matrix_corr, annot=annot)
if save:
plt.savefig(filename)
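# Example usage (sketch, commented out to keep the run light; the helper is not called below):
# corr_matrix(data.select_dtypes("number"), figsize=(18, 12), absolute=True, crit_value=0.6, annot=False)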
df = data.copy()
# ### Column descriptions
# id - Unique ID for each home sold
# date - Date of the home sale
# price - Price of each home sold
# bedrooms - Number of bedrooms
# bathrooms - Number of bathrooms, where .5 accounts for a room with a toilet but no shower
# sqft_living - Square footage of the apartment's interior living space
# sqft_lot - Square footage of the land space
# floors - Number of floors
# waterfront - A dummy variable for whether the apartment was overlooking the waterfront or not
# view - An index from 0 to 4 of how good the view of the property was
# condition - An index from 1 to 5 on the condition of the apartment,
# grade - An index from 1 to 13, where 1-3 falls short of building construction and design, 7 has an average level of construction and design, and 11-13 have a high quality level of construction and design.
# sqft_above - The square footage of the interior housing space that is above ground level
# sqft_basement - The square footage of the interior housing space that is below ground level
# yr_built - The year the house was initially built
# yr_renovated - The year of the house’s last renovation
# zipcode - What zipcode area the house is in
# lat - Latitude
# long - Longitude
# sqft_living15 - The square footage of interior housing living space for the nearest 15 neighbors
# sqft_lot15 - The square footage of the land lots of the nearest 15 neighbors
# verified from 2 sources:
# https://www.slideshare.net/PawanShivhare1/predicting-king-county-house-prices
# https://rstudio-pubs-static.s3.amazonaws.com/155304_cc51f448116744069664b35e7762999f.htm
#
df.head()
# ### Scatter 2 numerical columns
def plot_2_features(df, x_name, y_name):
plt.figure(figsize=(12, 8))
plt.scatter(df[x_name], df[y_name], s=2)
plt.xlabel(x_name)
plt.ylabel(y_name)
# ### Plot map with a numerical column
def plot_map_num(df, y_name, interquartile=True, v=None):
plt.figure(figsize=(20, 10))
    if v is not None:
vmin = v[0]
vmax = v[1]
points = plt.scatter(
df["long"],
df["lat"],
c=df[y_name],
cmap="jet",
lw=0,
s=2,
vmin=vmin,
vmax=vmax,
)
elif interquartile:
desc_df = df.describe()
vmin = desc_df.loc["25%", y_name]
vmax = desc_df.loc["75%", y_name]
points = plt.scatter(
df["long"],
df["lat"],
c=df[y_name],
cmap="jet",
lw=0,
s=2,
vmin=vmin,
vmax=vmax,
)
else:
points = plt.scatter(df["long"], df["lat"], c=df[y_name], cmap="jet", lw=0, s=2)
plt.colorbar(points)
plt.xlabel("Long")
plt.ylabel("Lat")
# ### Plot price map
plot_map_num(df, "price", interquartile=True)
# ### Load the dataset containing neighborhoods with GPS coordinates
# Source: https://www.kaggle.com/city-of-seattle/sea-building-energy-benchmarking#2015-building-energy-benchmarking.csv
# Note: I loaded a cleaned version of the dataset that I made for a data-science online training.
neighborhood_data = pd.read_csv(
"../input/sea-energy-building-benchmark/data_cleaned.csv"
)
# Selecting only the interesting columns
neighborhood_df = neighborhood_data.copy()
neighborhood_df = neighborhood_df[["Latitude", "Longitude", "Neighborhood"]]
neighborhood_df.head()
neighborhood_df["Neighborhood"].unique()
# ### Importing KNN, MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
X = neighborhood_df.drop("Neighborhood", axis=1).values
y = neighborhood_df["Neighborhood"].values
# Splitting Data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Wrote my own small encoding class, which is easy to use, because I ran into some errors with LabelEncoder
class Encoding:
def __init__(self):
self.dico = {}
self.inv_dico = {}
def fit(self, y):
i = 0
for classe in pd.Series(y).unique():
self.dico[classe] = i
self.inv_dico[i] = classe
i += 1
def transform(self, y):
return pd.Series(y).map(self.dico).values
def inverse_transform(self, y):
return pd.Series(y).map(self.inv_dico).values
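# Quick sanity check (toy labels): the class round-trips between string labels and integer codes.
_enc = Encoding()
_enc.fit(["North", "East", "North"])
print(_enc.transform(["North", "East", "North"]), _enc.inverse_transform([0, 1, 0]))  # [0 1 0] ['North' 'East' 'North']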
# ### Using the neighborhood dataset to train a model for predicting Neighborhood in df
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
encoder = Encoding()
encoder.fit(y_train)
y_train_coded = encoder.transform(y_train)
y_test_coded = encoder.transform(y_test)
# KNeighborsClassifier with minimal tuning (a wider parameter grid or another algorithm may do better). Can be improved.
model = GridSearchCV(KNeighborsClassifier(), {"n_neighbors": range(1, 11)})
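# A wider search one could try (sketch only — not the configuration used for the results below):
# model = GridSearchCV(
#     KNeighborsClassifier(),
#     {"n_neighbors": range(1, 16), "weights": ["uniform", "distance"], "p": [1, 2]},
#     cv=5,
# )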
# Fitting with training set
model.fit(X_train_scaled, y_train_coded)
# Predicting results on the test set
y_pred = encoder.inverse_transform(model.predict(X_test_scaled))
# Score on the test set
model.score(X_test_scaled, y_test_coded)
# ### Confusion Matrix
plt.figure(figsize=(12, 8))
sns.heatmap(confusion_matrix(y_test, y_pred), annot=True)
# ### Classification report
print(classification_report(y_test, y_pred))
# Adding a new column Neighborhood for King County Houses
df["Neighborhood"] = encoder.inverse_transform(
model.predict(scaler.transform(df[["lat", "long"]].values))
)
# ### Plot map with a categorical column
def plot_map_categ(df, categ_column):
plt.figure(figsize=(20, 10))
for classe in df[categ_column].sort_values().unique():
df_classe = df[df[categ_column] == classe]
plt.scatter(df_classe["long"], df_classe["lat"], lw=0, s=10, label=classe)
plt.legend()
plt.xlabel("Long")
plt.ylabel("Lat")
# ### Neighborhood locations
# Note: the neighborhood dataset covers a smaller longitude range than the houses,
# so the classification in the eastern (mountain) part may not be very accurate.
plot_map_categ(df, "Neighborhood")
# ### Boxplot function
def boxplot_groupes(df, categ_column, target_column, figsize=(20, 10)):
groupes = []
for cat in list(df[categ_column].unique()):
groupes.append(df[df[categ_column] == cat][target_column])
medianprops = {"color": "black"}
meanprops = {
"marker": "o",
"markeredgecolor": "black",
"markerfacecolor": "firebrick",
}
plt.figure(figsize=figsize)
plt.boxplot(
groupes,
labels=list(df[categ_column].unique()),
showfliers=False,
medianprops=medianprops,
vert=False,
patch_artist=True,
showmeans=True,
meanprops=meanprops,
)
plt.ylabel(categ_column)
plt.xlabel(target_column)
# Boxplot Neighborhood / price
boxplot_groupes(df, "Neighborhood", "price")
# ### Updated King County house prices dataset with a 'Neighborhood' column
df.head()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046416.ipynb
|
housesalesprediction
|
harlfoxem
|
[{"Id": 69046416, "ScriptId": 18825679, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7571614, "CreationDate": "07/26/2021 08:39:57", "VersionNumber": 4.0, "Title": "King County Houses Neighborhood Classification", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 269.0, "LinesInsertedFromPrevious": 10.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 259.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 91775956, "KernelVersionId": 69046416, "SourceDatasetVersionId": 270}]
|
[{"Id": 270, "DatasetId": 128, "DatasourceVersionId": 270, "CreatorUserId": 680332, "LicenseName": "CC0: Public Domain", "CreationDate": "08/25/2016 15:52:49", "VersionNumber": 1.0, "Title": "House Sales in King County, USA", "Slug": "housesalesprediction", "Subtitle": "Predict house price using regression", "Description": "This dataset contains house sale prices for King County, which includes Seattle. It includes homes sold between May 2014 and May 2015.\n\nIt's a great dataset for evaluating simple regression models.", "VersionNotes": "Initial release", "TotalCompressedBytes": 2515206.0, "TotalUncompressedBytes": 2515206.0}]
|
[{"Id": 128, "CreatorUserId": 680332, "OwnerUserId": 680332.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 270.0, "CurrentDatasourceVersionId": 270.0, "ForumId": 1447, "Type": 2, "CreationDate": "08/25/2016 15:52:49", "LastActivityDate": "02/06/2018", "TotalViews": 996866, "TotalDownloads": 172516, "TotalVotes": 2041, "TotalKernels": 1225}]
|
[{"Id": 680332, "UserName": "harlfoxem", "DisplayName": "harlfoxem", "RegisterDate": "08/05/2016", "PerformanceTier": 1}]
|
|
[{"housesalesprediction/kc_house_data.csv": {"column_names": "[\"id\", \"date\", \"price\", \"bedrooms\", \"bathrooms\", \"sqft_living\", \"sqft_lot\", \"floors\", \"waterfront\", \"view\", \"condition\", \"grade\", \"sqft_above\", \"sqft_basement\", \"yr_built\", \"yr_renovated\", \"zipcode\", \"lat\", \"long\", \"sqft_living15\", \"sqft_lot15\"]", "column_data_types": "{\"id\": \"int64\", \"date\": \"object\", \"price\": \"float64\", \"bedrooms\": \"int64\", \"bathrooms\": \"float64\", \"sqft_living\": \"int64\", \"sqft_lot\": \"int64\", \"floors\": \"float64\", \"waterfront\": \"int64\", \"view\": \"int64\", \"condition\": \"int64\", \"grade\": \"int64\", \"sqft_above\": \"int64\", \"sqft_basement\": \"int64\", \"yr_built\": \"int64\", \"yr_renovated\": \"int64\", \"zipcode\": \"int64\", \"lat\": \"float64\", \"long\": \"float64\", \"sqft_living15\": \"int64\", \"sqft_lot15\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 21613 entries, 0 to 21612\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 21613 non-null int64 \n 1 date 21613 non-null object \n 2 price 21613 non-null float64\n 3 bedrooms 21613 non-null int64 \n 4 bathrooms 21613 non-null float64\n 5 sqft_living 21613 non-null int64 \n 6 sqft_lot 21613 non-null int64 \n 7 floors 21613 non-null float64\n 8 waterfront 21613 non-null int64 \n 9 view 21613 non-null int64 \n 10 condition 21613 non-null int64 \n 11 grade 21613 non-null int64 \n 12 sqft_above 21613 non-null int64 \n 13 sqft_basement 21613 non-null int64 \n 14 yr_built 21613 non-null int64 \n 15 yr_renovated 21613 non-null int64 \n 16 zipcode 21613 non-null int64 \n 17 lat 21613 non-null float64\n 18 long 21613 non-null float64\n 19 sqft_living15 21613 non-null int64 \n 20 sqft_lot15 21613 non-null int64 \ndtypes: float64(5), int64(15), object(1)\nmemory usage: 3.5+ MB\n", "summary": "{\"id\": {\"count\": 21613.0, \"mean\": 4580301520.864988, \"std\": 2876565571.312057, \"min\": 1000102.0, \"25%\": 2123049194.0, \"50%\": 3904930410.0, \"75%\": 7308900445.0, \"max\": 9900000190.0}, \"price\": {\"count\": 21613.0, \"mean\": 540088.1417665294, \"std\": 367127.19648269983, \"min\": 75000.0, \"25%\": 321950.0, \"50%\": 450000.0, \"75%\": 645000.0, \"max\": 7700000.0}, \"bedrooms\": {\"count\": 21613.0, \"mean\": 3.37084162309721, \"std\": 0.9300618311474517, \"min\": 0.0, \"25%\": 3.0, \"50%\": 3.0, \"75%\": 4.0, \"max\": 33.0}, \"bathrooms\": {\"count\": 21613.0, \"mean\": 2.1147573219821405, \"std\": 0.770163157217742, \"min\": 0.0, \"25%\": 1.75, \"50%\": 2.25, \"75%\": 2.5, \"max\": 8.0}, \"sqft_living\": {\"count\": 21613.0, \"mean\": 2079.8997362698374, \"std\": 918.4408970468115, \"min\": 290.0, \"25%\": 1427.0, \"50%\": 1910.0, \"75%\": 2550.0, \"max\": 13540.0}, \"sqft_lot\": {\"count\": 21613.0, \"mean\": 15106.967565816869, \"std\": 41420.51151513548, \"min\": 520.0, \"25%\": 5040.0, \"50%\": 7618.0, \"75%\": 10688.0, \"max\": 1651359.0}, \"floors\": {\"count\": 21613.0, \"mean\": 1.4943089807060566, \"std\": 0.5399888951423463, \"min\": 1.0, \"25%\": 1.0, \"50%\": 1.5, \"75%\": 2.0, \"max\": 3.5}, \"waterfront\": {\"count\": 21613.0, \"mean\": 0.007541757275713691, \"std\": 0.08651719772788764, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"view\": {\"count\": 21613.0, \"mean\": 0.23430342849211122, \"std\": 0.7663175692736122, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 4.0}, \"condition\": {\"count\": 21613.0, \"mean\": 
3.4094295100171195, \"std\": 0.6507430463662071, \"min\": 1.0, \"25%\": 3.0, \"50%\": 3.0, \"75%\": 4.0, \"max\": 5.0}, \"grade\": {\"count\": 21613.0, \"mean\": 7.656873178179799, \"std\": 1.175458756974335, \"min\": 1.0, \"25%\": 7.0, \"50%\": 7.0, \"75%\": 8.0, \"max\": 13.0}, \"sqft_above\": {\"count\": 21613.0, \"mean\": 1788.3906907879516, \"std\": 828.0909776519169, \"min\": 290.0, \"25%\": 1190.0, \"50%\": 1560.0, \"75%\": 2210.0, \"max\": 9410.0}, \"sqft_basement\": {\"count\": 21613.0, \"mean\": 291.5090454818859, \"std\": 442.5750426774682, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 560.0, \"max\": 4820.0}, \"yr_built\": {\"count\": 21613.0, \"mean\": 1971.0051357978994, \"std\": 29.37341080238659, \"min\": 1900.0, \"25%\": 1951.0, \"50%\": 1975.0, \"75%\": 1997.0, \"max\": 2015.0}, \"yr_renovated\": {\"count\": 21613.0, \"mean\": 84.40225790033776, \"std\": 401.6792400191759, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 2015.0}, \"zipcode\": {\"count\": 21613.0, \"mean\": 98077.93980474715, \"std\": 53.505026257473084, \"min\": 98001.0, \"25%\": 98033.0, \"50%\": 98065.0, \"75%\": 98118.0, \"max\": 98199.0}, \"lat\": {\"count\": 21613.0, \"mean\": 47.56005251931708, \"std\": 0.13856371024192418, \"min\": 47.1559, \"25%\": 47.471, \"50%\": 47.5718, \"75%\": 47.678, \"max\": 47.7776}, \"long\": {\"count\": 21613.0, \"mean\": -122.21389640494147, \"std\": 0.14082834238139408, \"min\": -122.519, \"25%\": -122.328, \"50%\": -122.23, \"75%\": -122.125, \"max\": -121.315}, \"sqft_living15\": {\"count\": 21613.0, \"mean\": 1986.552491556008, \"std\": 685.3913042527776, \"min\": 399.0, \"25%\": 1490.0, \"50%\": 1840.0, \"75%\": 2360.0, \"max\": 6210.0}, \"sqft_lot15\": {\"count\": 21613.0, \"mean\": 12768.455651691113, \"std\": 27304.17963133851, \"min\": 651.0, \"25%\": 5100.0, \"50%\": 7620.0, \"75%\": 10083.0, \"max\": 871200.0}}", "examples": "{\"id\":{\"0\":7129300520,\"1\":6414100192,\"2\":5631500400,\"3\":2487200875},\"date\":{\"0\":\"20141013T000000\",\"1\":\"20141209T000000\",\"2\":\"20150225T000000\",\"3\":\"20141209T000000\"},\"price\":{\"0\":221900.0,\"1\":538000.0,\"2\":180000.0,\"3\":604000.0},\"bedrooms\":{\"0\":3,\"1\":3,\"2\":2,\"3\":4},\"bathrooms\":{\"0\":1.0,\"1\":2.25,\"2\":1.0,\"3\":3.0},\"sqft_living\":{\"0\":1180,\"1\":2570,\"2\":770,\"3\":1960},\"sqft_lot\":{\"0\":5650,\"1\":7242,\"2\":10000,\"3\":5000},\"floors\":{\"0\":1.0,\"1\":2.0,\"2\":1.0,\"3\":1.0},\"waterfront\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"view\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"condition\":{\"0\":3,\"1\":3,\"2\":3,\"3\":5},\"grade\":{\"0\":7,\"1\":7,\"2\":6,\"3\":7},\"sqft_above\":{\"0\":1180,\"1\":2170,\"2\":770,\"3\":1050},\"sqft_basement\":{\"0\":0,\"1\":400,\"2\":0,\"3\":910},\"yr_built\":{\"0\":1955,\"1\":1951,\"2\":1933,\"3\":1965},\"yr_renovated\":{\"0\":0,\"1\":1991,\"2\":0,\"3\":0},\"zipcode\":{\"0\":98178,\"1\":98125,\"2\":98028,\"3\":98136},\"lat\":{\"0\":47.5112,\"1\":47.721,\"2\":47.7379,\"3\":47.5208},\"long\":{\"0\":-122.257,\"1\":-122.319,\"2\":-122.233,\"3\":-122.393},\"sqft_living15\":{\"0\":1340,\"1\":1690,\"2\":2720,\"3\":1360},\"sqft_lot15\":{\"0\":5650,\"1\":7639,\"2\":8062,\"3\":5000}}"}}]
| true | 2 |
<start_data_description><data_path>housesalesprediction/kc_house_data.csv:
<column_names>
['id', 'date', 'price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode', 'lat', 'long', 'sqft_living15', 'sqft_lot15']
<column_types>
{'id': 'int64', 'date': 'object', 'price': 'float64', 'bedrooms': 'int64', 'bathrooms': 'float64', 'sqft_living': 'int64', 'sqft_lot': 'int64', 'floors': 'float64', 'waterfront': 'int64', 'view': 'int64', 'condition': 'int64', 'grade': 'int64', 'sqft_above': 'int64', 'sqft_basement': 'int64', 'yr_built': 'int64', 'yr_renovated': 'int64', 'zipcode': 'int64', 'lat': 'float64', 'long': 'float64', 'sqft_living15': 'int64', 'sqft_lot15': 'int64'}
<dataframe_Summary>
{'id': {'count': 21613.0, 'mean': 4580301520.864988, 'std': 2876565571.312057, 'min': 1000102.0, '25%': 2123049194.0, '50%': 3904930410.0, '75%': 7308900445.0, 'max': 9900000190.0}, 'price': {'count': 21613.0, 'mean': 540088.1417665294, 'std': 367127.19648269983, 'min': 75000.0, '25%': 321950.0, '50%': 450000.0, '75%': 645000.0, 'max': 7700000.0}, 'bedrooms': {'count': 21613.0, 'mean': 3.37084162309721, 'std': 0.9300618311474517, 'min': 0.0, '25%': 3.0, '50%': 3.0, '75%': 4.0, 'max': 33.0}, 'bathrooms': {'count': 21613.0, 'mean': 2.1147573219821405, 'std': 0.770163157217742, 'min': 0.0, '25%': 1.75, '50%': 2.25, '75%': 2.5, 'max': 8.0}, 'sqft_living': {'count': 21613.0, 'mean': 2079.8997362698374, 'std': 918.4408970468115, 'min': 290.0, '25%': 1427.0, '50%': 1910.0, '75%': 2550.0, 'max': 13540.0}, 'sqft_lot': {'count': 21613.0, 'mean': 15106.967565816869, 'std': 41420.51151513548, 'min': 520.0, '25%': 5040.0, '50%': 7618.0, '75%': 10688.0, 'max': 1651359.0}, 'floors': {'count': 21613.0, 'mean': 1.4943089807060566, 'std': 0.5399888951423463, 'min': 1.0, '25%': 1.0, '50%': 1.5, '75%': 2.0, 'max': 3.5}, 'waterfront': {'count': 21613.0, 'mean': 0.007541757275713691, 'std': 0.08651719772788764, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'view': {'count': 21613.0, 'mean': 0.23430342849211122, 'std': 0.7663175692736122, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 4.0}, 'condition': {'count': 21613.0, 'mean': 3.4094295100171195, 'std': 0.6507430463662071, 'min': 1.0, '25%': 3.0, '50%': 3.0, '75%': 4.0, 'max': 5.0}, 'grade': {'count': 21613.0, 'mean': 7.656873178179799, 'std': 1.175458756974335, 'min': 1.0, '25%': 7.0, '50%': 7.0, '75%': 8.0, 'max': 13.0}, 'sqft_above': {'count': 21613.0, 'mean': 1788.3906907879516, 'std': 828.0909776519169, 'min': 290.0, '25%': 1190.0, '50%': 1560.0, '75%': 2210.0, 'max': 9410.0}, 'sqft_basement': {'count': 21613.0, 'mean': 291.5090454818859, 'std': 442.5750426774682, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 560.0, 'max': 4820.0}, 'yr_built': {'count': 21613.0, 'mean': 1971.0051357978994, 'std': 29.37341080238659, 'min': 1900.0, '25%': 1951.0, '50%': 1975.0, '75%': 1997.0, 'max': 2015.0}, 'yr_renovated': {'count': 21613.0, 'mean': 84.40225790033776, 'std': 401.6792400191759, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 2015.0}, 'zipcode': {'count': 21613.0, 'mean': 98077.93980474715, 'std': 53.505026257473084, 'min': 98001.0, '25%': 98033.0, '50%': 98065.0, '75%': 98118.0, 'max': 98199.0}, 'lat': {'count': 21613.0, 'mean': 47.56005251931708, 'std': 0.13856371024192418, 'min': 47.1559, '25%': 47.471, '50%': 47.5718, '75%': 47.678, 'max': 47.7776}, 'long': {'count': 21613.0, 'mean': -122.21389640494147, 'std': 0.14082834238139408, 'min': -122.519, '25%': -122.328, '50%': -122.23, '75%': -122.125, 'max': -121.315}, 'sqft_living15': {'count': 21613.0, 'mean': 1986.552491556008, 'std': 685.3913042527776, 'min': 399.0, '25%': 1490.0, '50%': 1840.0, '75%': 2360.0, 'max': 6210.0}, 'sqft_lot15': {'count': 21613.0, 'mean': 12768.455651691113, 'std': 27304.17963133851, 'min': 651.0, '25%': 5100.0, '50%': 7620.0, '75%': 10083.0, 'max': 871200.0}}
<dataframe_info>
RangeIndex: 21613 entries, 0 to 21612
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 21613 non-null int64
1 date 21613 non-null object
2 price 21613 non-null float64
3 bedrooms 21613 non-null int64
4 bathrooms 21613 non-null float64
5 sqft_living 21613 non-null int64
6 sqft_lot 21613 non-null int64
7 floors 21613 non-null float64
8 waterfront 21613 non-null int64
9 view 21613 non-null int64
10 condition 21613 non-null int64
11 grade 21613 non-null int64
12 sqft_above 21613 non-null int64
13 sqft_basement 21613 non-null int64
14 yr_built 21613 non-null int64
15 yr_renovated 21613 non-null int64
16 zipcode 21613 non-null int64
17 lat 21613 non-null float64
18 long 21613 non-null float64
19 sqft_living15 21613 non-null int64
20 sqft_lot15 21613 non-null int64
dtypes: float64(5), int64(15), object(1)
memory usage: 3.5+ MB
<some_examples>
{'id': {'0': 7129300520, '1': 6414100192, '2': 5631500400, '3': 2487200875}, 'date': {'0': '20141013T000000', '1': '20141209T000000', '2': '20150225T000000', '3': '20141209T000000'}, 'price': {'0': 221900.0, '1': 538000.0, '2': 180000.0, '3': 604000.0}, 'bedrooms': {'0': 3, '1': 3, '2': 2, '3': 4}, 'bathrooms': {'0': 1.0, '1': 2.25, '2': 1.0, '3': 3.0}, 'sqft_living': {'0': 1180, '1': 2570, '2': 770, '3': 1960}, 'sqft_lot': {'0': 5650, '1': 7242, '2': 10000, '3': 5000}, 'floors': {'0': 1.0, '1': 2.0, '2': 1.0, '3': 1.0}, 'waterfront': {'0': 0, '1': 0, '2': 0, '3': 0}, 'view': {'0': 0, '1': 0, '2': 0, '3': 0}, 'condition': {'0': 3, '1': 3, '2': 3, '3': 5}, 'grade': {'0': 7, '1': 7, '2': 6, '3': 7}, 'sqft_above': {'0': 1180, '1': 2170, '2': 770, '3': 1050}, 'sqft_basement': {'0': 0, '1': 400, '2': 0, '3': 910}, 'yr_built': {'0': 1955, '1': 1951, '2': 1933, '3': 1965}, 'yr_renovated': {'0': 0, '1': 1991, '2': 0, '3': 0}, 'zipcode': {'0': 98178, '1': 98125, '2': 98028, '3': 98136}, 'lat': {'0': 47.5112, '1': 47.721, '2': 47.7379, '3': 47.5208}, 'long': {'0': -122.257, '1': -122.319, '2': -122.233, '3': -122.393}, 'sqft_living15': {'0': 1340, '1': 1690, '2': 2720, '3': 1360}, 'sqft_lot15': {'0': 5650, '1': 7639, '2': 8062, '3': 5000}}
<end_description>
| 2,790 | 1 | 4,404 | 2,790 |
69046748
|
<jupyter_start><jupyter_text>CommonLit Various
Kaggle dataset identifier: commonlit-various
<jupyter_script>import warnings
warnings.simplefilter("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_addons as tfa
from tqdm.notebook import tqdm
from nltk.tokenize import word_tokenize, sent_tokenize
from sklearn.model_selection import KFold, StratifiedKFold
from tensorflow.keras.mixed_precision import experimental as mixed_precision
from kaggle_datasets import KaggleDatasets
from scipy.stats import pearsonr
from transformers import RobertaTokenizer, TFRobertaModel
from readability import Readability
from nltk.tokenize import word_tokenize, sent_tokenize
import os
import sys
import nltk
import string
import math
import logging
import glob
import random
tf.get_logger().setLevel(logging.ERROR)
tqdm.pandas()
print(f"tensorflow version: {tf.__version__}")
print(f"tensorflow keras version: {tf.keras.__version__}")
print(f"python version: P{sys.version}")
def set_seeds(seed):
os.environ["PYTHONHASHSEED"] = str(seed)
random.seed(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
set_seeds(42)
SEQ_LENGTH = 250
# # Train
train = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
train_ratio_vectors = np.load("/kaggle/input/commonlit-various/train_ratio_vectors.npy")
sample_submission = pd.read_csv(
"/kaggle/input/commonlitreadabilityprize/sample_submission.csv"
)
RATIO_VECTOR_LENGTH = len(train_ratio_vectors[0])
print(
f"train_ratio_vectors shape: {train_ratio_vectors.shape}, RATIO_VECTOR_LENGTH: {RATIO_VECTOR_LENGTH}"
)
train["word_count"] = train["excerpt"].progress_apply(word_tokenize).apply(len)
train["sent_count"] = train["excerpt"].progress_apply(sent_tokenize).apply(len)
# # Info
display(train.info())
display(sample_submission.info())
# # Head
display(train.head())
display(sample_submission.head())
# # Target Distribution
plt.figure(figsize=(15, 8))
train["target"].plot(kind="hist", bins=32)
plt.title("Target Value Distribution", size=18)
plt.show()
display(train["target"].describe())
# # Excerpt Length
plt.figure(figsize=(15, 8))
train["word_count"].plot(kind="hist", bins=32)
plt.title("Word Count Distribution", size=18)
plt.show()
plt.figure(figsize=(15, 8))
train["sent_count"].plot(kind="hist", bins=32)
plt.title("Sentence Count Distribution", size=18)
plt.show()
# # Roberta Tokenize
# Get the trained model we want to use
MODEL = "roberta-base"
# Let's load our model tokenizer
tokenizer = RobertaTokenizer.from_pretrained(MODEL)
# For tf.dataset
AUTO = tf.data.experimental.AUTOTUNE
# This function tokenizes the text according to a transformers model tokenizer
def regular_encode(excerpt):
enc_di = tokenizer.batch_encode_plus(
excerpt,
padding="max_length",
truncation=True,
max_length=SEQ_LENGTH,
)
return np.array(enc_di["input_ids"])
train["input_ids"] = regular_encode(train["excerpt"]).tolist()
display(train.head())
# # Training
# Detect hardware, return appropriate distribution strategy
try:
TPU = (
tf.distribute.cluster_resolver.TPUClusterResolver()
) # TPU detection. No parameters necessary if TPU_NAME environment variable is set. On Kaggle this is always the case.
print("Running on TPU ", TPU.master())
except ValueError:
print("Running on GPU")
TPU = None
if TPU:
tf.config.experimental_connect_to_cluster(TPU)
tf.tpu.experimental.initialize_tpu_system(TPU)
strategy = tf.distribute.experimental.TPUStrategy(TPU)
else:
strategy = (
tf.distribute.get_strategy()
) # default distribution strategy in Tensorflow. Works on CPU and single GPU.
REPLICAS = strategy.num_replicas_in_sync
print(f"REPLICAS: {REPLICAS}")
# set the global precision policy (kept at full float32 precision here)
mixed_precision.set_policy("float32")
print(f"Compute dtype: {mixed_precision.global_policy().compute_dtype}")
print(f"Variable dtype: {mixed_precision.global_policy().variable_dtype}")
# # Model
def get_model(eps=1e-6, amsgrad=False, weights_path=None):
tf.keras.backend.clear_session()
with strategy.scope():
# Inputs
input_ids = tf.keras.Input(name="input_ids", shape=[SEQ_LENGTH], dtype=tf.int32)
ratio_vector = tf.keras.Input(
name="ratio_vector", shape=[RATIO_VECTOR_LENGTH], dtype=tf.float32
)
# ROBERTA
transformer = TFRobertaModel.from_pretrained(MODEL)
# Load saved weights
transformer.load_weights(
"/kaggle/input/simplenormal-wikipedia-sections/roberta_pretrained.h5"
)
transformer.trainable = True
# RoBERTa
sequence_output = transformer(input_ids)[0]
cls_token = sequence_output[:, 0, :]
# Ratio Vector
ratio_vector_fc = tf.keras.layers.Dense(256)(ratio_vector)
output_concat = tf.keras.layers.Concatenate(axis=1)(
[cls_token, ratio_vector_fc]
)
output = tf.keras.layers.Dense(1, activation="linear", dtype=tf.float32)(
output_concat
)
# Model
model = tf.keras.models.Model(
inputs=[input_ids, ratio_vector], outputs=[output]
)
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.optimizers.Adam(learning_rate=4e-5, epsilon=eps)
metrics = [
tf.keras.metrics.RootMeanSquaredError(name="RMSE"),
]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# Load weights if weights path is provided
if weights_path:
model.load_weights(weights_path)
return model
model = get_model()
model.summary()
tf.keras.utils.plot_model(
model, show_shapes=True, show_dtype=True, show_layer_names=True, expand_nested=False
)
# # Configuration
BATCH_SIZE_BASE = 24 // REPLICAS
BATCH_SIZE = BATCH_SIZE_BASE * REPLICAS
STEPS_PER_EPOCH = len(train) // BATCH_SIZE
KFOLDS = 5
print(f"BATCH SIZE: {BATCH_SIZE}")
def get_kfold_indices():
kf = KFold(n_splits=KFOLDS, shuffle=True, random_state=42)
kfold_indices = list(kf.split(train.index.tolist()))
return kfold_indices
KFOLD_INDICES = get_kfold_indices()
print(f"Train Size: {len(KFOLD_INDICES[0][0])}, Val Size: {len(KFOLD_INDICES[0][1])}")
# # Train Dataset
def get_train_dataset(kfold, drop_remainder=True):
train_idxs, _ = KFOLD_INDICES[kfold]
# TRAIN DATASET
input_ids = np.array(list(train.loc[train_idxs, "input_ids"]), dtype=np.int32)
ratio_vector = train_ratio_vectors[train_idxs]
train_x = {
"input_ids": input_ids,
"ratio_vector": ratio_vector,
}
train_y = train.loc[train_idxs, "target"]
train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))
if drop_remainder:
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.shuffle(len(train_idxs))
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=drop_remainder)
train_dataset = train_dataset.prefetch(1)
if drop_remainder:
TRAIN_STEPS_PER_EPOCH = len(train_idxs) // BATCH_SIZE
else:
TRAIN_STEPS_PER_EPOCH = math.ceil(len(train_idxs) / BATCH_SIZE)
return train_dataset, TRAIN_STEPS_PER_EPOCH
train_dataset, TRAIN_STEPS_PER_EPOCH = get_train_dataset(0, drop_remainder=False)
train_x, train_y = next(iter(train_dataset))
print(f"train_x keys: {list(train_x.keys())}")
print(f"train_y shape: {train_y.shape}, train_y dtype {train_y.dtype}")
# # Val Dataset
def get_val_dataset(kfold, drop_remainder=True):
_, val_idxs = KFOLD_INDICES[kfold]
# VAL DATASET
input_ids = np.array(list(train.loc[val_idxs, "input_ids"]), dtype=np.int32)
ratio_vector = train_ratio_vectors[val_idxs]
val_x = {
"input_ids": input_ids,
"ratio_vector": ratio_vector,
}
val_y = train.loc[val_idxs, "target"]
val_dataset = tf.data.Dataset.from_tensor_slices((val_x, val_y))
val_dataset = val_dataset.batch(BATCH_SIZE, drop_remainder=False)
val_dataset = val_dataset.prefetch(1)
VAL_STEPS_PER_EPOCH = math.ceil(len(val_idxs) / BATCH_SIZE)
return val_dataset, VAL_STEPS_PER_EPOCH
val_dataset, VAL_STEPS_PER_EPOCH = get_val_dataset(0)
val_x, val_y = next(iter(val_dataset))
print(f"val_x keys: {list(val_x.keys())}")
print(f"val_y shape: {val_y.shape}, val_y dtypeL {val_y.dtype}")
# # Learning Rate Scheduler
TRAIN_LEN = len(KFOLD_INDICES[0][0])
TRAIN_ROUNDS = 4
STEPS_PER_EPOCH = TRAIN_LEN // (BATCH_SIZE * 16)
EPOCHS = (TRAIN_ROUNDS * TRAIN_LEN) // (STEPS_PER_EPOCH * BATCH_SIZE)
LR_RAMPUP_ITERATIONS = 0
LR_RAMPUP_EPOCHS = int(
LR_RAMPUP_ITERATIONS * (len(KFOLD_INDICES[0][0]) / (BATCH_SIZE * STEPS_PER_EPOCH))
)
print(
f"EPOCHS: {EPOCHS}, STEPS_PER_EPOCH: {STEPS_PER_EPOCH}, LR_RAMPUP_EPOCHS: {LR_RAMPUP_EPOCHS}"
)
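# Hedged sketch (assumption): LR_RAMPUP_EPOCHS is computed above but no schedule callback
# appears in this excerpt. A minimal linear-warm-up / constant schedule using those values
# could look like the callback below; the 4e-5 peak mirrors the optimizer's learning rate,
# everything else is illustrative and not wired into model.fit().
def lr_schedule(epoch, lr, peak_lr=4e-5):
    if LR_RAMPUP_EPOCHS > 0 and epoch < LR_RAMPUP_EPOCHS:
        return peak_lr * (epoch + 1) / LR_RAMPUP_EPOCHS  # linear warm-up
    return peak_lr  # hold the peak rate afterwards

lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_schedule, verbose=0)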
# # Training
print("=" * 20, f"start", "=" * 20)
print()
# Histories
HISTORIES = dict()
# K-fold training loop
for fold in range(KFOLDS):
# Model Checkpoint
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
f"model_fold_{fold}.h5",
monitor="val_RMSE",
save_best_only=True,
save_weights_only=True,
verbose=1,
mode="min",
)
print("=" * 10, f"FOLD {fold}", "=" * 10)
# Models
model = get_model()
# Datasets
train_dataset, TRAIN_STEPS_PER_EPOCH = get_train_dataset(fold)
val_dataset, VAL_STEPS_PER_EPOCH = get_val_dataset(fold)
print(
f"TRAIN_STEPS_PER_EPOCH: {TRAIN_STEPS_PER_EPOCH}, VAL_STEPS_PER_EPOCH: {VAL_STEPS_PER_EPOCH}"
)
# Train Model
HISTORIES[f"FOLD_{fold}"] = model.fit(
train_dataset,
epochs=EPOCHS,
verbose=0,
# Hardcode Steps Per Epoch
steps_per_epoch=STEPS_PER_EPOCH,
# validation
validation_data=val_dataset,
validation_steps=VAL_STEPS_PER_EPOCH,
# callbacks
callbacks=[
checkpoint_callback,
],
)
# OOF RMSE
OOF_RMSE = []
for fold in range(KFOLDS):
OOF_RMSE.append(min(HISTORIES[f"FOLD_{fold}"].history["val_RMSE"]))
print()
print(", ".join([f"fold {i}: {rmse:.4f}" for i, rmse in zip(range(KFOLDS), OOF_RMSE)]))
print(f"OOF_RMSE: {np.mean(OOF_RMSE):.4f}")
print()
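# Hedged follow-up sketch (assumption): the score above averages each fold's best
# validation RMSE. True out-of-fold predictions could be rebuilt from the saved
# checkpoints by reusing get_model / get_val_dataset defined earlier, e.g.:
oof_preds = np.zeros(len(train), dtype=np.float32)
for fold in range(KFOLDS):
    _, val_idxs = KFOLD_INDICES[fold]
    fold_model = get_model(weights_path=f"model_fold_{fold}.h5")
    fold_val_dataset, _ = get_val_dataset(fold)
    oof_preds[val_idxs] = fold_model.predict(fold_val_dataset).ravel()
oof_rmse = np.sqrt(np.mean((oof_preds - train["target"].values) ** 2))
print(f"True OOF RMSE: {oof_rmse:.4f}")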
# # Train History
def plot_history_metric(history, metric, axes, fold):
N_EPOCHS = len(history.history["loss"])
x = [1, 5] + [10 + 5 * idx for idx in range((N_EPOCHS - 10) // 5 + 1)]
x_ticks = np.arange(1, N_EPOCHS + 1)
val = "val" in "".join(history.history.keys())
# summarize history for the given metric
axes.plot(x_ticks, history.history[metric])
if val:
val_values = history.history[f"val_{metric}"]
val_argmin = np.argmin(val_values)
axes.scatter(
val_argmin + 1, val_values[val_argmin], color="red", s=50, marker="o"
)
axes.plot(x_ticks, val_values)
axes.set_title(f"Fold {fold} - Model {metric}", fontsize=20)
axes.set_ylabel(metric, fontsize=16)
axes.set_xlabel("epoch", fontsize=16)
axes.tick_params(axis="x", labelsize=8)
axes.set_xticks(x)  # show a thinned set of epoch ticks, starting at epoch 1
axes.legend(["train"] + (["test"] if val else []), prop={"size": 18})
axes.grid()
fig, axes = plt.subplots(KFOLDS, 2, figsize=(15, 6 * KFOLDS))
for fold in range(KFOLDS):
history = HISTORIES[f"FOLD_{fold}"]
plot_history_metric(history, "loss", axes[fold, 0], fold)
plot_history_metric(history, "RMSE", axes[fold, 1], fold)
plt.subplots_adjust(hspace=0.40, wspace=0.20)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046748.ipynb
|
commonlit-various
|
markwijkhuizen
|
[{"Id": 69046748, "ScriptId": 17247939, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4433335, "CreationDate": "07/26/2021 08:44:57", "VersionNumber": 58.0, "Title": "CommonLit Training", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 368.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 366.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91776584, "KernelVersionId": 69046748, "SourceDatasetVersionId": 2462427}, {"Id": 91776583, "KernelVersionId": 69046748, "SourceDatasetVersionId": 2462365}, {"Id": 91776582, "KernelVersionId": 69046748, "SourceDatasetVersionId": 2452800}, {"Id": 91776580, "KernelVersionId": 69046748, "SourceDatasetVersionId": 2219267}, {"Id": 91776579, "KernelVersionId": 69046748, "SourceDatasetVersionId": 2210416}, {"Id": 91776578, "KernelVersionId": 69046748, "SourceDatasetVersionId": 2199419}]
|
[{"Id": 2462427, "DatasetId": 1390791, "DatasourceVersionId": 2504854, "CreatorUserId": 4433335, "LicenseName": "Unknown", "CreationDate": "07/25/2021 18:42:55", "VersionNumber": 9.0, "Title": "CommonLit Various", "Slug": "commonlit-various", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Added all ratio's", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1390791, "CreatorUserId": 4433335, "OwnerUserId": 4433335.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3028066.0, "CurrentDatasourceVersionId": 3076077.0, "ForumId": 1410023, "Type": 2, "CreationDate": "06/05/2021 15:44:48", "LastActivityDate": "06/05/2021", "TotalViews": 1826, "TotalDownloads": 2, "TotalVotes": 2, "TotalKernels": 1}]
|
[{"Id": 4433335, "UserName": "markwijkhuizen", "DisplayName": "Mark Wijkhuizen", "RegisterDate": "02/04/2020", "PerformanceTier": 3}]
|
import warnings
warnings.simplefilter("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_addons as tfa
from tqdm.notebook import tqdm
from nltk.tokenize import word_tokenize, sent_tokenize
from sklearn.model_selection import KFold, StratifiedKFold
from tensorflow.keras.mixed_precision import experimental as mixed_precision
from kaggle_datasets import KaggleDatasets
from scipy.stats import pearsonr
from transformers import RobertaTokenizer, TFRobertaModel
from readability import Readability
from nltk.tokenize import word_tokenize, sent_tokenize
import os
import sys
import nltk
import string
import math
import logging
import glob
import random
tf.get_logger().setLevel(logging.ERROR)
tqdm.pandas()
print(f"tensorflow version: {tf.__version__}")
print(f"tensorflow keras version: {tf.keras.__version__}")
print(f"python version: P{sys.version}")
def set_seeds(seed):
os.environ["PYTHONHASHSEED"] = str(seed)
random.seed(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
set_seeds(42)
SEQ_LENGTH = 250
# # Train
train = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
train_ratio_vectors = np.load("/kaggle/input/commonlit-various/train_ratio_vectors.npy")
sample_submission = pd.read_csv(
"/kaggle/input/commonlitreadabilityprize/sample_submission.csv"
)
RATIO_VECTOR_LENGTH = len(train_ratio_vectors[0])
print(
f"train_ratio_vectors shape: {train_ratio_vectors.shape}, RATIO_VECTOR_LENGTH: {RATIO_VECTOR_LENGTH}"
)
train["word_count"] = train["excerpt"].progress_apply(word_tokenize).apply(len)
train["sent_count"] = train["excerpt"].progress_apply(sent_tokenize).apply(len)
# # Info
display(train.info())
display(sample_submission.info())
# # Head
display(train.head())
display(sample_submission.head())
# # Target Distribution
plt.figure(figsize=(15, 8))
train["target"].plot(kind="hist", bins=32)
plt.title("Target Value Distribution", size=18)
plt.show()
display(train["target"].describe())
# # Excerpt Length
plt.figure(figsize=(15, 8))
train["word_count"].plot(kind="hist", bins=32)
plt.title("Word Count Distribution", size=18)
plt.show()
plt.figure(figsize=(15, 8))
train["sent_count"].plot(kind="hist", bins=32)
plt.title("Sentence Count Distribution", size=18)
plt.show()
# # Roberta Tokenize
# Get the trained model we want to use
MODEL = "roberta-base"
# Let's load our model tokenizer
tokenizer = RobertaTokenizer.from_pretrained(MODEL)
# For tf.dataset
AUTO = tf.data.experimental.AUTOTUNE
# This function tokenizes the text according to a transformers model tokenizer
def regular_encode(excerpt):
enc_di = tokenizer.batch_encode_plus(
excerpt,
padding="max_length",
truncation=True,
max_length=SEQ_LENGTH,
)
return np.array(enc_di["input_ids"])
train["input_ids"] = regular_encode(train["excerpt"]).tolist()
display(train.head())
# # Training
# Detect hardware, return appropriate distribution strategy
try:
TPU = (
tf.distribute.cluster_resolver.TPUClusterResolver()
) # TPU detection. No parameters necessary if TPU_NAME environment variable is set. On Kaggle this is always the case.
print("Running on TPU ", TPU.master())
except ValueError:
print("Running on GPU")
TPU = None
if TPU:
tf.config.experimental_connect_to_cluster(TPU)
tf.tpu.experimental.initialize_tpu_system(TPU)
strategy = tf.distribute.experimental.TPUStrategy(TPU)
else:
strategy = (
tf.distribute.get_strategy()
) # default distribution strategy in Tensorflow. Works on CPU and single GPU.
REPLICAS = strategy.num_replicas_in_sync
print(f"REPLICAS: {REPLICAS}")
# set the global precision policy (kept at full float32 precision here)
mixed_precision.set_policy("float32")
print(f"Compute dtype: {mixed_precision.global_policy().compute_dtype}")
print(f"Variable dtype: {mixed_precision.global_policy().variable_dtype}")
# # Model
def get_model(eps=1e-6, amsgrad=False, weights_path=None):
tf.keras.backend.clear_session()
with strategy.scope():
# Inputs
input_ids = tf.keras.Input(name="input_ids", shape=[SEQ_LENGTH], dtype=tf.int32)
ratio_vector = tf.keras.Input(
name="ratio_vector", shape=[RATIO_VECTOR_LENGTH], dtype=tf.float32
)
# ROBERTA
transformer = TFRobertaModel.from_pretrained(MODEL)
# Load saved weights
transformer.load_weights(
"/kaggle/input/simplenormal-wikipedia-sections/roberta_pretrained.h5"
)
transformer.trainable = True
# RoBERTa
sequence_output = transformer(input_ids)[0]
cls_token = sequence_output[:, 0, :]
# Ratio Vector
ratio_vector_fc = tf.keras.layers.Dense(256)(ratio_vector)
output_concat = tf.keras.layers.Concatenate(axis=1)(
[cls_token, ratio_vector_fc]
)
output = tf.keras.layers.Dense(1, activation="linear", dtype=tf.float32)(
output_concat
)
# Model
model = tf.keras.models.Model(
inputs=[input_ids, ratio_vector], outputs=[output]
)
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.optimizers.Adam(learning_rate=4e-5, epsilon=eps)
metrics = [
tf.keras.metrics.RootMeanSquaredError(name="RMSE"),
]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# Load weights if weights path is provided
if weights_path:
model.load_weights(weights_path)
return model
model = get_model()
model.summary()
tf.keras.utils.plot_model(
model, show_shapes=True, show_dtype=True, show_layer_names=True, expand_nested=False
)
# # Configuration
BATCH_SIZE_BASE = 24 // REPLICAS
BATCH_SIZE = BATCH_SIZE_BASE * REPLICAS
STEPS_PER_EPOCH = len(train) // BATCH_SIZE
KFOLDS = 5
print(f"BATCH SIZE: {BATCH_SIZE}")
def get_kfold_indices():
kf = KFold(n_splits=KFOLDS, shuffle=True, random_state=42)
kfold_indices = list(kf.split(train.index.tolist()))
return kfold_indices
KFOLD_INDICES = get_kfold_indices()
print(f"Train Size: {len(KFOLD_INDICES[0][0])}, Val Size: {len(KFOLD_INDICES[0][1])}")
# # Train Dataset
def get_train_dataset(kfold, drop_remainder=True):
train_idxs, _ = KFOLD_INDICES[kfold]
# TRAIN DATASET
input_ids = np.array(list(train.loc[train_idxs, "input_ids"]), dtype=np.int32)
ratio_vector = train_ratio_vectors[train_idxs]
train_x = {
"input_ids": input_ids,
"ratio_vector": ratio_vector,
}
train_y = train.loc[train_idxs, "target"]
train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))
if drop_remainder:
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.shuffle(len(train_idxs))
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=drop_remainder)
train_dataset = train_dataset.prefetch(1)
if drop_remainder:
TRAIN_STEPS_PER_EPOCH = len(train_idxs) // BATCH_SIZE
else:
TRAIN_STEPS_PER_EPOCH = math.ceil(len(train_idxs) / BATCH_SIZE)
return train_dataset, TRAIN_STEPS_PER_EPOCH
train_dataset, TRAIN_STEPS_PER_EPOCH = get_train_dataset(0, drop_remainder=False)
train_x, train_y = next(iter(train_dataset))
print(f"train_x keys: {list(train_x.keys())}")
print(f"train_y shape: {train_y.shape}, train_y dtype {train_y.dtype}")
# # Val Dataset
def get_val_dataset(kfold, drop_remainder=True):
_, val_idxs = KFOLD_INDICES[kfold]
# VAL DATASET
input_ids = np.array(list(train.loc[val_idxs, "input_ids"]), dtype=np.int32)
ratio_vector = train_ratio_vectors[val_idxs]
val_x = {
"input_ids": input_ids,
"ratio_vector": ratio_vector,
}
val_y = train.loc[val_idxs, "target"]
val_dataset = tf.data.Dataset.from_tensor_slices((val_x, val_y))
val_dataset = val_dataset.batch(BATCH_SIZE, drop_remainder=False)
val_dataset = val_dataset.prefetch(1)
VAL_STEPS_PER_EPOCH = math.ceil(len(val_idxs) / BATCH_SIZE)
return val_dataset, VAL_STEPS_PER_EPOCH
val_dataset, VAL_STEPS_PER_EPOCH = get_val_dataset(0)
val_x, val_y = next(iter(val_dataset))
print(f"val_x keys: {list(val_x.keys())}")
print(f"val_y shape: {val_y.shape}, val_y dtypeL {val_y.dtype}")
# # Learning Rate Scheduler
TRAIN_LEN = len(KFOLD_INDICES[0][0])
TRAIN_ROUNDS = 4
STEPS_PER_EPOCH = TRAIN_LEN // (BATCH_SIZE * 16)
EPOCHS = (TRAIN_ROUNDS * TRAIN_LEN) // (STEPS_PER_EPOCH * BATCH_SIZE)
LR_RAMPUP_ITERATIONS = 0
LR_RAMPUP_EPOCHS = int(
LR_RAMPUP_ITERATIONS * (len(KFOLD_INDICES[0][0]) / (BATCH_SIZE * STEPS_PER_EPOCH))
)
print(
f"EPOCHS: {EPOCHS}, STEPS_PER_EPOCH: {STEPS_PER_EPOCH}, LR_RAMPUP_EPOCHS: {LR_RAMPUP_EPOCHS}"
)
# # Training
print("=" * 20, f"start", "=" * 20)
print()
# Histories
HISTORIES = dict()
# K-fold training loop
for fold in range(KFOLDS):
# Model Checkpoint
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
f"model_fold_{fold}.h5",
monitor="val_RMSE",
save_best_only=True,
save_weights_only=True,
verbose=1,
mode="min",
)
print("=" * 10, f"FOLD {fold}", "=" * 10)
# Models
model = get_model()
# Datasets
train_dataset, TRAIN_STEPS_PER_EPOCH = get_train_dataset(fold)
val_dataset, VAL_STEPS_PER_EPOCH = get_val_dataset(fold)
print(
f"TRAIN_STEPS_PER_EPOCH: {TRAIN_STEPS_PER_EPOCH}, VAL_STEPS_PER_EPOCH: {VAL_STEPS_PER_EPOCH}"
)
# Train Model
HISTORIES[f"FOLD_{fold}"] = model.fit(
train_dataset,
epochs=EPOCHS,
verbose=0,
# Hardcode Steps Per Epoch
steps_per_epoch=STEPS_PER_EPOCH,
# validation
validation_data=val_dataset,
validation_steps=VAL_STEPS_PER_EPOCH,
# callbacks
callbacks=[
checkpoint_callback,
],
)
# OOF RMSE
OOF_RMSE = []
for fold in range(KFOLDS):
OOF_RMSE.append(min(HISTORIES[f"FOLD_{fold}"].history["val_RMSE"]))
print()
print(", ".join([f"fold {i}: {rmse:.4f}" for i, rmse in zip(range(KFOLDS), OOF_RMSE)]))
print(f"OOF_RMSE: {np.mean(OOF_RMSE):.4f}")
print()
# # Train History
def plot_history_metric(history, metric, axes, fold):
N_EPOCHS = len(history.history["loss"])
x = [1, 5] + [10 + 5 * idx for idx in range((N_EPOCHS - 10) // 5 + 1)]
x_ticks = np.arange(1, N_EPOCHS + 1)
val = "val" in "".join(history.history.keys())
# summarize history for the given metric
axes.plot(x_ticks, history.history[metric])
if val:
val_values = history.history[f"val_{metric}"]
val_argmin = np.argmin(val_values)
axes.scatter(
val_argmin + 1, val_values[val_argmin], color="red", s=50, marker="o"
)
axes.plot(x_ticks, val_values)
axes.set_title(f"Fold {fold} - Model {metric}", fontsize=20)
axes.set_ylabel(metric, fontsize=16)
axes.set_xlabel("epoch", fontsize=16)
axes.tick_params(axis="x", labelsize=8)
axes.set_xticks(x)  # show a thinned set of epoch ticks, starting at epoch 1
axes.legend(["train"] + (["test"] if val else []), prop={"size": 18})
axes.grid()
fig, axes = plt.subplots(KFOLDS, 2, figsize=(15, 6 * KFOLDS))
for fold in range(KFOLDS):
history = HISTORIES[f"FOLD_{fold}"]
plot_history_metric(history, "loss", axes[fold, 0], fold)
plot_history_metric(history, "RMSE", axes[fold, 1], fold)
plt.subplots_adjust(hspace=0.40, wspace=0.20)
| false | 2 | 3,659 | 0 | 3,682 | 3,659 |
||
69046772
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
stock_1 = pd.read_csv(
"/kaggle/input/show-your-data-skills-snu21/stock.csv"
) # recheck the filepath from the above code cell output
# # Clean and create the resampled spread below. Best of Luck! :)
stock_1.head()
stock_1.shape
stock_1.info()
df = stock_1.copy()
df.head()
df = df[df["Volume"] >= 5]
df.shape
df["Timestamp"] = pd.to_datetime(df["Timestamp"])
df2 = df[(df["Timestamp"].dt.hour <= 22) & (df["Timestamp"].dt.hour >= 10)]
df2 = df2.drop(columns=["Hour"], errors="ignore")  # drop the helper column if it exists
df2.head(15)
df2.shape
q = df.set_index("Timestamp")
q.head(10)
dict = {"Open": "first", "High": "max", "Low": "min", "Close": "last", "Volume": "sum"}
q = q.resample("24H", closed="left", label="left").apply(dict)
q.head(24)
q.shape
resampled_df = q.dropna()
resampled_df.shape
# # Before submitting ensure that you have 5 columns ( Open, High, low, Close, Volume) in case you have the Timestamp column set as index OR 6 columns (Timestamp, Open, High, low, Close, Volume) in case you have index as 0,1,2,3,...
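# Hedged sanity check (illustrative, not from the original notebook): confirm the frame
# carries the expected OHLCV columns before writing the submission file.
expected_cols = {"Open", "High", "Low", "Close", "Volume"}
missing_cols = expected_cols - set(resampled_df.columns)
assert not missing_cols, f"Missing columns: {missing_cols}"
print(f"Columns OK; index name: {resampled_df.index.name}; shape: {resampled_df.shape}")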
resampled_df.shape
# resampled_df.to_csv('submission.csv', index=False) # use index=False if you have 6 columns as specified above
resampled_df.to_csv(
"submission.csv", index=True
) # use index=True if you have 5 columns
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046772.ipynb
| null | null |
[{"Id": 69046772, "ScriptId": 18842152, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7984187, "CreationDate": "07/26/2021 08:45:19", "VersionNumber": 2.0, "Title": "Sample_Notebook", "EvaluationDate": "07/26/2021", "IsChange": false, "TotalLines": 78.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 78.0, "LinesInsertedFromFork": 47.0, "LinesDeletedFromFork": 1.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 31.0, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
stock_1 = pd.read_csv(
"/kaggle/input/show-your-data-skills-snu21/stock.csv"
) # recheck the filepath from the above code cell output
# # Clean and create the resampled spread below. Best of Luck! :)
stock_1.head()
stock_1.shape
stock_1.info()
df = stock_1.copy()
df.head()
df = df[df["Volume"] >= 5]
df.shape
df["Timestamp"] = pd.to_datetime(df["Timestamp"])
df2 = df[(df["Timestamp"].dt.hour <= 22) & (df["Timestamp"].dt.hour >= 10)]
df2 = df2.drop(columns=["Hour"], errors="ignore")  # drop the helper column if it exists
df2.head(15)
df2.shape
q = df.set_index("Timestamp")
q.head(10)
dict = {"Open": "first", "High": "max", "Low": "min", "Close": "last", "Volume": "sum"}
q = q.resample("24H", closed="left", label="left").apply(dict)
q.head(24)
q.shape
resampled_df = q.dropna()
resampled_df.shape
# # Before submitting ensure that you have 5 columns ( Open, High, low, Close, Volume) in case you have the Timestamp column set as index OR 6 columns (Timestamp, Open, High, low, Close, Volume) in case you have index as 0,1,2,3,...
resampled_df.shape
# resampled_df.to_csv('submission.csv', index=False) # use index=False if you have 6 columns as specified above
resampled_df.to_csv(
"submission.csv", index=True
) # use index=True if you have 5 columns
| false | 0 | 613 | 0 | 613 | 613 |
||
69046835
|
<jupyter_start><jupyter_text>Shark attack dataset
# Global Shark Attack
Because they provide a glimpse - a window - into the world of sharks and their behaviors. By understanding when and why shark attacks occur, it is possible to lessen the likelihood of these incidents. Humans are familiar with predators found on land; we know enough not to walk into a pride of lions and we don't try to pet a growling dog that is baring its teeth. Similarly, we need to recognize and avoid potentially dangerous situations in the water. The individual case histories provide insights about specific geographical areas and their indigenous species of sharks. However, when all known case histories worldwide are examined, much is revealed about species behavior, and specific patterns emerge.
Most of the incidents in the Global Shark Attack File have nothing to do with predation on humans. Some accidents are motivated by a displacement or territorial behavior when a shark feels threatened; still others are the result of the shark responding to sensory predatory input (i.e., overwhelmed by the presence of many fishes) and environmental conditions (murky water), which may cause them to respond in a reflexive response to stimuli. Sharks also exhibit curiosity and may investigate unknown or unfamiliar objects; they learn by exploring their environment, and - lacking hands - they use their mouths and teeth to examine unfamiliar objects.
A very small percentage of shark species, about two dozen, are considered potentially dangerous to humans because of their size and dentition. Yet each year, for every human killed by a shark, our species slaughters millions of sharks - about 73 million sharks last year. We are stripping the world's oceans of one of its most valuable predators - animals that play a critical role in maintaining the health of the world's oceans. An unreasonable fear of sharks has been implanted in our minds by the hype that surrounds the rare shark attack and by movies that exploit our primal fears. It is the mission of the Global Shark Attack File to present facts about these events, thus enabling them to be put in perspective. Sharks are vital to the ocean ecosystem. Without sharks our planet's ocean would soon become a watery graveyard. This is not the legacy the Global Shark Accident File and the Shark Research Institute wishes to leave our children and our children's children.
The Global Shark Attack File was created to provide medical personnel, shark behaviorists, lifesavers, and the media with meaningful information resulting from the scientific forensic examination of shark accidents. Whenever possible, our investigators conduct personal interviews with victims and witnesses, medical personnel and other professionals, and conduct examinations of the incident site. Weather and sea conditions and environmental data are evaluated in an attempt to identify factors that contributed to the incident.
Early on, we became aware that the word "attack" was usually a misnomer. An "attack" by a shark is an extremely rare event, even less likely than statistics suggest. When a shark bites a surfboard, leaving the surfer unharmed, it was historically recorded as an "attack". Collisions between humans and sharks in low visibility water were also recorded as "attacks".
When a shark grabs a person by the hand/wrist and tows them along the surface, tosses a surfboard (or a Frisbee as in case 1968.08.24) it is probably "play behavior", not aggression. How can case GSAF 1971.04.11 which the swimmer was repeatedly bitten by a large shark and case 1985.01.04 in which the diver's injury necessitated a Band-aid be compared? It is akin to comparing a head-on high-speed vehicular collision with a shopping cart ding on the door of a parked car. Global Shark Attack File believes the only way to sort fact from hype is by forensic examination of each incident.
Although incidents that occur in remote areas may go unrecorded, the Global Shark Attack File is a compilation of a number of data sources, and we have a team of qualified researchers throughout the world that actively investigate these incidents. One of our objectives is to provide a clear picture of the actual threat presented by sharks to humans. In this regard, we remind our visitors that more people drown in a single year in the United States than have been killed by sharks throughout the entire world in the last two centuries.
Copyright © 2005, Shark Research Institute, Inc. All rights reserved
Kaggle dataset identifier: shark-attack-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
shark = pd.read_csv("../input/shark-attack-dataset/attacks.csv")
shark.head()
shark.columns
shark = pd.DataFrame(shark)
shark["Sex "].value_counts()
sharka = shark.groupby(["Sex ", "Fatal (Y/N)"], as_index=False).size()
sharka = sharka.sort_values(by=["size"], ascending=False)
sharka = sharka[0:7]
sharka.drop([5], inplace=True)
sharka
import matplotlib.pyplot as plt
mlabels = [
"Male Fatal",
"Male Non Fatal",
"Female Non Fatal",
"Female Fatal",
"Male Unknown",
"Female Unknown",
]
plt.pie(sharka["size"], labels=mlabels, autopct="%1.1f%%")
plt.title("comparison of fatal/non fatal accidents among women and men")
fig = plt.gcf()
fig.set_size_inches(12, 12)
plt.show()
shark.dropna(subset=["Activity"], inplace=True)
from wordcloud import WordCloud
words = shark["Activity"].astype(str).tolist()
words = " ".join(words)
plt.figure(figsize=(12, 12))
plt.imshow(WordCloud().generate(words))
sharkb = shark.groupby(["Country"], as_index=False).size()
sharkb = sharkb.sort_values(by=["size"], ascending=False)
sharkb
import plotly.express as px
px.choropleth(
sharkb,
locations="Country",
color="size",
color_continuous_scale="Turbo",
locationmode="country names",
scope="world",
range_color=(0, 2000),
title="",
height=600,
)
import seaborn as sns
sharkb = sharkb[0:5]
plt.figure(figsize=(18, 10))
plt.title("top 5 countries with the most shark attacks")
sns.barplot(x="Country", y="size", data=sharkb)
sharkc = shark.groupby(["Species "], as_index=False).size()
sharkc = sharkc.sort_values(by=["size"], ascending=False)
sharkc.drop(
[783, 1033, 1045, 1044, 409, 480, 152, 109, 941, 943, 87, 350, 454, 411, 231, 324],
inplace=True,
)
sharkc = sharkc[0:4]
sharkc
mlabels = ["White shark", "Tiger shark", "Bull shark", "Wobbegong shark"]
plt.pie(sharkc["size"], labels=mlabels, autopct="%1.1f%%")
plt.title("percentage share between the 4 most dangerous sharks")
fig = plt.gcf()
fig.set_size_inches(12, 12)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046835.ipynb
|
shark-attack-dataset
|
felipeesc
|
[{"Id": 69046835, "ScriptId": 18842772, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7206642, "CreationDate": "07/26/2021 08:46:07", "VersionNumber": 4.0, "Title": "Most dangeurous sharks,countries+Worldmap+analysis", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 85.0, "LinesInsertedFromPrevious": 20.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 65.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91776789, "KernelVersionId": 69046835, "SourceDatasetVersionId": 2462873}]
|
[{"Id": 2462873, "DatasetId": 1490782, "DatasourceVersionId": 2505301, "CreatorUserId": 7756990, "LicenseName": "Other (specified in description)", "CreationDate": "07/25/2021 23:04:38", "VersionNumber": 1.0, "Title": "Shark attack dataset", "Slug": "shark-attack-dataset", "Subtitle": "Global Shark attack - https://www.sharkattackfile.net/index.htm", "Description": "# Global Shark Attack\nBecause they provide a glimpse - a window - into the world of sharks and their behaviors. By understanding when and why shark attacks occur, it is possible to lessen the likelihood of these incidents. Humans are familiar with predators found on land; we know enough not to walk into a pride of lions and we don't try to pet a growling dog that is baring its teeth. Similarly, we need to recognize and avoid potentially dangerous situations in the water. The individual case histories provide insights about specific geographical areas and their indigenous species of sharks. However, when all known case histories worldwide are examined, much is revealed about species behavior, and specific patterns emerge.\n\nMost of the incidents in the Global Shark Attack File have nothing to do with predation on humans. Some accidents are motivated by a displacement or territorial behavior when a shark feels threatened; still others are the result of the shark responding to sensory predatory input (i.e., overwhelmed by the presence of many fishes) and environmental conditions (murky water), which may cause them to respond in a reflexive response to stimuli. Sharks also exhibit curiosity and may investigate unknown or unfamiliar objects; they learn by exploring their environment, and - lacking hands - they use their mouths and teeth to examine unfamiliar objects.\n\nA very small percentage of shark species, about two dozen, are considered potentially dangerous to humans because of their size and dentition. Yet each year, for every human killed by a shark, our species slaughters millions of sharks - about 73 million sharks last year. We are stripping the world's oceans of one of its most valuable predators - animals that play a critical role in maintaining the health of the world's oceans. An unreasonable fear of sharks has been implanted in our minds by the hype that surrounds the rare shark attack and by movies that exploit our primal fears. It is the mission of the Global Shark Attack File to present facts about these events, thus enabling them to be put in perspective. Sharks are vital to the ocean ecosystem. Without sharks our planet's ocean would soon become a watery graveyard. This is not the legacy the Global Shark Accident File and the Shark Research Institute wishes to leave our children and our children's children.\n\nThe Global Shark Attack File was created to provide medical personnel, shark behaviorists, lifesavers, and the media with meaningful information resulting from the scientific forensic examination of shark accidents. Whenever possible, our investigators conduct personal interviews with victims and witnesses, medical personnel and other professionals, and conduct examinations of the incident site. Weather and sea conditions and environmental data are evaluated in an attempt to identify factors that contributed to the incident.\n\nEarly on, we became aware that the word \"attack\" was usually a misnomer. An \"attack\" by a shark is an extremely rare event, even less likely than statistics suggest. When a shark bites a surfboard, leaving the surfer unharmed, it was historically recorded as an \"attack\". 
Collisions between humans and sharks in low visibility water were also recorded as \"attacks\".\n\nWhen a shark grabs a person by the hand/wrist and tows them along the surface, tosses a surfboard (or a Frisbee as in case 1968.08.24) it is probably \"play behavior\", not aggression. How can case GSAF 1971.04.11 which the swimmer was repeatedly bitten by a large shark and case 1985.01.04 in which the diver's injury necessitated a Band-aid be compared? It is akin to comparing a head-on high-speed vehicular collision with a shopping cart ding on the door of a parked car. Global Shark Attack File believes the only way to sort fact from hype is by forensic examination of each incident.\n\nAlthough incidents that occur in remote areas may go unrecorded, the Global Shark Attack File is a compilation of a number of data sources, and we have a team of qualified researchers throughout the world that actively investigate these incidents. One of our objectives is to provide a clear picture of the actual threat presented by sharks to humans. In this regard, we remind our visitors that more people drown in a single year in the United States than have been killed by sharks throughout the entire world in the last two centuries.\n\n\n\nCopyright \u00a9 2005, Shark Research Institute, Inc. All rights reserved", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1490782, "CreatorUserId": 7756990, "OwnerUserId": 7756990.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2462873.0, "CurrentDatasourceVersionId": 2505301.0, "ForumId": 1510486, "Type": 2, "CreationDate": "07/25/2021 23:04:38", "LastActivityDate": "07/25/2021", "TotalViews": 16257, "TotalDownloads": 2297, "TotalVotes": 46, "TotalKernels": 7}]
|
[{"Id": 7756990, "UserName": "felipeesc", "DisplayName": "Felipe_Esc", "RegisterDate": "06/24/2021", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
shark = pd.read_csv("../input/shark-attack-dataset/attacks.csv")
shark.head()
shark.columns
shark = pd.DataFrame(shark)
shark["Sex "].value_counts()
sharka = shark.groupby(["Sex ", "Fatal (Y/N)"], as_index=False).size()
sharka = sharka.sort_values(by=["size"], ascending=False)
sharka = sharka[0:7]
sharka.drop([5], inplace=True)
sharka
import matplotlib.pyplot as plt
mlabels = [
"Male Fatal",
"Male Non Fatal",
"Female Non Fatal",
"Female Fatal",
"Male Unknown",
"Female Unknown",
]
plt.pie(sharka["size"], labels=mlabels, autopct="%1.1f%%")
plt.title("comparison of fatal/non fatal accidents among women and men")
fig = plt.gcf()
fig.set_size_inches(12, 12)
plt.show()
shark.dropna(subset=["Activity"], inplace=True)
from wordcloud import WordCloud
words = shark["Activity"].astype(str).tolist()
words = " ".join(words)
plt.figure(figsize=(12, 12))
plt.imshow(WordCloud().generate(words))
sharkb = shark.groupby(["Country"], as_index=False).size()
sharkb = sharkb.sort_values(by=["size"], ascending=False)
sharkb
import plotly.express as px
px.choropleth(
sharkb,
locations="Country",
color="size",
color_continuous_scale="Turbo",
locationmode="country names",
scope="world",
range_color=(0, 2000),
title="",
height=600,
)
import seaborn as sns
sharkb = sharkb[0:5]
plt.figure(figsize=(18, 10))
plt.title("top 5 countries with the most shark attacks")
sns.barplot(x="Country", y="size", data=sharkb)
sharkc = shark.groupby(["Species "], as_index=False).size()
sharkc = sharkc.sort_values(by=["size"], ascending=False)
sharkc.drop(
[783, 1033, 1045, 1044, 409, 480, 152, 109, 941, 943, 87, 350, 454, 411, 231, 324],
inplace=True,
)
sharkc = sharkc[0:4]
sharkc
mlabels = ["White shark", "Tiger shark", "Bull shark", "Wobbegong shark"]
plt.pie(sharkc["size"], labels=mlabels, autopct="%1.1f%%")
plt.title("percentage share between the 4 most dangerous sharks")
fig = plt.gcf()
fig.set_size_inches(12, 12)
plt.show()
| false | 1 | 928 | 0 | 2,026 | 928 |
||
69046687
| "<jupyter_start><jupyter_text>quora-question-pair-hand-annotated-dataset\nKaggle dataset identifier:(...TRUNCATED) |
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046687.ipynb
|
quoraquestionpairhandannotateddataset
|
huikang
| "[{\"Id\": 69046687, \"ScriptId\": 18844119, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9(...TRUNCATED) |
[{"Id": 91776448, "KernelVersionId": 69046687, "SourceDatasetVersionId": 2455641}]
| "[{\"Id\": 2455641, \"DatasetId\": 1486121, \"DatasourceVersionId\": 2498036, \"CreatorUserId\": 168(...TRUNCATED) | "[{\"Id\": 1486121, \"CreatorUserId\": 1680925, \"OwnerUserId\": 1680925.0, \"OwnerOrganizationId\":(...TRUNCATED) | "[{\"Id\": 1680925, \"UserName\": \"huikang\", \"DisplayName\": \"Tong Hui Kang\", \"RegisterDate\":(...TRUNCATED) | "# This notebook covers\n# - Dataset Preparation (train-test split)\n# - TF-IDF indexes\n# - Evaluat(...TRUNCATED) | "[{\"quoraquestionpairhandannotateddataset/df_handeval.csv\": {\"column_names\": \"[\\\"test_qid\\\"(...TRUNCATED) | true | 2 | "<start_data_description><data_path>quoraquestionpairhandannotateddataset/df_handeval.csv:\n<column_(...TRUNCATED) | 16,591 | 0 | 17,256 | 16,591 |
69046660
| "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd(...TRUNCATED) |
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/046/69046660.ipynb
| null | null | "[{\"Id\": 69046660, \"ScriptId\": 18841892, \"ParentScriptVersionId\": NaN, \"ScriptLanguageId\": 9(...TRUNCATED) | null | null | null | null | "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd(...TRUNCATED) | false | 0 | 627 | 0 | 627 | 627 |
End of preview.
- Downloads last month: 1