import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# As I mentioned in the MP1 assignment, I did these assignments out of order, so this is actually the data
# I will be using for my mini project.
# I connected to the Twitter API to examine what topics are trending in the Seattle area. This is interesting to me
# as a Twitter user and Seattle local.
# I looked at this page for reference on how to access the Twitter API: http://socialmedia-class.org/twittertutorial.html
import json
import tweepy
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
CONSUMER_KEY = user_secrets.get_secret("TwitterKey")
CONSUMER_SECRET = user_secrets.get_secret("TwitterSecretKey")
ACCESS_SECRET = user_secrets.get_secret("TwitterSecretToken")
ACCESS_TOKEN = user_secrets.get_secret("TwitterToken")
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(
auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True
)
# this prints the tweets on my home page:
for status in tweepy.Cursor(api.home_timeline).items(200):
print(status._json)
# this prints the trending topics for a location based on the WOEID:
sea_trends = api.trends_place(id=2490383)
print(json.dumps(sea_trends, indent=4))
with open("sea_trends.txt", "w") as outfile:
json.dump(sea_trends, outfile, indent=4)
for dirname, _, filenames in os.walk("/kaggle/working"):
for filename in filenames:
print(os.path.join(dirname, filename))
# this is as far as I could get with making a data frame out of the above trend data.
# the file holds a JSON list (one element wrapping the actual "trends" records), so read_json with orient="split" can't parse it into columns directly.
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_json("/kaggle/working/sea_trends.txt", orient="split")
df
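# The trends response is a one-element list whose first item holds the actual "trends"
# records, which is why read_json with orient="split" fails. A minimal sketch of parsing
# it directly (assuming the sea_trends object from the API call above is still in memory):
trends_df = pd.DataFrame(sea_trends[0]["trends"])
trends_df[["name", "tweet_volume"]].head()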
# since I can't get my API to work, here are some visualizations of some Home Price Index data I found in Kaggle
import pandas as pd
HPI = pd.read_csv("../input/hpindex/HPI_master.csv")
HPI
HPI.plot(kind="scatter", figsize=(10, 10), x="yr", y="index_nsa", alpha=0.3, c="Purple")
hist = HPI.hist(figsize=(12, 8))
HPI["index_nsa"].mean
# not sure why this doesn't work. Trying to show year on the x axis and the mean index_nsa on the y axis
mean = HPI["index_nsa"].mean
HPI.plot(kind="scatter", figsize=(10, 10), x="yr", y=mean, alpha=0.3, c="Purple")
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
Dropout,
concatenate,
Conv2DTranspose,
UpSampling2D,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import MeanIoU
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import LambdaCallback
import os
from tqdm import tqdm
# # Reading Data
train_dir = "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TRAIN"
train_mask_dir = (
"/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TRAIN_masks"
)
test_dir = "/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TEST"
test_mask_dir = (
"/kaggle/input/brain-and-breast-scans/Dataset/Brain scans/Tumor/TEST_masks"
)
def read_resize_img(img_path, img_size):
img = cv2.imread(img_path)
img = cv2.resize(img, img_size)
return img
def load_image(img_dir, mask_dir):
X = []
y = []
for img_name in tqdm(os.listdir(img_dir)):
img_path = os.path.join(img_dir, img_name)
mask_path = os.path.join(mask_dir, img_name.replace(".jpg", ".png"))
img = read_resize_img(img_path, (128, 128))[:, :, 0]
mask = read_resize_img(mask_path, (128, 128))[:, :, 0]
X.append(img)
y.append(mask)
return np.array(X).reshape(-1, 128, 128, 1), np.array(y) / 255
X_train, y_train = load_image(train_dir, train_mask_dir)
X_test, y_test = load_image(test_dir, test_mask_dir)
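# A quick sanity check on the loaded arrays (a sketch; the sample index 0 is arbitrary):
print("X_train:", X_train.shape, "y_train:", y_train.shape)
print("X_test:", X_test.shape, "y_test:", y_test.shape)
fig, ax = plt.subplots(1, 2, figsize=(6, 3))
ax[0].imshow(X_train[0, :, :, 0], cmap="gray")
ax[0].set_title("Scan")
ax[1].imshow(y_train[0], cmap="gray")
ax[1].set_title("Mask")
plt.tight_layout()
plt.show()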
# # Modelling
def unet(input_shape):
# Define the U-Net model
inputs = Input(input_shape)
# Downsample path
conv1 = Conv2D(64, 3, activation="relu", padding="same")(inputs)
conv1 = Conv2D(64, 3, activation="relu", padding="same")(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(pool1)
conv2 = Conv2D(128, 3, activation="relu", padding="same")(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(pool2)
conv3 = Conv2D(256, 3, activation="relu", padding="same")(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation="relu", padding="same")(pool3)
conv4 = Conv2D(512, 3, activation="relu", padding="same")(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# Bottleneck
conv5 = Conv2D(1024, 3, activation="relu", padding="same")(pool4)
conv5 = Conv2D(1024, 3, activation="relu", padding="same")(conv5)
drop5 = Dropout(0.5)(conv5)
# Upsample path
up6 = UpSampling2D(size=(2, 2))(drop5)
up6 = Conv2D(512, 2, activation="relu", padding="same")(up6)
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation="relu", padding="same")(merge6)
conv6 = Conv2D(512, 3, activation="relu", padding="same")(conv6)
up7 = UpSampling2D(size=(2, 2))(conv6)
up7 = Conv2D(256, 2, activation="relu", padding="same")(up7)
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation="relu", padding="same")(merge7)
conv7 = Conv2D(256, 3, activation="relu", padding="same")(conv7)
up8 = UpSampling2D(size=(2, 2))(conv7)
up8 = Conv2D(128, 2, activation="relu", padding="same")(up8)
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation="relu", padding="same")(merge8)
conv8 = Conv2D(128, 3, activation="relu", padding="same")(conv8)
up9 = UpSampling2D(size=(2, 2))(conv8)
up9 = Conv2D(64, 2, activation="relu", padding="same")(up9)
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation="relu", padding="same")(merge9)
conv9 = Conv2D(64, 3, activation="relu", padding="same")(conv9)
conv9 = Conv2D(1, 3, activation="relu", padding="same")(conv9)
# Output
output = Conv2D(1, 1, activation="sigmoid")(conv9)
# Define the model
model = Model(inputs=inputs, outputs=output)
return model
try:
del model
except NameError:
print("model is not defined")
model = unet(X_train.shape[1:])
def dice_coef(y_true, y_pred):
smooth = 1
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
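# dice_coef is used only as a metric in the compile call below. As a sketch (an
# alternative, not the configuration actually used here), the same coefficient could
# also drive training as a loss, alone or combined with binary cross-entropy:
from tensorflow.keras.losses import binary_crossentropy
def dice_loss(y_true, y_pred):
    return 1.0 - dice_coef(y_true, y_pred)
def bce_dice_loss(y_true, y_pred):
    # pixel-wise BCE averaged over the batch, plus the Dice term
    return K.mean(binary_crossentropy(y_true, y_pred)) + dice_loss(y_true, y_pred)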
model.compile(
optimizer=Adam(learning_rate=0.0001),
loss="binary_crossentropy",
metrics=[dice_coef],
)
model.fit(X_train, y_train, epochs=80, validation_data=(X_test, y_test))
model.save("./UNET-Eh.h5")
# Download File
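# MeanIoU is imported above but never used. A minimal sketch of applying it to the test
# set; thresholding both masks and predictions at 0.5 is an assumption, not part of the
# original training setup:
y_true_bin = (y_test > 0.5).astype(np.uint8)
y_pred_bin = (model.predict(X_test) > 0.5).astype(np.uint8)
miou = MeanIoU(num_classes=2)
miou.update_state(y_true_bin, y_pred_bin)
print("Test mean IoU:", miou.result().numpy())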
for i in range(10):
fig, ax = plt.subplots(1, 5, figsize=(10, 8))
mask_pred = np.squeeze(model.predict(np.expand_dims(X_test[i], axis=0)), axis=0)
ax[0].imshow(X_test[i], cmap="gray")
ax[0].set_title("Original Scan")
ax[1].imshow(y_test[i], cmap="gray")
ax[1].set_title("Actual Mask")
ax[2].imshow(mask_pred, cmap="gray")
ax[2].set_title("Pred Mask")
ax[3].imshow(X_test[i], cmap="gray")
ax[3].imshow(y_test[i], alpha=0.5, cmap="gray")
ax[3].set_title("Scan with Original Mask")
ax[4].imshow(X_test[i], cmap="gray")
ax[4].imshow(mask_pred, alpha=0.5, cmap="gray")
ax[4].set_title("Scan with Pred. Mask")
plt.tight_layout()
plt.show()
from matplotlib.colors import ListedColormap
for i in range(10):
fig, ax = plt.subplots(1, 5, figsize=(10, 8))
mask_pred = np.squeeze(model.predict(np.expand_dims(X_test[i], axis=0)), axis=0)
cmap = ListedColormap(["black", "red"])
ax[0].imshow(X_test[i], cmap="gray")
ax[0].set_title("Original Scan")
ax[1].imshow(y_test[i], cmap=cmap)
ax[1].set_title("Actual Mask")
ax[2].imshow(mask_pred, cmap=cmap)
ax[2].set_title("Pred Mask")
ax[3].imshow(X_test[i], cmap="gray")
ax[3].imshow(y_test[i], alpha=0.5, cmap=cmap)
ax[3].set_title("Scan with Original Mask")
ax[4].imshow(X_test[i], cmap="gray")
ax[4].imshow(mask_pred, alpha=0.5, cmap=cmap)
ax[4].set_title("Scan with Pred. Mask")
plt.tight_layout()
plt.show()
|
# **Code for importing the dataset**
import pandas as pd
Life_Expectancy_Data = pd.read_csv("../input/life-expectancy/Life Expectancy Data.csv")
Life_Expectancy_Data
# **Listing the dataset information which contains number of rows and columns, data types count and memory usage by the dataset.**
Life_Expectancy_Data.info()
# **Listing the columns in the dataset**
Life_Expectancy_Data.columns
# **From the above output it is clear that a few column names have extra spaces and some special characters. We have to clean them in order to make them appropriate for future use.
# I have used the strip() method to remove the leading and trailing spaces in the column names.**
Life_Expectancy_Data.columns = Life_Expectancy_Data.columns.str.strip()
Life_Expectancy_Data.columns
# **To remove the spaces in between the column names and replace them with _ (underscore), I use the replace() method.**
Life_Expectancy_Data.columns = Life_Expectancy_Data.columns.str.replace(" ", "_")
Life_Expectancy_Data.columns
# **Since the column 'HIV/AIDS' has the special character /, I rename it to HIV using the rename() method.**
Life_Expectancy_Data.rename(columns={"HIV/AIDS": "HIV"}, inplace=True)
Life_Expectancy_Data.columns
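# The three steps above can also be written as one chained pass over the column index
# (a sketch equivalent to the strip(), replace() and rename() calls already applied):
Life_Expectancy_Data.columns = (
    Life_Expectancy_Data.columns.str.strip().str.replace(" ", "_")
)
Life_Expectancy_Data = Life_Expectancy_Data.rename(columns={"HIV/AIDS": "HIV"})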
|
# # **Point Clouds with zarr**
# Generate Point Clouds while leveraging efficient image loading with zarr.
# Credit: https://www.kaggle.com/code/brettolsen/efficient-image-loading-with-zarr/notebook
import os
import shutil
import time
import PIL.Image as Image
from glob import glob
from tifffile import tifffile
import numpy as np
import torch.utils.data as data
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from tqdm import tqdm
from ipywidgets import interact, fixed
import zarr
import open3d as o3
INPUT_FOLDER = "/kaggle/input/vesuvius-challenge-ink-detection"
WORKING_FOLDER = "/kaggle/working/"
TEMP_FOLDER = "/kaggle/temp/"
class TimerError(Exception):
pass
class Timer:
def __init__(self, text=None):
if text is not None:
self.text = text + ": {:0.4f} seconds"
else:
self.text = "Elapsed time: {:0.4f} seconds"
def logfunc(x):
print(x)
self.logger = logfunc
self._start_time = None
def start(self):
if self._start_time is not None:
raise TimerError("Timer is already running. Use .stop() to stop it.")
self._start_time = time.time()
def stop(self):
if self._start_time is None:
raise TimerError("Timer is not running. Use .start() to start it.")
elapsed_time = time.time() - self._start_time
self._start_time = None
if self.logger is not None:
self.logger(self.text.format(elapsed_time))
return elapsed_time
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.stop()
class FragmentImageException(Exception):
pass
class FragmentImageData:
"""A general class that uses persistent zarr objects to store the surface volume data,
binary data mask, and for training sets, the truth data and infrared image of a papyrus
fragment, in a compressed and efficient way.
"""
def __init__(self, sample_type: str, sample_index: str, working: bool = True):
if sample_type not in ("test", "train"):
raise FragmentImageException(
f"Invalid sample type f{sample_type}, must be one of 'test' or 'train'"
)
zarrpath = self._zarr_path(sample_type, sample_index, working)
if os.path.exists(zarrpath):
self.zarr = self.load_from_zarr(zarrpath)
else:
dirpath = os.path.join(INPUT_FOLDER, sample_type, sample_index)
if not os.path.exists(dirpath):
raise FragmentImageException(
f"No input data found at f{zarrpath} or f{dirpath}"
)
self.zarr = self.load_from_directory(dirpath, zarrpath)
@property
def surface_volume(self):
return self.zarr.surface_volume
@property
def mask(self):
return self.zarr.mask
@property
def truth(self):
return self.zarr.truth
@property
def infrared(self):
return self.zarr.infrared
@staticmethod
def _zarr_path(sample_type: str, sample_index: str, working: bool = True):
filename = f"{sample_type}-{sample_index}.zarr"
if working:
return os.path.join(WORKING_FOLDER, filename)
else:
return os.path.join(TEMP_FOLDER, filename)
@staticmethod
def clean_zarr(sample_type: str, sample_index: str, working: bool = True):
zarrpath = FragmentImageData._zarr_path(sample_type, sample_index, working)
if os.path.exists(zarrpath):
shutil.rmtree(zarrpath)
@staticmethod
def load_from_zarr(filepath):
with Timer("Loading from existing zarr"):
return zarr.open(filepath, mode="r")
@staticmethod
def load_from_directory(dirpath, zarrpath):
if os.path.exists(zarrpath):
raise FragmentImageException(
f"Trying to overwrite existing zarr at f{zarrpath}"
)
# Initialize the root zarr group and write the file
root = zarr.open_group(zarrpath, mode="w")
# Load in the surface volume tif files
with Timer("Surface volume loading"):
init = True
imgfiles = sorted(
[
imgfile
for imgfile in os.listdir(os.path.join(dirpath, "surface_volume"))
]
)
for imgfile in imgfiles:
print(f"Loading file {imgfile}", end="\r")
img_data = np.array(
Image.open(os.path.join(dirpath, "surface_volume", imgfile))
)
if init:
surface_volume = root.zeros(
name="surface_volume",
shape=(img_data.shape[0], img_data.shape[1], len(imgfiles)),
chunks=(1000, 1000, 4),
dtype=img_data.dtype,
write_empty_chunks=False,
)
init = False
z_index = int(imgfile.split(".")[0])
surface_volume[:, :, z_index] = img_data
# Load in the mask
with Timer("Mask loading"):
img_data = np.array(
Image.open(os.path.join(dirpath, "mask.png")), dtype=bool
)
mask = root.array(
name="mask",
data=img_data,
shape=img_data.shape,
chunks=(1000, 1000),
dtype=img_data.dtype,
write_empty_chunks=False,
)
# Load in the truth set (if it exists)
with Timer("Truth set loading"):
truthfile = os.path.join(dirpath, "inklabels.png")
if os.path.exists(truthfile):
img_data = np.array(Image.open(truthfile), dtype=bool)
truth = root.array(
name="truth",
data=img_data,
shape=img_data.shape,
chunks=(1000, 1000),
dtype=img_data.dtype,
write_empty_chunks=False,
)
# Load in the infrared image (if it exists)
with Timer("Infrared image loading"):
irfile = os.path.join(dirpath, "ir.png")
if os.path.exists(irfile):
img_data = np.array(Image.open(irfile))
infrared = root.array(
name="infrared",
data=img_data,
shape=img_data.shape,
chunks=(1000, 1000),
dtype=img_data.dtype,
write_empty_chunks=False,
)
return root
# # Load data
FragmentImageData.clean_zarr("train", 1)
data = FragmentImageData("train", "1")
print(data.surface_volume.info)
print(data.mask.info)
print(data.truth.info)
print(data.infrared.info)
with Timer():
plt.imshow(data.mask, cmap="gray")
with Timer():
plt.imshow(data.surface_volume[:, :, 20], cmap="gray")
# ### Plot vertical slices of the surface volumes
with Timer():
plt.figure(figsize=(10, 1))
plt.imshow(data.surface_volume[2000, :, :].T, cmap="gray", aspect="auto")
with Timer():
plt.figure(figsize=(10, 1))
plt.imshow(data.surface_volume[:, 2000, :].T, cmap="gray", aspect="auto")
# # Display Voxels
data.surface_volume.shape
voxelarray = np.array(data.surface_volume[1000:2000, 1000:2000, 0:10])
type(voxelarray)
voxelarray.shape
ax = plt.figure().add_subplot(projection="3d")
# ax.voxels(voxelarray, facecolors=colors, edgecolor='k')
ax.voxels(voxelarray, edgecolor="k")
# # Create Point Cloud
# ## Sample from Surface Volumes
data.surface_volume.shape
ROWS = data.surface_volume.shape[0]
COLS = data.surface_volume.shape[1]
Z_DIM = data.surface_volume.shape[2] # number of volume slices
N_SAMPLES = 1000
type(np.ravel(data.mask))
with Timer():
# sample from valid regions of surface volume
c = np.ravel(data.mask).cumsum()
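# the cumulative sum acts as an inverse CDF over the flattened mask: uniform draws in
# [0, c[-1]) pushed through searchsorted below land only on indices where the mask is True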
samples = np.random.uniform(low=0, high=c[-1], size=(N_SAMPLES, Z_DIM)).astype(int)
# get valid indexes
x, y = np.unravel_index(c.searchsorted(samples), data.mask.shape)
x, y = x[np.newaxis, ...], y[np.newaxis, ...]
# get z dimensions from surface volume locations
z = np.arange(0, Z_DIM)
z = np.tile(z, N_SAMPLES).reshape(N_SAMPLES, -1)[np.newaxis, ...]
# get point cloud
xyz = np.vstack((x, y, z))
xyz.shape
# ### Get Normalized Intensities
intensities = np.zeros((N_SAMPLES, Z_DIM))
with Timer():
for i in range(Z_DIM):
img = data.surface_volume[:, :, i]
intensities[:, i] = img[xyz[0, :, i], xyz[1, :, i]] / 65535.0
intensities = intensities.astype(np.float32)
# #### Sanity Check
print(xyz[:, 20, 1], intensities[20, 1])
print(
xyz.T.reshape((-1, 3))[20 + N_SAMPLES, :],
intensities.T.reshape((-1))[20 + N_SAMPLES],
)
# ### Reshape and Normalize
xyz = xyz.T.reshape((-1, 3))
xyz = xyz / xyz.max(axis=0)
intensities = intensities.T.reshape((-1)).repeat((3)).reshape((-1, 3))
# ## Get Colormap and Convert to Point Cloud
colors = plt.get_cmap("bone") # also use 'cool', 'bone'
colors
pcd = o3.geometry.PointCloud()
pcd.points = o3.utility.Vector3dVector(xyz)
pcd.colors = o3.utility.Vector3dVector(colors(intensities)[:, 0, :3])
pcd
# # Display Point Cloud
o3.visualization.draw_plotly([pcd])
voxel_grid = o3.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.005)
voxel_grid
o3.visualization.draw_plotly([voxel_grid])
o3.visualization.draw_geometries([voxel_grid])
idx = 0
cnts, bins, _ = plt.hist(
np.ravel(data.surface_volume[:, :, idx][data.mask]) / 65535.0, bins=100
)
plt.savefig(os.path.join(WORKING_FOLDER, f"hist_{idx}.png"))
# # Animate intensity Histograms for each surface layer
# Animation code reused from: https://www.kaggle.com/code/leonidkulyk/eda-vc-id-volume-layers-animation
from celluloid import Camera
from IPython.display import HTML, display
fig, ax = plt.subplots(1, 1)
camera = Camera(fig) # define the camera that gets the fig we'll plot
for i in range(Z_DIM):
cnts, bins, _ = plt.hist(
np.ravel(data.surface_volume[:, :, i][data.mask]) / 65535.0, bins=100
)
ax.set_title(f"Surfacer Layer: {i}")
camera.snap() # the camera takes a snapshot of the plot
plt.close(fig) # close figure
animation = camera.animate() # get plt animation
fix_video_adjust = (
"<style> video {margin: 0px; padding: 0px; width:100%; height:auto;} </style>"
)
display(HTML(fix_video_adjust + animation.to_html5_video())) # displaying the animation
|
import os
from tqdm.auto import tqdm
import time, gc
import numpy as np
import pandas as pd
# pd.set_option('display.max_columns', None)
from matplotlib import pyplot as plt
import cv2
import albumentations as A
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, Input, load_model
from keras.layers import Dense, Conv2D, Flatten, Activation, Concatenate
from keras.layers import MaxPool2D, AveragePooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, BatchNormalization
from keras.optimizers import Adam
from keras import backend as K
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras.initializers import RandomNormal
from keras.applications import DenseNet121
from sklearn.model_selection import train_test_split
start_time = time.time()
# ## Resource path setting
dataset = "/kaggle/input/bengaliai-cv19"
pretrained = "../input/bangla-graphemes-pretrained-weights"
# ## Checking Model
if os.path.isfile(
os.path.join(pretrained, "GraphemeDenseNet121.h5")
) and os.path.isfile(os.path.join(pretrained, "hist.csv")):
print("Model is present")
else:
print("Error. No Model Found")
# ## Size and Channel of images
SIZE = 112 # input image size
N_ch = 1
# ## Loading Pretrained Densenet121 Model
# ### Batch Size: 256
# ### Epochs: 30 (Early Stopped in 20)
model = load_model(os.path.join(pretrained, "GraphemeDenseNet121.h5"))
# ## DenseNet121 Model Summary
model.summary()
# ## Loading Images and Pre-processing
# Resize image size
def resize(df, size=112):
resized = {}
resize_size = 112
angle = 0
for i in range(df.shape[0]):
image = df.loc[df.index[i]].values.reshape(137, 236)
# Centering
image_center = tuple(np.array(image.shape[1::-1]) / 2)
matrix = cv2.getRotationMatrix2D(image_center, angle, 1.0)
image = cv2.warpAffine(
image,
matrix,
image.shape[1::-1],
flags=cv2.INTER_AREA,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(0, 0, 0),
)
# Scaling
matrix = cv2.getRotationMatrix2D(image_center, 0, 1.0)
image = cv2.warpAffine(
image,
matrix,
image.shape[1::-1],
flags=cv2.INTER_AREA,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(0, 0, 0),
)
# Removing Blur
# aug = A.GaussianBlur(p=1.0)
# image = aug(image=image)['image']
# Noise Removing
# augNoise=A.MultiplicativeNoise(p=1.0)
# image = augNoise(image=image)['image']
# Removing Distortion
# augDist=A.ElasticTransform(sigma=50, alpha=1, alpha_affine=10, p=1.0)
# image = augDist(image=image)['image']
# Brightness
augBright = A.RandomBrightnessContrast(p=1.0)
image = augBright(image=image)["image"]
_, thresh = cv2.threshold(
image, 30, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
)
contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[
-2:
]
idx = 0
ls_xmin = []
ls_ymin = []
ls_xmax = []
ls_ymax = []
for cnt in contours:
idx += 1
x, y, w, h = cv2.boundingRect(cnt)
ls_xmin.append(x)
ls_ymin.append(y)
ls_xmax.append(x + w)
ls_ymax.append(y + h)
xmin = min(ls_xmin)
ymin = min(ls_ymin)
xmax = max(ls_xmax)
ymax = max(ls_ymax)
roi = image[ymin:ymax, xmin:xmax]
resized_roi = cv2.resize(
roi, (resize_size, resize_size), interpolation=cv2.INTER_AREA
)
# image=affine_image(image)
# image= crop_resize(image)
# image = cv2.resize(image,(size,size),interpolation=cv2.INTER_AREA)
# image=resize_image(image,(64,64))
# image = cv2.resize(image,(size,size),interpolation=cv2.INTER_AREA)
# gaussian_3 = cv2.GaussianBlur(image, (5,5), cv2.BORDER_DEFAULT) #unblur
# image = cv2.addWeighted(image, 1.5, gaussian_3, -0.5, 0, image)
# kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]]) #filter
# image = cv2.filter2D(image, -1, kernel)
# ret,image = cv2.threshold(image, 128, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
resized[df.index[i]] = resized_roi.reshape(-1)
resized_df = pd.DataFrame(resized).T
return resized_df
# ## Accuracy and Loss Curve
df = pd.read_csv(os.path.join(pretrained, "hist.csv"))
# Plot the loss and accuracy curves for training and validation
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
ax[0].plot(
df[
[
"root_loss",
"vowel_loss",
"consonant_loss",
"val_root_loss",
"val_vowel_loss",
"val_consonant_loss",
]
]
)
ax[0].set_ylim(0, 2)
ax[0].set_title("Loss")
ax[0].legend(
[
"train_root_loss",
"train_vowel_loss",
"train_conso_loss",
"val_root_loss",
"val_vowel_loss",
"val_conso_loss",
],
loc="upper right",
)
ax[0].grid()
ax[1].plot(
df[
[
"root_acc",
"vowel_acc",
"consonant_acc",
"val_root_acc",
"val_vowel_acc",
"val_consonant_acc",
]
]
)
ax[1].set_ylim(0.5, 1)
ax[1].set_title("Accuracy")
ax[1].legend(
[
"train_root_acc",
"train_vowel_acc",
"train_conso_acc",
"val_root_acc",
"val_vowel_acc",
"val_conso_acc",
],
loc="lower right",
)
ax[1].grid()
# ## Target Columns
tgt_cols = ["grapheme_root", "vowel_diacritic", "consonant_diacritic"]
# ## Prediction on Test Images
row_ids = []
targets = []
id = 0
for i in range(4):
img_df = pd.read_parquet(
os.path.join(dataset, "test_image_data_" + str(i) + ".parquet")
)
img_df = img_df.drop("image_id", axis=1)
img_df = resize(img_df, SIZE) / 255
X_test = img_df.values.reshape(-1, SIZE, SIZE, N_ch)
preds = model.predict(X_test)
for j in range(len(X_test)):
for k in range(3):
row_ids.append("Test_" + str(id) + "_" + tgt_cols[k])
targets.append(np.argmax(preds[k][j]))
id += 1
# ## Creating Submission CSV File
df_submit = pd.DataFrame(
{"row_id": row_ids, "target": targets}, columns=["row_id", "target"]
)
df_submit.to_csv("submission.csv", index=False)
df_submit.head(10)
|
# # Corona Virus 2019
# This notebook visualizes how the Corona Virus has spread all over the world and also presents some insights from the data, like which country has the maximum spread and which country has the maximum success rate in curing coronavirus.
# # Insight
# Thailand has the maximum recovered ratio (more than 25% of its patients have recovered), so the medicines used there appear to be effective.
import pandas as pd
data = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
data.info()
# # Data Cleaning and EDA
data["Date"] = pd.to_datetime(data.Date)
data["Date"] = pd.DatetimeIndex(data["Date"]).date
data["Date"] = data.Date.apply(str)
data.groupby("Date").sum()["Confirmed"]
c = data.groupby("Country").sum()
c = c.drop(["Sno"], axis=1)
c.style.background_gradient(cmap="rainbow")
print("Total number of Corona Virus Confirmed Case are " + str(sum(data.Confirmed)))
print("Total number of Corona Virus Deaths are " + str(sum(data.Deaths)))
print("Total number of Corona Virus Recovered " + str(sum(data.Recovered)))
# ## Visualization
import seaborn as sns
import matplotlib.pyplot as plt
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
groupedvalues = data.groupby("Date").sum().reset_index()
sns.set(style="whitegrid")
sns.barplot(x="Confirmed", y="Date", data=groupedvalues)
plt.title(
"Number of Confirmed Corona Virus cases each day from 22-01-2020 to 18-02-2020"
)
sns.barplot(x="Deaths", y="Date", data=groupedvalues)
plt.title("Number of Deaths due to Corona Virus from 22-01-2020 to 18-02-2020")
sns.barplot(x="Recovered", y="Date", data=groupedvalues)
plt.title("Number of Recovered cases from 22-01-2020 to 18-02-2020")
groupedvalues = groupedvalues.drop(["Sno"], axis=1)
df = groupedvalues.melt(id_vars=["Date"], var_name="Type", value_name="NumberOfPeople")
fig, ax = plt.subplots()
sns.set(style="whitegrid")
sns.barplot(x="Date", y="NumberOfPeople", data=df, hue="Type")
fig.autofmt_xdate()
# ## Corona Virus Spread On World Map
import plotly.express as px
groupedvalues = data.groupby("Country").sum().reset_index()
groupedvalues = groupedvalues.drop(["Sno"], axis=1)
fig = px.scatter_geo(
groupedvalues,
locations="Country",
locationmode="country names",
color="Confirmed",
hover_name="Country",
range_color=[0, 20],
projection="natural earth",
title="Spread across the world",
)
fig.update(layout_coloraxis_showscale=False)
fig.show()
# # Ratio of Recovered people in each country
groupedvalues["RecoveredRatio"] = (groupedvalues["Recovered"] * 100) / groupedvalues[
"Confirmed"
]
fig, ax = plt.subplots()
sns.set(style="whitegrid")
sns.barplot(x="Country", y="RecoveredRatio", data=groupedvalues)
fig.autofmt_xdate()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import zipfile
path = "/kaggle/input/dogs-vs-cats/train.zip"
zip_ref = zipfile.ZipFile(path, "r")
zip_ref.extractall("/kaggle/working/")
path = "/kaggle/input/dogs-vs-cats/test1.zip"
zip_ref = zipfile.ZipFile(path, "r")
zip_ref.extractall("/kaggle/working/")
zip_ref.close()
# import pandas as pd
# sampleSubmission = pd.read_csv("../input/dogs-vs-cats/sampleSubmission.csv")
# print(sampleSubmission)
import os
dirname = "/kaggle/working/test1/dog"
os.mkdir(dirname)
dirname = "/kaggle/working/test1/cat"
os.mkdir(dirname)
dirname = "/kaggle/working/train/dog"
os.mkdir(dirname)
dirname = "/kaggle/working/train/cat"
os.mkdir(dirname)
dirname = "/kaggle/working/validation"
os.mkdir(dirname)
dirname = "/kaggle/working/validation/cat"
os.mkdir(dirname)
dirname = "/kaggle/working/validation/dog"
os.mkdir(dirname)
# for dirname, _, filenames in os.walk('/kaggle/working/test1'):
# print(filenames)
import shutil
for dirname, _, filenames in os.walk("/kaggle/working/train"):
for filename in filenames:
if filename[:3] == "dog":
dog_path = os.path.join(dirname, filename)
shutil.move(dog_path, "/kaggle/working/train/dog/" + filename)
if filename[:3] == "cat":
cat_path = os.path.join(dirname, filename)
shutil.move(cat_path, "/kaggle/working/train/cat/" + filename)
import shutil
for dirname, _, filenames in os.walk("/kaggle/working/train/cat/"):
i = 0
for filename in filenames:
if i < 1300:
dog_path = os.path.join(dirname, filename)
shutil.move(dog_path, "/kaggle/working/validation/cat/" + filename)
i = i + 1
for dirname, _, filenames in os.walk("/kaggle/working/train/dog/"):
i = 0
for filename in filenames:
if i < 1300:
dog_path = os.path.join(dirname, filename)
shutil.move(dog_path, "/kaggle/working/validation/dog/" + filename)
i = i + 1
# for dirname, _, filenames in os.walk('/kaggle/working/train/cat/'):
# print(filenames)
# **Begin Algorithm**
import tensorflow as tf
import os
import zipfile
from os import path, getcwd, chdir
def train_model():
model = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(
16, (3, 3), input_shape=(64, 64, 3), activation="relu"
),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation="relu"),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
from tensorflow.keras.optimizers import RMSprop
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(
"/kaggle/working/train/",
target_size=(64, 64),
batch_size=500,
class_mode="binary",
)
validation_generator = train_datagen.flow_from_directory(
"/kaggle/working/validation/",
target_size=(64, 64),
batch_size=50,
class_mode="binary",
)
history = model.fit_generator(
train_generator,
epochs=100,
# steps_per_epoch=10,
validation_data=validation_generator,
verbose=1,
)
model.save("model_loc.h5")
return history.history["acc"][-1]
train_model()
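# The commented-out sampleSubmission read above suggests a submission step was planned.
# A minimal sketch of predicting on the extracted test1 images and writing a CSV; the
# id/label column names, the 0.5 threshold, and treating the sigmoid output as P(dog)
# (cats sort before dogs in flow_from_directory) are assumptions, not the original code.
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
model = load_model("model_loc.h5")
test_dir = "/kaggle/working/test1"
rows = []
for fname in os.listdir(test_dir):
    fpath = os.path.join(test_dir, fname)
    if not os.path.isfile(fpath):
        continue  # skip the empty dog/ and cat/ sub-folders created earlier
    img = image.load_img(fpath, target_size=(64, 64))
    arr = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)
    prob = float(model.predict(arr)[0][0])
    rows.append({"id": int(fname.split(".")[0]), "label": int(prob > 0.5)})
pd.DataFrame(rows).sort_values("id").to_csv("submission.csv", index=False)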
|
# **Medical Cost Prediction**
# ## *Load the data*
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
insurance = pd.read_csv("../input/insurance/insurance.csv")
insurance.head(5)
# # Explore dataset
insurance.info()
insurance.describe(include="all")
insurance.hist(bins=50, figsize=(12, 8))
plt.show()
# ## Split Train-Test
#
## We need to stratify train and test sets in order to make the data representative
insurance["age_cat"] = pd.cut(
insurance["age"],
bins=[0.0, 20.0, 30.0, 40.0, 50.0, 60.0, np.inf],
labels=[1, 2, 3, 4, 5, 6],
)
insurance["age_cat"].value_counts().sort_index().plot.bar(rot=0, grid=True)
plt.xlabel("Age category")
plt.ylabel("Number of Patients")
plt.show()
from sklearn.model_selection import train_test_split
strat_train_set, strat_test_set = train_test_split(
insurance, test_size=0.2, stratify=insurance["age_cat"], random_state=42
)
strat_test_set["age_cat"].value_counts() / len(strat_test_set)
insurance["age_cat"].value_counts() / len(insurance)
# # Visualize dataset
#
from pandas.plotting import scatter_matrix
attrs = ["age", "bmi", "charges"]
scatter_matrix(strat_train_set[attrs], figsize=(12, 8))
plt.show()
corr_matrix = strat_train_set.corr()
corr_matrix["charges"].sort_values(ascending=False)
# it looks like there can be a linear relationship between age, bmi and charges
test_with_log_charges = strat_train_set.copy()
test_with_log_charges["log_charges"] = np.log(test_with_log_charges["charges"])
corr_matrix = test_with_log_charges.corr()
corr_matrix["log_charges"].sort_values(ascending=False)
# the log of the charges correlates more strongly with the other features than the raw charges do
# it can be useful to try this later when training the model
# # Preprocessing
insurance = strat_train_set.drop(["charges"], axis=1)
insurance_labels = np.log(test_with_log_charges["charges"])
from sklearn.compose import (
ColumnTransformer,
make_column_selector,
make_column_transformer,
)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer
num_pipeline = Pipeline(
[
("standardize", StandardScaler()),
]
)
cat_pipeline = Pipeline([("onehot", OneHotEncoder(handle_unknown="ignore"))])
preprocessing = make_column_transformer(
(num_pipeline, make_column_selector(dtype_include=np.number)),
(cat_pipeline, make_column_selector(dtype_include=object)),
)
insurance_prepared = preprocessing.fit_transform(insurance)
target_scaler = StandardScaler()
scaled_labels = target_scaler.fit_transform(insurance_labels.to_frame())
view_prep = pd.DataFrame(
insurance_prepared,
columns=preprocessing.get_feature_names_out(),
index=insurance.index,
)
view_prep
# # Model Selection
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lin_reg = Pipeline([("preprocess", preprocessing), ("regressor", LinearRegression())])
lin_reg.fit(insurance, scaled_labels)
insurance_predictions = lin_reg.predict(insurance)
lin_rmse = mean_squared_error(scaled_labels, insurance_predictions, squared=False)
print(f"Rmse = {lin_rmse}")
# the mean error is pretty good. Let's try other regression models
from sklearn.tree import DecisionTreeRegressor
tree_reg = Pipeline(
[
("preprocess", preprocessing),
("regressor", DecisionTreeRegressor(random_state=42)),
]
)
tree_reg.fit(insurance, scaled_labels)
insurance_predictions = tree_reg.predict(insurance)
tree_rmse = mean_squared_error(scaled_labels, insurance_predictions, squared=False)
print(f"Rmse = {tree_rmse}")
# Well, the decision tree looks almost perfect on the training data
from sklearn.ensemble import RandomForestRegressor
random_reg = Pipeline(
[
("preprocess", preprocessing),
("regressor", RandomForestRegressor(random_state=42)),
]
)
random_reg.fit(insurance, scaled_labels)
insurance_predictions = random_reg.predict(insurance)
random_rmse = mean_squared_error(scaled_labels, insurance_predictions, squared=False)
print(f"Rmse = {random_rmse}")
# Looks like the best model is the decision tree regressor; let's check it with cross-validation
from sklearn.model_selection import cross_val_score
tree_rmses = -cross_val_score(
tree_reg, insurance, scaled_labels, scoring="neg_root_mean_squared_error", cv=10
)
pd.Series(tree_rmses).describe()
# Well, it turns out it highly overfitted the data; let's check the other models
from sklearn.model_selection import cross_val_score
random_rmses = -cross_val_score(
random_reg, insurance, scaled_labels, scoring="neg_root_mean_squared_error", cv=10
)
pd.Series(random_rmses).describe()
from sklearn.model_selection import cross_val_score
lin_rmses = -cross_val_score(
lin_reg, insurance, scaled_labels, scoring="neg_root_mean_squared_error", cv=10
)
pd.Series(lin_rmses).describe()
# Overall random_forest is the best
# # Fine Tuning the model
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint
full_pipeline = Pipeline(
[
("preprocessing", preprocessing),
("random_forest", RandomForestRegressor(random_state=42)),
]
)
params_distribs = {"random_forest__max_features": randint(low=2, high=20)}
rnd_search = RandomizedSearchCV(
full_pipeline,
param_distributions=params_distribs,
n_iter=10,
cv=3,
scoring="neg_root_mean_squared_error",
random_state=42,
)
rnd_search.fit(insurance, scaled_labels)
cv_res = pd.DataFrame(rnd_search.cv_results_)
cv_res
# ## Feature importance and the final model
final_model = rnd_search.best_estimator_
feature_importances = final_model["random_forest"].feature_importances_
sorted(
zip(feature_importances, final_model["preprocessing"].get_feature_names_out()),
reverse=True,
)
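# A sketch of the same importances as a horizontal bar chart (nothing new is computed;
# the names and values come from the zip above):
imp_sorted = sorted(
    zip(feature_importances, final_model["preprocessing"].get_feature_names_out())
)
imp_vals, imp_names = zip(*imp_sorted)
plt.figure(figsize=(8, 6))
plt.barh(imp_names, imp_vals)
plt.xlabel("Feature importance")
plt.title("Random forest feature importances")
plt.show()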
# **It looks like the most important features are: 'smoker', 'bmi' and 'age'.**
# # Testing
X_test = strat_test_set.drop(["charges"], axis=1)
y_test = strat_test_set["charges"].copy()
scaled_y_test = target_scaler.transform(np.log(y_test).to_frame())  # reuse the scaler fitted on the training labels
scaled_final_predictions = final_model.predict(X_test)
final_predictions = np.exp(
target_scaler.inverse_transform(scaled_final_predictions.reshape(-1, 1))
)
final_rmse = mean_squared_error(y_test, final_predictions, squared=False)
print(f"Final Rmse Test Score : {final_rmse}")
final_model.score(X_test, scaled_y_test)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
complaints = pd.read_csv("../input/complaints.csv")
# Print out the top 5 rows of the dataframe
complaints.head()
# Number of rows and columns in the df
complaints.shape
# Print out a list of all the column names in the df
complaints.columns
# Print the number of columns in the df
len(complaints.columns)
# Copy the dataframe with only a few of the columns
# df1 = df[['a','b']]
complaints_small = complaints[["Product", "Issue", "Company public response", "State"]]
complaints_small.head()
# df.groupby('age').size()
complaints_small.groupby("State").size()
# df.loc[df['column_name'] == some_value]
complaints_small.loc[complaints_small["State"] == "WA"]
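# Combining the two patterns above (a sketch): count complaints by Product within the WA subset
wa_complaints = complaints_small.loc[complaints_small["State"] == "WA"]
wa_complaints.groupby("Product").size().sort_values(ascending=False)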
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from scipy.io import mmread
import networkx as nx
g1_scipy_sparse = mmread("../input/tdl-a1-dataset/graph1.mtx")
G1 = nx.from_scipy_sparse_matrix(g1_scipy_sparse)
# Compute statistics for graph1
num_nodes1 = G1.number_of_nodes()
num_edges1 = G1.number_of_edges()
degree_stats1 = nx.degree(G1)
mean_degree1 = sum(dict(degree_stats1).values()) / num_nodes1
min_degree1 = min(dict(degree_stats1).values())
max_degree1 = max(dict(degree_stats1).values())
density1 = nx.density(G1)
sparsity1 = 1 - density1
# Load graph2 from file
g2_scipy_sparse = mmread("../input/tdl-a1-dataset/graph2.mtx")
G2 = nx.from_scipy_sparse_matrix(g2_scipy_sparse)
# Compute statistics for graph2
num_nodes2 = G2.number_of_nodes()
num_edges2 = G2.number_of_edges()
degree_stats2 = nx.degree(G2)
mean_degree2 = sum(dict(degree_stats2).values()) / num_nodes2
min_degree2 = min(dict(degree_stats2).values())
max_degree2 = max(dict(degree_stats2).values())
density2 = nx.density(G2)
sparsity2 = 1 - density2
# Print statistics for both graphs
print("Graph1 statistics:")
print("Number of nodes:", num_nodes1)
print("Number of edges:", num_edges1)
print("Mean degree:", mean_degree1)
print("Minimum degree:", min_degree1)
print("Maximum degree:", max_degree1)
print("Density:", density1)
print("Sparsity:", sparsity1)
print("Graph2 statistics:")
print("Number of nodes:", num_nodes2)
print("Number of edges:", num_edges2)
print("Mean degree:", mean_degree2)
print("Minimum degree:", min_degree2)
print("Maximum degree:", max_degree2)
print("Density:", density2)
print("Sparsity:", sparsity2)
# Find maximal cliques, and those of size >= 4 (find_cliques returns maximal cliques, not all 3-cliques)
cliques3 = list(nx.find_cliques(G1))
cliques4 = [c for c in cliques3 if len(c) >= 4]
# Compute node centrality
centrality = nx.degree_centrality(G1)
# Compute clustering coefficients
clustering = nx.clustering(G1)
# Print results
print(f"Number of nodes: {G1.number_of_nodes()}")
print(f"Number of edges: {G1.number_of_edges()}")
print(f"Number of 3-cliques: {len(cliques3)}")
print(f"Number of 4-cliques: {len(cliques4)}")
print(f"Mean degree centrality: {sum(centrality.values()) / len(centrality)}")
print(f"Max clustering coefficient: {max(clustering.values())}")
# Find maximal cliques, and those of size >= 4 (find_cliques returns maximal cliques, not all 3-cliques)
cliques3 = list(nx.find_cliques(G2))
cliques4 = [c for c in cliques3 if len(c) >= 4]
# Compute node centrality
centrality = nx.degree_centrality(G2)
# Compute clustering coefficients
clustering = nx.clustering(G2)
# Print results
print(f"Number of nodes: {G2.number_of_nodes()}")
print(f"Number of edges: {G2.number_of_edges()}")
print(f"Number of 3-cliques: {len(cliques3)}")
print(f"Number of 4-cliques: {len(cliques4)}")
print(f"Mean degree centrality: {sum(centrality.values()) / len(centrality)}")
print(f"Max clustering coefficient: {max(clustering.values())}")
from node2vec import Node2Vec
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, average_precision_score, f1_score
from sklearn.model_selection import train_test_split
# Learn node embeddings using Node2Vec
node2vec = Node2Vec(G1, dimensions=16, walk_length=30, num_walks=200, workers=4)
model1 = node2vec.fit()
embeddings1 = {node: model1.wv.get_vector(node) for node in G1.nodes()}
node2vec = Node2Vec(G2, dimensions=16, walk_length=30, num_walks=200, workers=4)
model2 = node2vec.fit()
embeddings2 = {node: model2.wv.get_vector(node) for node in G2.nodes()}
# Split the edges of the graph datasets into train and test sets
edges1 = list(G1.edges())
edges2 = list(G2.edges())
train_edges1, test_edges1 = train_test_split(edges1, test_size=0.2, random_state=42)
train_edges2, test_edges2 = train_test_split(edges2, test_size=0.2, random_state=42)
print(embeddings1[0])
print(type(embeddings1))
print(len(embeddings1))
# print(embeddings1.keys())
print(type(np.array(embeddings1.values())))
# use cosine similarity to predict new links
from sklearn.metrics.pairwise import cosine_similarity
feature_vectors = list(list(ele) for ele in embeddings1.values())
# print()
# feauture_vectors = [list(arr) for arr in feature_vectors]
similarity_matrix = cosine_similarity(feature_vectors)
# set a threshold for similarity scores
threshold = 0.9
# generate a list of new links with similarity scores above the threshold
new_links = []
for i in range(len(similarity_matrix)):
for j in range(i + 1, len(similarity_matrix)):
if similarity_matrix[i, j] > threshold:
new_links.append((i, j, similarity_matrix[i, j]))  # row/column indices are the 0-based node ids
# print the predicted new links with their similarity scores
print(new_links[:5])
predicted_edges = []
for link in new_links:
if not G1.has_edge(link[0], link[1]):
# print(link)
predicted_edges.append(link)
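# The evaluation block below references y_test1/y_pred1 and y_test2/y_pred2, which are never
# constructed above. A minimal sketch of one way they could be built (an assumption, not the
# original author's method): score each held-out test edge and an equal number of sampled
# non-edges with a logistic regression trained on the Hadamard product of the node2vec
# embeddings learned earlier.
import random
def edge_features(emb, u, v):
    # Hadamard product of the two endpoint embeddings (a common node2vec edge representation)
    return emb[u] * emb[v]
def build_link_prediction_eval(G, emb, train_edges, test_edges, seed=42):
    rng = random.Random(seed)
    nodes = list(G.nodes())
    def sample_non_edges(n):
        non_edges = set()
        while len(non_edges) < n:
            u, v = rng.sample(nodes, 2)
            if not G.has_edge(u, v):
                non_edges.add((u, v))
        return list(non_edges)
    train_neg = sample_non_edges(len(train_edges))
    test_neg = sample_non_edges(len(test_edges))
    X_tr = [edge_features(emb, u, v) for u, v in list(train_edges) + train_neg]
    y_tr = [1] * len(train_edges) + [0] * len(train_neg)
    X_te = [edge_features(emb, u, v) for u, v in list(test_edges) + test_neg]
    y_te = [1] * len(test_edges) + [0] * len(test_neg)
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    return np.array(y_te), clf.predict_proba(X_te)[:, 1]
y_test1, y_pred1 = build_link_prediction_eval(G1, embeddings1, train_edges1, test_edges1)
y_test2, y_pred2 = build_link_prediction_eval(G2, embeddings2, train_edges2, test_edges2)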
# # Evaluate the performance of the model using evaluation metrics
auc_roc1 = roc_auc_score(y_test1, y_pred1)
auc_pr1 = average_precision_score(y_test1, y_pred1)
f1_score1 = f1_score(y_test1, y_pred1.round())
auc_roc2 = roc_auc_score(y_test2, y_pred2)
auc_pr2 = average_precision_score(y_test2, y_pred2)
f1_score2 = f1_score(y_test2, y_pred2.round())
print(
f"Graph 1 link prediction results: AUC-ROC={auc_roc1:.4f}, AUC-PR={auc_pr1:.4f}, F1-score={f1_score1:.4f}"
)
# print(f"Graph 2 link prediction results: AUC")
from torch_geometric.utils import erdos_renyi_graph, to_networkx, from_networkx
graph = from_networkx(G1)
print(graph)
import networkx as nx
import matplotlib.pyplot as plt
# create the KG
G = nx.MultiDiGraph()
G.add_node("Alice", entity_type="person", id="1")
G.add_node("Bob", entity_type="person", id="2")
G.add_node("Charlie", entity_type="person", id="3")
G.add_node("David", entity_type="person", id="4")
G.add_node("Eve", entity_type="person", id="5")
# {"id": 6, "type": "Language", "name": "English"},
G.add_node("English", entity_type="language", id="6")
G.add_node("Spanish", entity_type="language", id="7")
G.add_node("Chinese", entity_type="language", id="8")
# {"id": 9, "type": "Country", "name": "United States"},
G.add_node("United States", entity_type="country", id="9")
G.add_node("Mexico", entity_type="country", id="10")
G.add_node("China", entity_type="country", id="11")
G.add_node("Software Engineer", entity_type="job", id="12")
G.add_node("Data Scientist", entity_type="job", id="13")
G.add_node("Cooking", entity_type="hobby", id="14")
G.add_node("Hiking", entity_type="hobby", id="15")
G.add_node("Reading", entity_type="hobby", id="16")
G.add_node("Music", entity_type="hobby", id="17")
# {"id": 18, "type": "Organization", "name": "Google"},
# {"id": 19, "type": "Organization", "name": "Microsoft"},
# {"id": 20, "type": "Organization", "name": "Amazon"}
G.add_node("Google", entity_type="organization", id="18")
G.add_node("Microsoft", entity_type="organization", id="19")
G.add_node("Amazon", entity_type="organization", id="20")
# edges
G.add_edge("Alice", "English", relation_type="Speaks")
G.add_edge("Bob", "English", relation_type="Speaks")
G.add_edge("Charlie", "Spanish", relation_type="Speaks")
G.add_edge("David", "English", relation_type="Speaks")
G.add_edge("Eve", "Spanish", relation_type="Speaks")
G.add_edge("Alice", "United States", relation_type="LivesIn")
G.add_edge("Bob", "United States", relation_type="LivesIn")
G.add_edge("Charlie", "Mexico", relation_type="LivesIn")
G.add_edge("David", "United States", relation_type="LivesIn")
G.add_edge("Eve", "Mexico", relation_type="LivesIn")
G.add_edge("Alice", "Software Engineer", relation_type="WorksAs")
G.add_edge("Bob", "Software Engineer", relation_type="WorksAs")
G.add_edge("Charlie", "Data Scientist", relation_type="WorksAs")
G.add_edge("David", "Software Engineer", relation_type="WorksAs")
G.add_edge("Eve", "Software Engineer", relation_type="WorksAs")
G.add_edge("Alice", "Cooking", relation_type="Likes")
G.add_edge("Bob", "Hiking", relation_type="Likes")
# set node and edge colors based on entity and relation types
node_colors = {
"person": "lightblue",
"language": "lightgreen",
"country": "pink",
"job": "pink",
"hobby": "pink",
"organization": "pink",
}
edge_colors = {"Speaks": "blue", "LivesIn": "red", "WorksAs": "green", "Likes": "black"}
for n in G.nodes():
print(G.nodes[n]["entity_type"])
for e in G.edges.data():
print(e[2]["relation_type"])
node_color_list = [node_colors[G.nodes[n]["entity_type"]] for n in G.nodes()]
edge_color_list = [edge_colors[e[2]["relation_type"]] for e in G.edges.data()]
# draw the KG
plt.figure(figsize=(16, 16))
pos = nx.spring_layout(G, seed=42)
nx.draw_networkx_nodes(G, pos, node_color=node_color_list)
nx.draw_networkx_labels(G, pos)
nx.draw_networkx_edges(G, pos, edge_color=edge_color_list, arrows=True)
# set legend labels and colors
node_legend_labels = [
("person", "lightblue"),
("language", "lightgreen"),
("country", "pink"),
("job", "pink"),
("hobby", "pink"),
("orgainzation", "pink"),
]
edge_legend_labels = [
("Speaks", "blue"),
("LivesIn", "red"),
("WorksAs", "green"),
("Likes", "black"),
]
node_legend_colors = [color for _, color in node_legend_labels]
edge_legend_colors = [color for _, color in edge_legend_labels]
# draw the legend (build colored patch handles from the label/color pairs above)
import matplotlib.patches as mpatches
node_legend_handles = [
    mpatches.Patch(color=color, label=label) for label, color in node_legend_labels
]
node_legend = plt.legend(
    handles=node_legend_handles,
    loc="upper left",
    title="Node Types",
    frameon=True,
    facecolor="white",
    framealpha=1,
)
plt.axis("off")
plt.show()
# # ONE HOP QUERY
# import networkx as nx
# # create the KG
# G = nx.MultiDiGraph()
# # G.add_node('entity1', type='type1')
# # G.add_node('entity2', type='type2')
# # G.add_edge('entity1', 'entity2', relation='relation1')
# # execute a one-hop query
# results = []
# for node, attr in G.nodes(data=True):
# if attr['type'] == 'type1':
# neighbors = G.neighbors(node)
# for neighbor in neighbors:
# if G[node][neighbor][0]['relation'] == 'relation1':
# results.append((node, neighbor))
# # print the query results
# print(results)
# # PATH QUERY
# import networkx as nx
# # create the KG
# G = nx.MultiDiGraph()
# G.add_node('entity1', type='type1')
# G.add_node('entity2', type='type2')
# G.add_node('entity3', type='type3')
# G.add_edge('entity1', 'entity2', relation='relation1')
# G.add_edge('entity2', 'entity3', relation='relation2')
# # execute a path query
# results = []
# for start_node, start_attr in G.nodes(data=True):
# if start_attr['type'] == 'type1':
# for end_node, end_attr in G.nodes(data=True):
# if end_attr['type'] == 'type3':
# paths = nx.all_simple_paths(G, start_node, end_node, cutoff=2)
# for path in paths:
# if G[path[0]][path[1]][0]['relation'] == 'relation1' and G[path[1]][path[2]][0]['relation'] == 'relation2':
# results.append(path)
# # print the query results
# print(results)
|
# # Project Name :- Bike Rental
# ### Project Description -
# The objective of the analysis is to find the determining factors that drive demand for bike share rentals,
# construct statistical models, and then try to predict rentals based on the information and models we have.
# Exploration and analysis of the data will be performed in R and Python.
# ### Loading Libraries & Data
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import chi2_contingency
import os
import statistics
from sklearn.metrics import r2_score
from scipy import stats
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc, roc_auc_score
sns.set_style("whitegrid")
import warnings
warnings.filterwarnings("ignore")
# Setting working directory
os.chdir("C:/Users/Click/Desktop/Bike rental")
print(os.getcwd())
# Loading Dataset
data = pd.read_csv("Bike_Rental.csv")
data = pd.DataFrame(data)
# Creating Duplicate instances of data for Preprocessing and exploration
df = data.copy()
# ### Exploring Data
data.head(5)
# Checking info of data -> data types and rows n cols
data.info()
# This shows that we have no Missing Values for any column.
data.describe()
# calculating number of unique values for all df columns
data.nunique()
data.columns
## We know that 'cnt', our target variable, is the sum of two other variables - 'registered' and 'casual'.
# The 'instant' variable is of no use and can be dropped
# The 'dteday' variable is a date column which is not significant in our analysis and can be excluded
# So we will drop these variables now
drop1 = ["casual", "registered", "instant", "dteday"]
data = data.drop(drop1, axis=1)
# Variables are " Continuos" and "Categorical"
con = ["temp", "atemp", "hum", "windspeed", "cnt"]
cat = ["season", "yr", "mnth", "holiday", "weekday", "workingday", "weathersit"]
# Target Variable probability data distribution
plt.figure(figsize=(8, 6))
plt.hist(data["cnt"], normed=True, bins=30)
plt.ylabel("Probability", fontsize=15)
plt.xlabel("Number of Users", fontsize=15)
plt.savefig("Count of Users.png")
plt.title("Bike Rental Statistics", fontsize=20)
# Function to view the categories present in each categorical feature and their values
def view_feature_cat(obj):
for i in range(len(obj)):
print("*******************************************")
print("Feature:", obj[i])
print("-----------------------")
print(data[str(obj[i])].value_counts())
print("*******************************************")
view_feature_cat(cat)
# ### Data Understanding
sns.catplot(x="weekday", y="cnt", data=data)
plt.savefig("days_bikecnt.png")
sns.catplot(x="mnth", y="cnt", data=data)
plt.savefig("mnth_bikecnt.png")
sns.catplot(x="season", y="cnt", data=data)
plt.savefig("season_bikecnt.png")
sns.catplot(x="weathersit", y="cnt", data=data)
plt.savefig("hol_bikecnt.png")
# Checking the distribution of values for variables in data
for i in con:
if i == "cnt":
continue
sns.distplot(data[i], bins="auto")
plt.title("Checking Distribution for Variable " + str(i))
plt.ylabel("Density")
plt.savefig("{i}_Vs_Density.png".format(i=i))
plt.show()
# ### OutLier Analysis
"""def box_plot(x):
plt.boxplot(data[x])
plt.xlabel(x,fontsize= 15)
plt.ylabel('Values',fontsize= 15)
plt.xticks(fontsize=10, rotation=90)
plt.yticks(fontsize=10)
plt.title("Boxplot for {X}".format(X=x),fontsize = 20)
plt.savefig("Boxplot for {X}.png".format(X=x))
plt.show()
box_plot('windspeed')
box_plot('temp')
box_plot('atemp')
box_plot('hum')"""
box = plt.boxplot(
[data["temp"], data["atemp"], data["hum"], data["windspeed"]], patch_artist=True
)
plt.xlabel(["1. Temperature", "2. Feeling Temperature", "3. Humidity", "4. Windspeed"])
plt.title("BoxPlot of the Variables for Weather Conditions")
colors = ["cyan", "lightblue", "lightgreen", "tan"]
for patch, color in zip(box["boxes"], colors):
patch.set_facecolor(color)
plt.ylabel("Values")
plt.savefig("BoxPlot of the Variables for Weather Conditions")
box2 = plt.boxplot([data["cnt"]], patch_artist=True)
plt.xlabel(["1. Total Count"])
plt.title("BoxPlot of the Variables for user count")
colors = ["red"]
for patch, color in zip(box2["boxes"], colors):
patch.set_facecolor(color)
plt.ylabel("Values")
plt.savefig("BoxPlot of the Variables for user count")
# From the above boxplot we can conclude that there are outliers in the 'windspeed' variable
# Getting 75 and 25 percentile of variable "windspeed"
q75, q25 = np.percentile(data["windspeed"], [75, 25])
# Calculating Interquartile range
iqr = q75 - q25
# Calculating the upper and lower extremes (1.5 * IQR beyond the quartiles)
minimum = q25 - (iqr * 1.5)
maximum = q75 + (iqr * 1.5)
# Replacing all the outliers value to NA
data.loc[data["windspeed"] < minimum, "windspeed"] = np.nan
data.loc[data["windspeed"] > maximum, "windspeed"] = np.nan
# Checking % of missing values
data.isnull().sum().sum()
# Checking missing values in train dataset
print(data.isnull().sum())
# result shows there are missing values in the dataset
## We will impute the missing values (the former outlier values) using mean imputation
# We chose mean imputation because median imputation is mainly suited to data that still contains outliers
## Since we no longer have outliers, we choose mean imputation over KNN.
data["windspeed"] = data["windspeed"].fillna(data["windspeed"].mean())
print(data.isnull().sum())
# ### Feature Selection
# Code for plotting pairplot
sns_plot = sns.pairplot(data=data[con])
plt.plot()
plt.savefig("Pairplot")
##Correlation analysis for continuous variables
# Correlation plot
data_corr = data.loc[:, con]
# Set the width and height of the plot
f, ax = plt.subplots(figsize=(10, 10))
# Generate correlation matrix
corr = data_corr.corr()
# Plot using seaborn library
sns.heatmap(
corr,
mask=np.zeros_like(corr, dtype=bool),
cmap=sns.diverging_palette(220, 50, as_cmap=True),
square=True,
ax=ax,
annot=True,
)
plt.plot()
plt.savefig("Heatmap")
label = "cnt"
obj_dtype = cat
drop_feat = []
## ANOVA TEST FOR P VALUES
import statsmodels.api as sm
from statsmodels.formula.api import ols
anova_p = []
for i in obj_dtype:
buf = label + " ~ " + i
mod = ols(buf, data=data).fit()
anova_op = sm.stats.anova_lm(mod, typ=2)
print(anova_op)
anova_p.append(anova_op.iloc[0:1, 3:4])
p = anova_op.loc[i, "PR(>F)"]
if p >= 0.05:
drop_feat.append(i)
drop_feat
# As a result of correlation analysis and ANOVA, we have concluded that we should remove 4 columns
# 'temp' and 'atemp' are correlated and hence one of them ('atemp') should be removed
# 'holiday', 'weekday' and 'workingday' have p > 0.05 and hence should be removed
# Droping the variables which has redundant information
to_drop = ["atemp", "holiday", "weekday", "workingday"]
data = data.drop(to_drop, axis=1)
data.info()
# Updating the Continuous and Categorical Variables after droping correlated variables
con = [i for i in con if i not in to_drop]
cat = [i for i in cat if i not in to_drop]
# ### Feature Scaling
# Checking the distribution of values for variables in data
for i in con:
if i == "data":
continue
sns.distplot(data[i], bins="auto")
plt.title("Checking Distribution for Variable " + str(i))
plt.ylabel("Density")
plt.savefig("{i}_Vs_Density.png".format(i=i))
plt.show()
# Data before scaling
data.head()
# Since our data is normally distributed, we will use Standardization for Feature Scalling
# #Standardization
for i in con:
if i == "cnt":
continue
data[i] = (data[i] - data[i].mean()) / (data[i].std())
# Data after scaling
data.head()
# #### Before going for modelling algorithms, we will create dummy variables for our categorical variables
dummy_data = pd.get_dummies(data=data, columns=cat)
# Copying dataframe
bike_data = dummy_data.copy()
dummy_data.head()
# ### Machine Learning algorithms
# Using train test split functionality for creating sampling
X_train, X_test, y_train, y_test = train_test_split(
dummy_data.iloc[:, dummy_data.columns != "cnt"],
    dummy_data["cnt"],
test_size=0.33,
random_state=101,
)
(X_train.shape), (y_train.shape)
# ### Decision Tree Regressor
# Importing libraries for Decision Tree
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
# Building model on top of training dataset
fit_DT = DecisionTreeRegressor(max_depth=2).fit(X_train, y_train)
# Calculating RMSE for test data to check accuracy
pred_test = fit_DT.predict(X_test)
rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test))
def MAPE(y_true, y_pred):
mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100
return mape
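# Quick sanity check of MAPE on toy values (illustrative only):
# MAPE(np.array([100, 200]), np.array([110, 180])) -> 10.0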
DT_rmse = rmse_for_test
DT_mape = MAPE(y_test, pred_test)
DT_r2 = r2_score(y_test, pred_test)
print("Decision Tree Regressor Model Performance:")
print("Root Mean Squared Error For Test data = " + str(rmse_for_test))
print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test)))
print("MAPE(Mean Absolute Percentage Error) = " + str(DT_mape))
# Decision Tree Regressor Model Performance:
# Root Mean Squared Error For Test data = 997.3873927346699
# R^2 Score(coefficient of determination) = 0.7073525764693427
# MAPE(Mean Absolute Percentage Error) = 25.707144204754727
# ### Random Forest
# Importing libraries for Random Forest
from sklearn.ensemble import RandomForestRegressor
# Building model on top of training dataset
fit_RF = RandomForestRegressor(n_estimators=500).fit(X_train, y_train)
# Calculating RMSE for test data to check accuracy
pred_test = fit_RF.predict(X_test)
rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test))
RF_rmse = rmse_for_test
RF_mape = MAPE(y_test, pred_test)
RF_r2 = r2_score(y_test, pred_test)
print("Random Forest Regressor Model Performance:")
print("Root Mean Squared Error For Test data = " + str(rmse_for_test))
print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test)))
print("MAPE(Mean Absolute Percentage Error) = " + str(RF_mape))
# Random Forest Regressor Model Performance:
# Root Mean Squared Error For Test data = 567.4712836267795
# R^2 Score(coefficient of determination) = 0.9052662486980746
# MAPE(Mean Absolute Percentage Error) = 13.33175245911665
# ### Linear Regression
# Importing libraries for Linear Regression
from sklearn.linear_model import LinearRegression
# Building model on top of training dataset
fit_LR = LinearRegression().fit(X_train, y_train)
# Calculating RMSE for test data to check accuracy
pred_test = fit_LR.predict(X_test)
rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test))
LR_rmse = rmse_for_test
LR_mape = MAPE(y_test, pred_test)
LR_r2 = r2_score(y_test, pred_test)
print("Linear Regression Model Performance:")
print("Root Mean Squared Error For Test data = " + str(rmse_for_test))
print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test)))
print("MAPE(Mean Absolute Percentage Error) = " + str(LR_mape))
# Linear Regression Model Performance:
# Root Mean Squared Error For Test data = 736.2047259447531
# R^2 Score(coefficient of determination) = 0.8405538055300172
# MAPE(Mean Absolute Percentage Error) = 17.217590042129938
# ### Gradient Boosting Regressor
# Importing library for Gradient Boosting
from sklearn.ensemble import GradientBoostingRegressor
# Building model on top of training dataset
fit_GB = GradientBoostingRegressor().fit(X_train, y_train)
# Calculating RMSE for test data to check accuracy
pred_test = fit_GB.predict(X_test)
rmse_for_test = np.sqrt(mean_squared_error(y_test, pred_test))
GBR_rmse = rmse_for_test
GBR_mape = MAPE(y_test, pred_test)
GBR_r2 = r2_score(y_test, pred_test)
print("Gradient Boosting Regressor Model Performance:")
print("Root Mean Squared Error For Test data = " + str(rmse_for_test))
print("R^2 Score(coefficient of determination) = " + str(r2_score(y_test, pred_test)))
print("MAPE(Mean Absolute Percentage Error) = " + str(GBR_mape))
# Gradient Boosting Regressor Model Performance:
# Root Mean Squared Error For Test data = 575.7689853723047
# R^2 Score(coefficient of determination) = 0.9024755542385117
# MAPE(Mean Absolute Percentage Error) = 13.039727726693526
# ### Final Results for all models
dat = {
"Model_name": [
"Decision tree default",
"Random Forest Default",
"Linear Regression",
"Gradient Boosting Default",
],
"RMSE": [DT_rmse, RF_rmse, LR_rmse, GBR_rmse],
"MAPE": [DT_mape, RF_mape, LR_mape, GBR_mape],
"R^2": [DT_r2, RF_r2, LR_r2, GBR_r2],
}
results = pd.DataFrame(data=dat)
results
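# Illustrative only: rank the models above by RMSE (lower is better)
results.sort_values("RMSE").reset_index(drop=True)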
# ### Random Forest CV
# Importing essential libraries
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
##Random Search CV
from sklearn.model_selection import RandomizedSearchCV
RRF = RandomForestRegressor(random_state=0)
n_estimator = list(range(1, 20, 2))
depth = list(range(1, 100, 2))
# Create the random grid
rand_grid = {"n_estimators": n_estimator, "max_depth": depth}
randomcv_rf = RandomizedSearchCV(
RRF, param_distributions=rand_grid, n_iter=5, cv=5, random_state=0
)
randomcv_rf = randomcv_rf.fit(X_train, y_train)
predictions_RRF = randomcv_rf.predict(X_test)
predictions_RRF = np.array(predictions_RRF)
view_best_params_RRF = randomcv_rf.best_params_
best_model = randomcv_rf.best_estimator_
predictions_RRF = best_model.predict(X_test)
# R^2
RRF_r2 = r2_score(y_test, predictions_RRF)
# Calculating MSE
RRF_mse = np.mean((y_test - predictions_RRF) ** 2)
# Calculate MAPE
RRF_mape = MAPE(y_test, predictions_RRF)
print("Random Search CV Random Forest Regressor Model Performance:")
print("Best Parameters = ", view_best_params_RRF)
print("R-squared = {:0.2}.".format(RRF_r2))
print("MSE = ", round(RRF_mse))
print("MAPE = {:0.4}%.".format(RRF_mape))
print("**********************************************")
### END OF CODE ###
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
path = "/kaggle/input/predicting-a-pulsar-star/pulsar_stars.csv"
stars = pd.read_csv(path)
print("size of our data : ", len(stars))
stars.head()
print("our data columns :\n", stars.columns)
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
def encod(cat_feat, df):
encoder = LabelEncoder()
for each in cat_feat:
fea = each + "_cat"
encoded = encoder.fit_transform(df[each])
df[fea] = encoded
stars2 = stars.copy()
cat = stars2.columns[4:8]
stars2[cat] = stars2[cat].astype(int)
stars2.head()
encod(cat, stars2)
stars2.head()
fr = 0.1
vsize = int(len(stars) * fr)
train = stars2[: -2 * vsize]
valid = stars2[-2 * vsize : -vsize]
test = stars2[-vsize:]
for each in [train, valid, test]:
print(f"Percentage of target values : {stars.target_class.mean():.4f}")
import lightgbm as lgb
def categ(feat_cols, train, valid, test):
dtrain = lgb.Dataset(data=train[feat_cols], label=train["target_class"])
dvalid = lgb.Dataset(data=valid[feat_cols], label=valid["target_class"])
dtest = lgb.Dataset(data=test[feat_cols], label=test["target_class"])
param = {"num_leaves": 64, "objectives": "binary"}
param["metric"] = "auc"
num_round = 500
bst = lgb.train(
param, dtrain, num_round, valid_sets=[dvalid], early_stopping_rounds=10
)
return bst
feat_cols = stars2.columns[9:12]
res = categ(feat_cols, train, valid, test)
from sklearn import metrics
ypred = res.predict(test[feat_cols])
score = metrics.roc_auc_score(test["target_class"], ypred)
print(f"our score is: {score}")
|
# # Introduction to BDTs
import pandas
import numpy as np
import matplotlib.pyplot as plt
import json
# Load data
hd_Signal = pandas.read_hdf("../input/Signal.h5", "df")
hd_Background = pandas.read_hdf("../input/Background.h5", "df")
hd_Signal.head()
# Select set of variables
variablelist = [
"nJets_OR_T",
"nJets_OR_T_MV2c10_70",
"Mll01",
"minDeltaR_LJ_0",
"minDeltaR_LJ_1",
"max_eta",
"lep_Pt_1",
"MET_RefFinal_et",
"DRll01",
]
# Plot distributions for two classes of events: Signal and Background
fig, ax = plt.subplots(3, 4, figsize=(25, 15))
nbins = 50
varcounter = -1
for i, axobjlist in enumerate(ax):
for j, axobj in enumerate(axobjlist):
varcounter += 1
if varcounter < len(variablelist):
var = variablelist[varcounter]
p_Signal = pandas.DataFrame({var: hd_Signal[var]})
p_Background = pandas.DataFrame({var: hd_Background[var]})
# b.replace([np.inf, -np.inf], np.nan, inplace=True)
# c.replace([np.inf, -np.inf], np.nan, inplace=True)
# b = b.dropna()
# c = c.dropna()
minval = np.amin(p_Signal[var])
maxval = max([np.amax(p_Signal[var]), np.amax(p_Background[var])]) * 1.4
binning = np.linspace(minval, maxval, nbins)
axobj.hist(
p_Signal[var], binning, histtype="step", label="Signal", density=1
) # color='orange',
axobj.hist(
p_Background[var],
binning,
histtype="step",
label="Background",
density=1,
) # color='b',
axobj.legend()
axobj.set_yscale("log", nonposy="clip")
axobj.set_title(variablelist[varcounter])
else:
axobj.axis("off")
plt.tight_layout()
plt.show()
# Prepare dataset for BDT training
Signal_vars = hd_Signal[variablelist]
Background_vars = hd_Background[variablelist]
X = np.concatenate((Signal_vars, Background_vars)) # training data
y = np.concatenate(
(np.ones(Signal_vars.shape[0]), np.zeros(Background_vars.shape[0]))
) # class lables
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
# split data to train and test samples
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
bdt_0 = AdaBoostClassifier(
DecisionTreeClassifier(
max_depth=4, max_features="auto", min_samples_split=10, min_samples_leaf=10
),
n_estimators=100,
learning_rate=0.5,
)
bdt_0.fit(X_train, y_train)
y_predicted_0 = bdt_0.predict(X_test)
print(
classification_report(y_test, y_predicted_0, target_names=["signal", "background"])
)
print(
"Area under ROC curve: %.4f"
% (roc_auc_score(y_test, bdt_0.decision_function(X_test)))
)
import xgboost
# bdt_xgb = xgboost.XGBClassifier(tree_method="hist", thread_count=-1)
bdt_xgb = xgboost.XGBClassifier(
tree_method="hist",
thread_count=-1,
max_depth=3,
learning_rate=0.1,
n_estimators=1000,
verbosity=1,
objective="binary:logistic",
booster="gbtree",
n_jobs=1,
gamma=0,
min_child_weight=1,
)
# , max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1, colsample_bynode=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5, random_state=0, missing=None, gpu_id=-1, **kwargs)
# agrees to the tth default:
# Method_Opt = "!H:!V:NTrees=1000:MinNodeSize=1.5%:BoostType=Grad:Shrinkage=0.10:
# UseBaggedBoost:BaggedSampleFraction=0.5:nCuts=20:MaxDepth=2";
bdt_xgb.fit(X_train, y_train)
y_predicted_xgb = bdt_xgb.predict(X_test)
print(
classification_report(
y_test, y_predicted_xgb, target_names=["signal", "background"]
)
)
xgb_bdt_ROC = roc_auc_score(y_test, bdt_xgb.predict_proba(X_test)[:, 1])
print("XGBoost ROC AUC = {:.3f}".format(xgb_bdt_ROC))
print(
"wrt BDT: %.4f"
% (xgb_bdt_ROC / roc_auc_score(y_test, bdt_0.decision_function(X_test)))
)
# ### Evaluate model time/ROC
import time
def evaluate_models(models_dict):
for model_name, model in models_dict.items():
start = time.time()
model.fit(X_train, y_train)
end = time.time()
print(
"{}; train time {:.3f} s; ROC AUC = {:.3f}".format(
model_name,
end - start,
roc_auc_score(y_test, model.predict_proba(X_test)[:, 1]),
)
)
mods = {"AdaBoost": bdt_0, "XGboost": bdt_xgb}
evaluate_models(mods)
fpr = dict()
tpr = dict()
roc_auc = dict()
i = 0
for model_name, model in mods.items():
# for i in range(len(mods.keys())):
print(i)
# fpr[i], tpr[i], _ = roc_curve(y_test, pred_vec[i])
fpr[i], tpr[i], _ = roc_curve(y_test, model.predict_proba(X_test)[:, 1])
# roc_auc_score(y_test, model.predict_proba(X_test)[:, 1])
roc_auc[i] = auc(fpr[i], tpr[i])
i += 1
# ### Compute ROC curve and ROC area for each class
plt.figure()
lw = 2
for i in range(len(mods)):
plt.plot(
fpr[i],
tpr[i],
lw=lw,
label="%s ROC (%0.3f)" % (list(mods.keys())[i], roc_auc[i]),
) # color='darkorange',
plt.plot([0, 1], [0, 1], color="navy", lw=lw, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic example")
plt.legend(loc="lower right")
plt.show()
# ### Display the structure of the tree
from xgboost import plot_tree
plot_tree(bdt_xgb, rankdir="LR")
# plot_tree(bst, num_trees=2),num_trees=1
fig = plt.gcf()
fig.set_size_inches(150, 100)
# ### Plot discriminant distribution for two classes
plot_range = (0, 1)
colors = ["orange", "blue"]
class_names = ["Signal", "Background"]
nbins = 40
for i in range(2):
plt.hist(
bdt_xgb.predict_proba(X_test)[:, i],
nbins,
range=plot_range,
label="Test %s" % class_names[i],
color=colors[i],
alpha=0.5,
density=True,
)
plt.hist(
bdt_xgb.predict_proba(X_train)[:, i],
nbins,
range=plot_range,
label="Train %s" % class_names[i],
color=colors[i],
alpha=0.5,
histtype="step",
density=True,
)
# x1, x2, y1, y2 = plt.axis()
# plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc="upper right")
plt.ylabel("Samples")
plt.xlabel("Score")
plt.title("Decision Scores")
# ### Overfitting example
bdt_xgb_ovf = xgboost.XGBClassifier(
tree_method="hist",
thread_count=-1,
max_depth=20,
learning_rate=0.1,
n_estimators=100,
verbosity=1,
objective="binary:logistic",
booster="gbtree",
)
bdt_xgb_ovf.fit(X_train, y_train)
y_predicted_xgb_ovf = bdt_xgb_ovf.predict(X_test)
xgb_bdt_ovf_ROC = roc_auc_score(y_test, bdt_xgb_ovf.predict_proba(X_test)[:, 1])
print("XGBoost ROC AUC = {:.3f}".format(xgb_bdt_ovf_ROC))
plot_range = (0, 1)
colors = ["orange", "blue"]
class_names = ["Signal", "Background"]
nbins = 40
for i in range(2):
plt.hist(
bdt_xgb_ovf.predict_proba(X_test)[:, i],
nbins,
range=plot_range,
label="Test %s" % class_names[i],
color=colors[i],
alpha=0.5,
density=True,
)
plt.hist(
bdt_xgb_ovf.predict_proba(X_train)[:, i],
nbins,
range=plot_range,
label="Train %s" % class_names[i],
color=colors[i],
alpha=0.5,
histtype="step",
density=True,
)
# x1, x2, y1, y2 = plt.axis()
# plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc="upper right")
plt.ylabel("Samples")
plt.xlabel("Score")
plt.title("Decision Scores")
from scipy import stats
from scipy.stats import ks_2samp
KS_stat = ks_2samp(
bdt_xgb.predict_proba(X_test)[:, 1], bdt_xgb.predict_proba(X_train)[:, 1]
)
KS_stat_ovf = ks_2samp(
bdt_xgb_ovf.predict_proba(X_test)[:, 1], bdt_xgb_ovf.predict_proba(X_train)[:, 1]
)
print(
"Kolmogorov-Smirnoff statistics for : \n - shallow tree - ",
KS_stat,
"\n - overfitting model",
KS_stat_ovf,
)
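# Note: a larger KS statistic (with a small p-value) means the train and test score
# distributions differ more strongly, which is a symptom of overfitting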
def empirical_cdf(sample, plotting=True):
N = len(sample)
rng = max(sample) - min(sample)
if plotting:
xs = np.concatenate(
[
np.array([min(sample) - rng / 3]),
np.sort(sample),
np.array([max(sample) + rng / 3]),
]
)
ys = np.append(np.arange(N + 1) / N, 1)
else:
xs = np.sort(sample)
ys = np.arange(1, N + 1) / N
return (xs, ys)
xs_test, ys_test = empirical_cdf(bdt_xgb.predict_proba(X_test)[:, 1])
xs_train, ys_train = empirical_cdf(bdt_xgb.predict_proba(X_train)[:, 1])
xs_test_ovf, ys_test_ovf = empirical_cdf(bdt_xgb_ovf.predict_proba(X_test)[:, 1])
xs_train_ovf, ys_train_ovf = empirical_cdf(bdt_xgb_ovf.predict_proba(X_train)[:, 1])
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 6))
# fig.suptitle('Horizontally stacked subplots')
ax1.set_title("Normal model")
ax1.plot(xs_test, ys_test, label="Test Signal", linewidth=3, linestyle=":")
ax1.plot(xs_train, ys_train, label="Train Signal")
ax1.set_ylabel("c. d. f.")
ax1.set_xlabel("Score")
ax1.legend()
ax2.set_title("Overfitting model")
ax2.set_ylabel("c. d. f.")
ax2.plot(xs_test_ovf, ys_test_ovf, label="Test Signal")
ax2.plot(xs_train_ovf, ys_train_ovf, label="Train Signal")
ax2.set_xlabel("Score")
ax2.legend()
# plt.step(xs_test, ys_test)
##plt.step(xs_train, ys_train)
# plt.step(xs_test_ovf, ys_test_ovf)
# plt.step(xs_train_ovf, ys_train_ovf)
plot_step = 0.2
x_min, x_max = X[:, 4].min(), X[:, 4].max()
y_min, y_max = X[:, 6].min(), X[:, 6].max()
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step * 2), np.arange(y_min, y_max, plot_step * 2)
)
# Scatter the two classes in the plane of two input features (the meshgrid prepared above is intended for a decision-boundary overlay but is not used further here)
plt.subplot(121)
plt.axis("tight")
for i in range(2):
idx = np.where(y == i)
plt.scatter(
X[idx, 4],
X[idx, 6],
# c=c, cmap=plt.cm.Paired,
s=20,
edgecolor="k",
label="Class %s" % i,
)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc="upper right")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Decision Boundary")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from sklearn import preprocessing
# Any results you write to the current directory are saved as output.
df = pd.read_csv(
"/kaggle/input/sloan-digital-sky-survey/Skyserver_SQL2_27_2018 6_51_39 PM.csv"
)
df.head(10)
df.columns
labels = df["class"]
df.drop("class", inplace=True, axis=1)
print(labels.unique())
listt = df.columns
unique_frac = []
for i in listt:
    unique_frac.append((len(df[i].unique()) / df.shape[0], i))
unique_frac = sorted(unique_frac)
for i, j in unique_frac:
    print(i, j)
# drop first 2 columns
df.drop(["objid", "rerun"], inplace=True, axis=1)
dict2 = []
for i in df.columns:
dict2.append((df[i].isnull().sum(), i))
dict2 = sorted(dict2, reverse=False)
for i, j in dict2:
print(i, j)
df.describe()
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
df = ss.fit_transform(df)
df1 = pd.DataFrame(df)
le = preprocessing.LabelEncoder()
labels = le.fit_transform(labels)
print(labels)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df1, labels, test_size=0.33)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
from sklearn import svm
clf = svm.SVC(kernel="rbf")
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
from sklearn.metrics import f1_score
y_pred = clf.predict(X_test)
print(f1_score(y_test, y_pred, average="macro"))
print(f1_score(y_test, y_pred, average="micro"))
print(f1_score(y_test, y_pred, average="weighted"))
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
from sklearn.metrics import f1_score
y_pred = clf.predict(X_test)
print(f1_score(y_test, y_pred, average="macro"))
print(f1_score(y_test, y_pred, average="micro"))
print(f1_score(y_test, y_pred, average="weighted"))
label_df = pd.DataFrame(y_train)
zeros = label_df[label_df == 0].count()
ones = label_df[label_df == 1].count()
two = label_df[label_df == 2].count()
import matplotlib.pyplot as plt
plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
objects = ("0", "1", "2")
y_pos = np.arange(len(objects))
performance = [int(zeros), int(ones), int(two)]
plt.bar(y_pos, performance, align="center", alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel("Usage")
plt.title("Programming language usage")
plt.show()
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
from sklearn.metrics import f1_score
print(clf.score(X_test, y_test))
y_pred = clf.predict(X_test)
print(f1_score(y_test, y_pred, average="macro"))
print(f1_score(y_test, y_pred, average="micro"))
print(f1_score(y_test, y_pred, average="weighted"))
|
# #Bellabeat by C
# author: "Chinwe O."
# date: "2023-04-10"
# Bellabeat Case Study in R
# ##About Bellabeat
# Founded in 2013 by Urška Sršen and Sando Mur, Bellabeat is a wellness technology company producing health-focused products targeted at women. With a range of products including the Bellabeat app, the Leaf and Time wearable trackers, and the Spring water bottle, they are a successful small company with the potential to become a more significant player in the global smart device market.
# Their sleek, well-crafted products inspire women all around the world. By helping them collect data on fitness, sleep and reproductive health, Bellabeat empowers women with the information to take charge of their well-being.
# Global expansion in 2016 saw the company grow its presence in many countries and online, partnering with several online retailers in addition to selling through its own website. They also invest in advertising, both traditional and digital, to create impact, especially around important marketing dates.
# Sršen believes that an analysis of consumer data will uncover more opportunities for growth. She has asked for an analysis of data on one Bellabeat product to gain insight into how people already use their devices.
# ##The Ask
# 1. What are some trends in smart device usage?
# 2. How could these trends apply to Bellabeat customers?
# 3. How could these trends help influence Bellabeat marketing strategy?
# ##The Business Task
# To analyze smart device usage data in order to gain insight into how consumers use non-Bellabeat smart
# devices.
# Then select one Bellabeat product to which these insights will be applied in my presentation.
# ##The Key Stakeholders
# 1. Urška Sršen: cofounder and Chief Creative Officer.
# 2. Sando Mur: cofounder and Bellabeat executive team member.
# 3. Bellabeat marketing analytics team.
# The Prepare Phase
# I am using 18 CSV files from FitBit Fitness Tracker Data by Möbius, available on Kaggle. This dataset contains personal fitness tracker data contributed by thirty Fitbit users who consented to the submission of their tracker data.
# Loading my packages
library(tidyverse)
library(lubridate)
library(ggplot2)
library(tidyr)
library(dplyr)
library(knitr)
library(readr)
# Upload files from FitBit Fitness Tracker Data by Möbius
weightLog <- read.csv(
"../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv"
)
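# (Assumption: the other data frames used below - dailyactivity, dailycalories,
# dailysteps, dailysleep and dailyintensities - are read in the same way with
# read.csv() from the same Fitabase folder; those calls are not shown in this excerpt.)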
# Viewed all uploaded data sets to establish variables to best use in creating our findings
# Clean data
# First by counting number of user id
#
count(distinct(dailyactivity, Id))
count(distinct(dailycalories, Id))
count(distinct(weightLog, Id))
count(distinct(dailysteps, Id))
count(distinct(dailysleep, Id))
count(distinct(dailyintensities, Id))
# All data frames have 33 users except for dailysleep (24) and weightLog (8). We will exclude the weightLog data as its sample size is too small.
# The limited dailysleep data will be cited in the final analysis.
# Now check for any duplicated data
#
anyDuplicated(dailyactivity)
anyDuplicated(dailycalories)
anyDuplicated(dailysteps)
anyDuplicated(dailysleep)
anyDuplicated(dailyintensities)
|
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from datetime import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import folium
from folium.plugins import HeatMap
print("Last updated: ", datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC"))
wc = pd.read_csv("../input/world-coordinates/world_coordinates.csv")
df = pd.read_csv("../input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
df
# Drop duplicate entries, if any
df.drop_duplicates(inplace=True)
df
# Remove columns not required for study
df.drop(["Sno", "Last Update"], axis=1, inplace=True)
df.head()
# List of affected provinces/states
aff_ps = df["Province/State"].unique()
print(aff_ps)
print("Total:", len(aff_ps))
# Number of cases in each Province/State
case_ps = df.groupby("Province/State", as_index=False)[
["Confirmed", "Deaths", "Recovered"]
].max()
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
): # Prevent truncation
display(case_ps) # Maintain rich formatting by using display() instead of print()
# List of affected countries
aff_c = df.Country.unique()
print(aff_c)
print("Total:", len(aff_c))
# Replacing Mainland China with China to make dataset consistent
df["Country"].replace({"Mainland China": "China"}, inplace=True)
# Number of cases in each Country
case_c = df.groupby(["Country", "Date"]).sum().reset_index()
case_c = case_c.sort_values("Date", ascending=False)
case_c = case_c.drop_duplicates(subset=["Country"])
with pd.option_context("display.max_rows", None, "display.max_columns", None):
display(
case_c.sort_values("Country")[
["Country", "Confirmed", "Deaths", "Recovered"]
].reset_index(drop=True)
)
# Total number of cases
print("Total Confirmed:", case_c["Confirmed"].sum())
print("Total Deaths:", case_c["Deaths"].sum())
print("Total Recovered:", case_c["Recovered"].sum())
# Plot number of cases in different countries
plt.rcParams["figure.figsize"] = (16, 8)
sns.barplot(x="Country", y="Confirmed", data=case_c)
plt.xticks(rotation=90)
plt.xlabel("Affected countries", fontsize=15)
plt.ylabel("Number of cases", fontsize=15)
# Number of cases in different provinces in China
case_ps.rename(columns={"Province/State": "Province"}, inplace=True)
sns.barplot(x="Province", y="Confirmed", data=case_ps)
plt.xticks(rotation=90)
plt.xlabel("Affected provinces", fontsize=15)
plt.ylabel("Number of cases", fontsize=15)
plt.rcParams["figure.figsize"] = (16, 8)
# Number of cases in Provinces other than Hubei
sns.barplot(x="Province", y="Confirmed", data=case_ps[case_ps.Province != "Hubei"])
plt.xticks(rotation=90)
plt.xlabel("Other affected provinces", fontsize=15)
plt.ylabel("Number of cases", fontsize=15)
plt.rcParams["figure.figsize"] = (16, 8)
# Number of cases in countries other than China
sns.barplot(
x="Country",
y="Confirmed",
    data=case_c[(case_c.Country != "China") & (case_c.Country != "Others")],
)
plt.xticks(rotation=90)
plt.xlabel("Other affected countries", fontsize=15)
plt.ylabel("Number of cases", fontsize=15)
plt.rcParams["figure.figsize"] = (16, 8)
# Time-series analysis
df_date = df.groupby("Date", as_index=False)[["Confirmed", "Deaths", "Recovered"]].sum()
df_date
# If Timestamp is required, run the following code
# df['Timestamp'] = pd.to_datetime(df['Date']).astype(int)/10**10
df_date["Date"] = pd.to_datetime(
df_date["Date"]
).dt.date # Converting date-time to date
df_date
# Plot the cases
plt.subplot(1, 2, 1)
plt.plot(
"Date",
"Confirmed",
data=df_date.groupby(["Date"]).sum().reset_index(),
color="blue",
)
plt.xticks(rotation=60)
plt.xlabel("Dates", fontsize=12)
plt.ylabel("Number of cases", fontsize=12)
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(
"Date", "Deaths", data=df_date.groupby(["Date"]).sum().reset_index(), color="red"
)
plt.plot(
"Date",
"Recovered",
data=df_date.groupby(["Date"]).sum().reset_index(),
color="green",
)
plt.xticks(rotation=60)
plt.xlabel("Dates", fontsize=12)
plt.ylabel("Number of cases", fontsize=12)
plt.legend()
plt.rcParams["figure.figsize"] = (18, 8)
plt.show()
# Merge world coordinates with nCoV dataframe
wc_df = pd.merge(wc, case_c, on="Country")
wc_df.drop(["Code", "Date", "Deaths", "Recovered"], axis=1, inplace=True)
wc_df
# Folium Heatmap
heatmap = folium.Map(location=[35.861660, 104.195397], zoom_start=3)
heat_data = [
[row["latitude"], row["longitude"], row["Confirmed"]]
for index, row in wc_df.iterrows()
]
# Plot it on the map
HeatMap(heat_data).add_to(heatmap)
# Display the map
heatmap
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.normalization import BatchNormalization
from sklearn.utils import class_weight
df2 = pd.read_csv("/kaggle/input/bitsf312-lab1/train.csv", sep=",")
# size = {"Small" : 1, "Medium" : 2, "Big": 3, "?" : np.nan}
# df2.Size = [size[item] for item in df2.Size]
# Replace the "?" placeholders with NaN across the whole dataframe
df2 = df2.replace("?", np.nan)
df2["Size"].value_counts()
df2.head()
df2["Size"].fillna("Medium", inplace=True)
df2.head()
df2.dtypes
df2["Class"].value_counts()
df2 = pd.concat([df2, pd.get_dummies(df2["Size"], prefix="size")], axis=1)
df2.drop(["Size"], axis=1, inplace=True)
df2.head()
for col in df2:
df2[col] = pd.to_numeric(df2[col])
df2.dtypes
for col in df2:
df2[col].fillna((df2[col].mean()), inplace=True)
plt.figure(figsize=(20, 20))
corr = df2.corr()
corr.style.background_gradient(cmap="RdYlGn")
dfY = df2["Class"]
cols = [0, 2, 4, 5, 11]
dftest = df2.drop(df2.columns[cols], axis=1)
plt.figure(figsize=(20, 20))
corr = dftest.corr()
corr.style.background_gradient(cmap="RdYlGn")
dftest
dftest = dftest.values
y = dfY.values
dftest.shape, y.shape
from sklearn import preprocessing
X = dftest
min_max_scaler = preprocessing.MinMaxScaler()
X_scale = min_max_scaler.fit_transform(X)
X.shape
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X_scale, y, test_size=0.2)
dummy_ytrain = np_utils.to_categorical(Y_train)
dummy_ytest = np_utils.to_categorical(Y_test)
dummy_ytrain
model = Sequential()
model.add(Dense(10, input_dim=10, activation="relu"))
model.add(Dropout(rate=0.25))
model.add(Dense(32, activation="relu"))
model.add(Dropout(rate=0.25))
model.add(Dense(32, activation="relu"))
# model.add(Dropout(rate=0.2))
# model.add(Dense(64, activation='relu'))
model.add(Dropout(rate=0.25))
model.add(Dense(6, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
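# Note: class_weight was imported above but is never used. If the classes were
# imbalanced, per-class weights could be passed to fit(); an illustrative sketch
# (names as defined above, kept commented out):
# cw = class_weight.compute_class_weight(
#     class_weight="balanced", classes=np.unique(Y_train), y=Y_train
# )
# model.fit(X_train, dummy_ytrain, epochs=250, validation_split=0.2,
#           batch_size=25, class_weight=dict(zip(np.unique(Y_train), cw)))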
model.fit(X_train, dummy_ytrain, epochs=250, validation_split=0.2, batch_size=25)
model.evaluate(X_test, dummy_ytest)
df1 = pd.read_csv("/kaggle/input/bitsf312-lab1/test.csv", sep=",")
df_submit = pd.DataFrame()
df_submit["ID"] = df1["ID"]
df1 = pd.concat([df1, pd.get_dummies(df1["Size"], prefix="size")], axis=1)
df1.drop(["Size"], axis=1, inplace=True)
df1.head()
cols = [0, 2, 4, 5]
df1 = df1.drop(df1.columns[cols], axis=1)
df1.head()
dtest = df1.values
Xtest = dtest
# Reuse the MinMaxScaler fitted on the training data (do not refit on the test set)
Xtest_scale = min_max_scaler.transform(Xtest)
Xtest_scale.shape
y_submit = model.predict_classes(Xtest_scale, batch_size=40)
y_submit
df_submit["Class"] = y_submit
df_submit.to_csv("Test1.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # data Visualization
import seaborn as sns # data Visualization
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Importing Datasets
df_train = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/train.csv")
df_test = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/test.csv")
df_sub = pd.read_csv("/kaggle/input/cat-in-the-dat-ii/sample_submission.csv")
sns.countplot(x=df_train["target"], data=df_train, palette="seismic")
plt.title("TARGET DISTRIBUTION", fontsize=20)
plt.xlabel("Target Values", fontsize=15)
plt.ylabel("Count", fontsize=15)
plt.show()
df_train.sort_index(inplace=True)
df_train.head()
y_train = df_train["target"]
test_id = df_test["id"]
df_train.drop(["target", "id"], axis=1, inplace=True)
df_test.drop("id", axis=1, inplace=True)
cat_feat_to_encode = df_train.columns.tolist()
smoothing = 0.20
import category_encoders as ce
oof = pd.DataFrame([])
from sklearn.model_selection import StratifiedKFold
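# Out-of-fold target encoding: for each fold the encoder is fitted only on the other
# folds and used to transform the held-out rows, avoiding target leakage; a final
# encoder fitted on the full training set is applied to the test data below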
for tr_idx, oof_idx in StratifiedKFold(
n_splits=5, random_state=1032, shuffle=True
).split(df_train, y_train):
ce_target_encoder = ce.TargetEncoder(cols=cat_feat_to_encode, smoothing=smoothing)
ce_target_encoder.fit(df_train.iloc[tr_idx, :], y_train.iloc[tr_idx])
oof = oof.append(
ce_target_encoder.transform(df_train.iloc[oof_idx, :]), ignore_index=False
)
ce_target_encoder = ce.TargetEncoder(cols=cat_feat_to_encode, smoothing=smoothing)
ce_target_encoder.fit(df_train, y_train)
df_train = oof.sort_index()
df_test = ce_target_encoder.transform(df_test)
x_train = df_train.iloc[:, :].values
x_test = df_test.iloc[:, :].values
# Importing the Libraries for ANN
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
# Initilasing the ANN
classifier = Sequential()
# Now lets creat Neural Networks
classifier.add(
Dense(units=256, kernel_initializer="uniform", input_dim=23, activation="relu")
)
classifier.add(Dense(units=128, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=64, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=32, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid"))
# Compile the ANN
from keras.optimizers import Adam
adam = Adam(lr=0.001)
classifier.compile(optimizer=adam, loss="binary_crossentropy", metrics=["accuracy"])
# Fitting ANN to the Traning set
classifier.fit(x_train, y_train, batch_size=1000, epochs=100)
# Predicting the Test set result
y_pred = classifier.predict_proba(x_test)[:, 0]
# Sumbmission the result
df_sub = pd.DataFrame()
df_sub["id"] = test_id
df_sub["target"] = y_pred
df_sub.to_csv("submission.csv", index=False)
df_sub.head(20)
|
# # PitchFork data wrangling and visualisation
# Table of Contents
# PitchFork data wrangling and visualisation
#   1. Explore data
#   2. Do review scores for an individual artist improve over time, or go down?
#   3. Is the average score of an artist correlated with the number of reviews?
import sqlite3, datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import MaxNLocator
# For Interactive control
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
import cufflinks as cf
# For Regression
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
# Import all data
with sqlite3.connect("../input/pitchfork-data/database.sqlite") as conn:
artists = pd.read_sql("SELECT * FROM artists", conn)
content = pd.read_sql("SELECT * FROM content", conn)
genres = pd.read_sql("SELECT * FROM genres", conn)
labels = pd.read_sql("SELECT * FROM labels", conn)
reviews = pd.read_sql("SELECT * FROM reviews", conn)
years = pd.read_sql("SELECT * FROM years", conn)
# # For presentation, hide code
# from IPython.display import HTML
# HTML('''<script>
# code_show=true;
# function code_toggle() {
# if (code_show){
# $('div.input').hide();
# } else {
# $('div.input').show();
# }
# code_show = !code_show
# }
# $( document ).ready(code_toggle);
# </script>
# <form action="javascript:code_toggle()"><input type="submit" value="Hide/Show raw code."></form>''')
# # 1. Explore data
df_list = [artists, content, genres, labels, reviews, years]
for df in df_list:
display(df.head())
display(df.info())
# Check if there are duplicate reviewid values within the tables: yes
for df in df_list:
print(df["reviewid"].nunique())
# Create datetime column for YYYY-mm
reviews["date"] = pd.to_datetime(reviews["pub_date"])
reviews["year_month"] = reviews["date"].dt.strftime("%Y-%m")
# Number of reviews over year
df_year = reviews.groupby("pub_year")["reviewid"].nunique()
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
df_year.plot.bar()
# Histogram of scores
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
reviews["score"].hist(ax=ax, bins=50, edgecolor="white", grid=False)
reviews["score"].describe()
# # 2. Do review scores for an individual artist improve over time, or go down?
df2 = reviews.groupby(["artist", "year_month"], as_index=False).agg(
{"score": np.mean, "reviewid": "nunique"}
)
@interact
def ind_artist(artist=df2["artist"].unique()):
df2_artist = df2[df2["artist"] == artist]
df2_artist = df2_artist.set_index("year_month")
fig, ax1 = plt.subplots(1, 1, figsize=(12, 6))
# Plot monthly average score
df2_artist["score"].plot.bar(ax=ax1, rot=0, color="silver")
ax1.set_xlabel("")
ax1.set_ylabel("Average Score")
ax1.set_title("Average Review Score of Individual Artist Across Time")
# Add bar values
for p in ax1.patches:
ax1.annotate(
"{:,.1f}".format(np.round(p.get_height(), decimals=4)),
(p.get_x() + p.get_width() / 2.0, p.get_height()),
ha="center",
va="center",
xytext=(0, 10),
textcoords="offset points",
)
# Remove the frame
for spine in ax1.spines.values():
spine.set_visible(False)
# Plot monthly review numbers
ax2 = ax1.twinx()
ax2.plot(
df2_artist["reviewid"],
color="darkred",
linewidth=2,
marker="o",
markersize=7,
markeredgecolor="w",
)
ax2.set_ylabel("Number of reviews", color="darkred")
# Remove the frame
for spine in ax2.spines.values():
spine.set_visible(False)
# Force y-axis ticks integer
max_review = df2_artist["reviewid"].max()
ax2.set_ylim([0, max_review + 1])
ax2.yaxis.set_major_locator(MaxNLocator(integer=True))
# # 3. Is the average score of an artist correlated with the number of reviews?
df3 = reviews.groupby(["artist"], as_index=False).agg(
{"score": np.mean, "reviewid": "nunique"}
)
df3 = df3.rename(columns={"reviewid": "reviews"})
df3.head()
# Remove outliers
df3 = df3[df3["reviews"] <= df3["reviews"].quantile(0.99)]
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
ax = sns.violinplot(x=df3["reviews"], y=df3["score"])
# Create a bucket of 0.5 points for average scores
df3["score_g"] = df3["score"].apply(lambda x: np.ceil(x / 0.5) * 0.5)
df3.head()
# Plot scatter plot average score for an artist and number of reviews
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
ax.scatter(x=df3["reviews"], y=df3["score_g"], s=10, color="b", alpha=0.2)
ax.set_xlabel("Number of reviews for an artist")
ax.set_ylabel("Average score")
ax.set_title("Correlation between average score and number of reviews for an artist")
print("Correlation between average score and number of reviews for an artist: ")
print(df3[["reviews", "score"]].corr())
# Fit a regression model (PolynomialFeatures with degree=1, i.e. a straight-line fit)
x = df3["reviews"].values.reshape(-1, 1)
y = df3["score_g"].values.reshape(-1, 1)
model = PolynomialFeatures(degree=1)
x_model = model.fit_transform(x)
model.fit(x_model, y)
model1 = LinearRegression()
model1.fit(x_model, y)
# Plotting
x_range = np.arange(df3["reviews"].min(), df3["reviews"].max() + 1, 1).reshape(-1, 1)
ax.plot(
x_range,
model1.predict(model.fit_transform(x_range)),
color="r",
linewidth=1,
zorder=1,
label="ZFS",
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
df = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
df["Country"] = df["Country"].replace("Mainland China", "China")
df[df["Country"] == "China"]
df = df.drop(["Sno", "Date"], axis=1)
df["Last Update"] = pd.to_datetime(df["Last Update"])
df[["Province/State", "Country"]] = df[["Province/State", "Country"]].fillna(
"Unavailable"
)
df[["Confirmed", "Deaths", "Recovered"]] = df[
["Confirmed", "Deaths", "Recovered"]
].fillna(0.0)
df.head(5)
# df[df['Country'] == 'China']
df.shape
latest_data_df = (
df.groupby(["Country", "Province/State"])[
"Last Update", "Confirmed", "Deaths", "Recovered"
]
.max()
.reset_index()
)
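# latest_data_df keeps, for every (Country, Province/State) pair, the most recent
# update time and the maximum cumulative Confirmed/Deaths/Recovered counts reported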
latest_data_df = latest_data_df[
["Country", "Province/State", "Confirmed", "Recovered", "Deaths", "Last Update"]
]
latest_data_df.shape
latest_data_df.head(50)
# df[df['Country'] == 'Australia'].tail(5)
latest_data_df.head(5)
china_df = latest_data_df[latest_data_df["Country"] == "China"].reset_index(drop=True)
china_df
grouped_cnf_df = (
    latest_data_df.groupby(["Country"])[["Confirmed", "Recovered", "Deaths"]]
.sum()
.reset_index()
)
grouped_cnf_df.head(5)
grouped_cnf_df = grouped_cnf_df[
(grouped_cnf_df["Country"] != "China") & (grouped_cnf_df["Country"] != "Others")
]
grouped_cnf_df
import plotly.express as px
init_notebook_mode(connected=True) # do not miss this line
fig = px.bar(
grouped_cnf_df,
x="Confirmed",
y="Country",
orientation="h",
color="Confirmed",
height=600,
)
fig.update_layout(yaxis={"categoryorder": "total ascending"})
iplot(fig)
import plotly.graph_objects as go
fig = go.Figure(
go.Bar(
x=grouped_cnf_df["Confirmed"],
y=grouped_cnf_df["Country"],
name="Confirmed",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=grouped_cnf_df["Deaths"],
y=grouped_cnf_df["Country"],
name="Deaths",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=grouped_cnf_df["Recovered"],
y=grouped_cnf_df["Country"],
name="Recovered",
orientation="h",
)
)
fig.update_layout(
barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000
)
fig.show()
# China based visualizations
china_df.head(5)
fig = go.Figure(
go.Bar(
x=china_df["Confirmed"],
y=china_df["Province/State"],
name="Confirmed",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=china_df["Deaths"],
y=china_df["Province/State"],
name="Deaths",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=china_df["Recovered"],
y=china_df["Province/State"],
name="Recovered",
orientation="h",
)
)
fig.update_layout(
barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000
)
fig.show()
fig = go.Figure(
go.Bar(
x=china_df[china_df["Province/State"] != "Hubei"]["Confirmed"],
y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"],
name="Confirmed",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] != "Hubei"]["Deaths"],
y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"],
name="Deaths",
orientation="h",
)
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] != "Hubei"]["Recovered"],
y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"],
name="Recovered",
orientation="h",
)
)
fig.update_layout(
barmode="stack", yaxis={"categoryorder": "total ascending"}, height=1000
)
fig.show()
from plotly.subplots import make_subplots
fig = make_subplots(
rows=2,
cols=1,
start_cell="bottom-left",
row_heights=[0.96, 0.04],
vertical_spacing=0.09,
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] != "Hubei"]["Confirmed"],
y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"],
name="Confirmed",
orientation="h",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] != "Hubei"]["Deaths"],
y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"],
name="Deaths",
orientation="h",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] != "Hubei"]["Recovered"],
y=china_df[china_df["Province/State"] != "Hubei"]["Province/State"],
name="Recovered",
orientation="h",
),
row=1,
col=1,
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] == "Hubei"]["Confirmed"],
y=china_df[china_df["Province/State"] == "Hubei"]["Province/State"],
name="Confirmed",
orientation="h",
),
row=2,
col=1,
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] == "Hubei"]["Deaths"],
y=china_df[china_df["Province/State"] == "Hubei"]["Province/State"],
name="Deaths",
orientation="h",
),
row=2,
col=1,
)
fig.add_trace(
go.Bar(
x=china_df[china_df["Province/State"] == "Hubei"]["Recovered"],
y=china_df[china_df["Province/State"] == "Hubei"]["Province/State"],
name="Recovered",
orientation="h",
),
row=2,
col=1,
)
fig.update_layout(
showlegend=False,
barmode="stack",
yaxis={"categoryorder": "total ascending"},
height=700,
)
fig.show()
import plotly.express as px
data = px.data.gapminder()
data_canada = data[data.country == "Canada"]
fig = px.bar(
data_canada,
x="year",
y="pop",
hover_data=["lifeExp", "gdpPercap"],
color="lifeExp",
labels={"pop": "population of Canada"},
height=400,
)
fig.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from sklearn import *
import datetime as dt
#### Import Dependencies
#### Start Python Imports
import math, time, random, datetime
#### Data Manipulation
import numpy as np
import pandas as pd
#### Visualization
import matplotlib.pyplot as plt
import missingno
import seaborn as sns
plt.style.use("seaborn-whitegrid")
#### Preprocessing
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
#### Machine learning
import catboost
from sklearn.model_selection import train_test_split
from sklearn import model_selection, tree, preprocessing, metrics, linear_model
from sklearn.svm import LinearSVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from catboost import CatBoostClassifier, Pool, cv
##### Let's be rebels and ignore warnings for now
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("../input/cat-in-the-dat/train.csv")
test = pd.read_csv("../input/cat-in-the-dat/test.csv")
print("Data is loaded!")
def RMSLE(y, pred):
return metrics.mean_squared_error(y, pred) ** 0.5
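# Note: despite its name, this helper returns the plain RMSE (no log transform is applied)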
data = train.copy()
valid = test.copy()
# data.nunique()
# valid.nunique()
# in case needs
# get a list of object cat columns
# Get list of categorical variables
s = data.dtypes == "object"
object_cols = list(s[s].index)
print("Categorical variables:")
print(object_cols)
len(object_cols)
# We will separate the object columns that should be one-hot encoded (< 15 unique values) from the columns that should be label encoded (the rest of the object categorical columns)
OH_col = data.loc[:, data.nunique() < 15].columns
new_OH = []
for x in OH_col:
if x in object_cols:
new_OH.append(x)
# new_OH
LE_col = data.loc[:, data.nunique() >= 15].columns
new_LE = []
for x in LE_col:
if x in object_cols:
new_LE.append(x)
# new_LE
# ### Lebel encoding : inplace
# Make copy to avoid changing original data
label_X_train = data.copy()
label_X_valid = valid.copy()
# Apply label encoder to each column with categorical data
# Fit each encoder on the combined train+test categories so both sets share one mapping
for col in new_LE:
    label_encoder = LabelEncoder()
    label_encoder.fit(pd.concat([data[col], valid[col]], axis=0))
    label_X_train[col] = label_encoder.transform(data[col])
    label_X_valid[col] = label_encoder.transform(valid[col])
print(label_X_train.shape)
print(label_X_valid.shape)
label_X_train.head(2)
label_X_valid.head(2)
# use label_X_train and label_X_valid for next calculations ( One hot encoding )
# ### * One Hot encoding
# label_X_train[new_OH].nunique()
# Apply one-hot encoder to each column with categorical data
OH_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(label_X_train[new_OH]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(label_X_valid[new_OH]))
## The encoder is fitted on the training data only and then applied to the test set,
## so both get the same dummy-column layout (handle_unknown="ignore" covers unseen categories)
print(OH_cols_train.shape)
print(OH_cols_valid.shape)
label_X_train[new_OH].nunique().sum()
# means OH_cols_train has no data of rest of columns....
# so now add the data back
# One-hot encoding removed index; put it back
OH_cols_train.index = label_X_train.index
OH_cols_valid.index = label_X_valid.index
# Remove categorical columns (will replace with one-hot encoding)
# these are columns which has numerical data and lebel encoding columns that's been processed already.
num_X_train = label_X_train.drop(new_OH, axis=1)
num_X_valid = label_X_valid.drop(new_OH, axis=1)
# num_X_train.head(2)
# num_X_valid.head(2)
# Add one-hot encoded columns to numerical features
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
# OH_X_train.head(2)
# OH_X_valid.head(2)
print(OH_X_train.shape)
print(OH_X_valid.shape)
# > ### * ML Algo
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.utils.testing import ignore_warnings
rf = RandomForestClassifier(n_estimators=200, n_jobs=-1, verbose=2)
# model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
lr1 = LogisticRegression(solver="lbfgs", C=0.1)
X_train = OH_X_train.drop("target", axis=1)
y_train = OH_X_train["target"]
X_train = X_train.drop("id", axis=1)
X_test = OH_X_valid.drop("id", axis=1)
# scaler = MinMaxScaler(feature_range=(0, 1))
# X_train = scaler.fit_transform(X_train)
# X_test = scaler.fit_transform(X_test)
print(X_train.shape)
print(y_train.shape)
# rf.fit(X_train, y_train)
# lr1.fit(X_train, y_train)
# alternate cv method
X, X_hideout, y, y_hideout = model_selection.train_test_split(
X_train, y_train, test_size=0.13, random_state=42
)
# Set up folds
K = 4
kf = model_selection.KFold(n_splits=K, random_state=1, shuffle=True)
np.random.seed(1)
# model = SVR(kernel='rbf')
params = {
"n_estimators": 10, # change to 9000 to obtain 0.505 on LB (longer run time expected)
"max_depth": 5,
"min_samples_split": 200,
"min_samples_leaf": 50,
"learning_rate": 0.005,
"max_features": "sqrt",
"subsample": 0.8,
"loss": "ls",
}
# model = ensemble.GradientBoostingRegressor(**params)
model = ensemble.RandomForestClassifier(n_jobs=-1, verbose=2)
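# Note: the params dict above is written for the commented-out GradientBoostingRegressor;
# the RandomForestClassifier used here runs with its default hyperparameters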
print("Started CV at ", dt.datetime.now())
for i, (train_index, test_index) in enumerate(kf.split(X)):
# Create data for this fold
y_train, y_valid = y.iloc[train_index].copy(), y.iloc[test_index]
X_train, X_valid = X.iloc[train_index, :].copy(), X.iloc[test_index, :].copy()
# X_test = test[col]
print("\nFold ", i)
fit_model = model.fit(X_train, y_train)
pred = model.predict(X_valid)
print("RMSLE GBM Regressor, validation set, fold ", i, ": ", RMSLE(y_valid, pred))
pred_hideout = model.predict(X_hideout)
print(
"RMSLE GBM Regressor, hideout set, fold ",
i,
": ",
RMSLE(y_hideout, pred_hideout),
)
print(
"Prediction length on validation set, GBM Regressor, fold ", i, ": ", len(pred)
)
# Accumulate test set predictions
del X_train, X_valid, y_train
print("Finished CV at ", dt.datetime.now())
# scores = []
# best_svr = SVR(kernel='rbf')
# #random_state=42, shuffle=False
# cv = KFold(n_splits=10)
# for train_index, test_index in cv.split(X_train):
# print("Train Index: ", train_index, "\n")
# print("Test Index: ", test_index)
# X_tr = X_train.iloc[train_index,:]
# X_tes = X_train.iloc[test_index,:]
# y_tr = y_train.iloc[train_index]
# y_tes = y_train.iloc[test_index]
# print(X_tr.shape)
# print(X_tes.shape)
# print(y_tr.shape)
# print(y_tes.shape)
# #best_svr.fit(X_tr, y_tr)
# #scores.append(best_svr.score(X_tes, y_tes))
# X_train.iloc[[1,3],:]
# y_train.iloc[30000]
X_test.head(2)
# predictions = rf.predict(X_test)
# predict_lr = lr1.predict_proba(X_test)
# prediction_svr = best_svr.predict(X_test)
# submission = pd.DataFrame()
# submission_LR = pd.DataFrame()
# submission_svr = pd.DataFrame()
# submission["id"] = OH_X_valid["id"]
# submission_LR["id"] = OH_X_valid["id"]
# submission_svr['id'] = OH_X_valid["id"]
# submission["target"] = predictions
# submission_LR["target"] = predict_lr[:, 1]
# submission_svr["target"] = prediction_svr
prediction = model.predict(X_test)
submission = pd.DataFrame()
submission["id"] = OH_X_valid["id"]
submission["target"] = prediction
submission.to_csv("cat_submission1.csv", index=False)
# predict_lr and submission_LR are only defined in the commented-out logistic-regression
# block above, so the lines that used them are disabled here:
# predict_lr[:, 1]
# submission_LR.to_csv("cat_submission_lr.csv", index=False)
submission.target.value_counts().sum()
from sklearn.model_selection import cross_validate
# X_train / y_train were deleted inside the CV loop above, so cross-validate on X, y instead
score = cross_validate(lr1, X, y, cv=3, scoring="roc_auc")["test_score"].mean()
print(f"{score:.6f}")
|
# All data are taken at 250 Hz
# Importing Libraries Requird for the Preprocessing
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import scipy.io
from math import pi
from scipy.fftpack import fft
import scipy.signal as sig
import os
import math
# Collect the paths of all input data files into a list
def load_data():
f = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
f.append(os.path.join(dirname, filename))
return f
# Calling the load function
f = load_data()
# showing the data
for k in f:
print(k)
# This contains two datasets:
# Dataset 1 contains SOS, STOP, and MEDICINE
# Dataset 3 contains COME_HERE and WASHROOM
# In the extract_raw_data function we separate them into Mdata_1 and Mdata_3
def extract_raw_data():
Mdata_1 = []
Mdata_3 = []
for i in range(len(f)):
mat = scipy.io.loadmat(f[i])
col = mat["labels"]
data = mat["data"]
col = [j for j in col]
# pdata is a dataframe
pdata = pd.DataFrame(data, columns=col)
if i < 13:
Mdata_3.append(pdata)
else:
Mdata_1.append(pdata)
return Mdata_1, Mdata_3
# Calling function to extract dataframe into Mdata_1 and Mdata_3
# Mdata_1 contains::
# + SOS - SOS - SOS + STOP - STOP - STOP - + MEDICINE - MEDICINE - MEDICINE
# +: 2sec, words:1sec, blank: 2sec
# Mdata_3 contains::
# + COME_HERE - COME_HERE - COME_HERE + WASHROOM - WASHROOM - WASHROOM
# +: 2sec, words:1sec, blanks:2sec
Mdata_1, Mdata_3 = extract_raw_data()
# showing shape of dataframe of each subject(13 subjects)
# for Mdata_1 :
for i in Mdata_1:
print(i.shape)
# ////////////////////rough
plt.rcParams["figure.figsize"] = (20, 20)
Fs = 250
# f=20
# x=np.sin(2*pi*f*t)#+0.5*np.sin(2*pi*40*t)+0.5*np.sin(2*pi*80*t)
# Generate Noise
# y=0.25*np.sin(2*pi*50*t)
# y=np.array([50]*len(t))
# x=x+y;#Noisy Signal
x = Mdata_1[0]["ExG1"] / (max(abs(Mdata_1[0]["ExG1"])))
# t=np.arange(0,,1/Fs)
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(x)
plt.title("sinusodial wave")
plt.xlabel("Time(s)")
plt.ylabel("Amplitude")
# spectral Analysis
# Compute FFT
X = fft(x)
# Generate Frequency axis
# n=np.size(t)
# fr=(Fs/2)*np.linspace(0,9,(n//2))
x_m = abs(X) / (max(abs(X)))
# print((fr))
plt.subplot(2, 1, 2)
plt.plot((pow(10, x_m)))
plt.title("Magnitude spectrum")
plt.xlabel("Frequency")
plt.ylabel("Magnitude")
plt.tight_layout()
# make dataframe of equal size of Mdata_1
for i in range(len(Mdata_1)):
Mdata_1[i] = Mdata_1[i][0:7250]
print(Mdata_1[i].shape)
# showing shape of dataframe of each subject(13 subjects)
# for Mdata_3 :
for i in Mdata_3:
print(i.shape)
# making dataframe of equal size Mdata_3
for i in range(len(Mdata_3)):
Mdata_3[i] = Mdata_3[i][0:4500]
print(Mdata_3[i].shape)
# Collecting the useful data where subject were thinking the words
# for Mdata_1
def seg_data1():
f = 250
u1 = []
for n in range(len(Mdata_1)):
a = 2 * f
g = pd.DataFrame()
for i in range(9):
if i == 5:
h = Mdata_1[n][a : a + f]
a += 2 * f
else:
h = Mdata_1[n][a : a + f]
g = pd.concat([g, h])
a += 3 * f
u1.append(g)
return u1
# Collecting the useful data where subject were thinking the words
# for Mdata_3
def seg_data3():
f = 250
u3 = []
    for n in range(len(Mdata_3)):
a = 2 * f
g = pd.DataFrame()
for i in range(6):
            h = Mdata_3[n][a : a + f]
g = pd.concat([g, h])
a += 3 * f
u3.append(g)
return u3
data1 = seg_data1()
data3 = seg_data3()
data1[0]["ExG5"].shape
plt.rcParams["figure.figsize"] = (20, 150)
a, b = plt.subplots(30)
loopi = 0
for y in list(data1[0].columns.values):
b[loopi].plot([i for i in range(len(data1[1][y]))], data1[1][y])
loopi += 1
plt.rcParams["figure.figsize"] = (20, 10)
t = [i for i in range(len(data1[0]["ExG5"]))]
plt.plot(t, data1[0]["ExG5"])
plt.rcParams["figure.figsize"] = (20, 20)
Fs = 250
t = np.arange(0, 9, 1 / Fs)
# f=20
# x=np.sin(2*pi*f*t)#+0.5*np.sin(2*pi*40*t)+0.5*np.sin(2*pi*80*t)
# Generate Noise
# y=0.25*np.sin(2*pi*50*t)
# y=np.array([50]*len(t))
# x=x+y;#Noisy Signal
x = data1[0]["ExG1"] / (max(abs(data1[0]["ExG1"])))
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(t, x)
plt.title("sinusodial wave")
plt.xlabel("Time(s)")
plt.ylabel("Amplitude")
# spectral Analysis
# Compute FFT
X = fft(x)
# Generate Frequency axis
n = np.size(t)
fr = (Fs / 2) * np.linspace(0, 9, (n // 2))
x_m = abs(X[0 : n // 2])
# print((fr))
plt.subplot(2, 1, 2)
plt.plot(fr, ((x_m)))
plt.title("Magnitude spectrum")
plt.xlabel("Frequency")
plt.ylabel("Magnitude")
plt.tight_layout()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from math import pi
from scipy.fftpack import fft
import scipy.signal as sig
y = data1[0]["ExG1"] / (max(abs(data1[0]["ExG1"])))
x = [i for i in range(len(y))]
rolling_mean = y.rolling(window=5).mean()
rolling_mean2 = y.rolling(window=10).mean()
plt.plot(x, y, label="AMD")
plt.plot(x, rolling_mean, label="5 window size maf", color="orange")
plt.plot(x, rolling_mean2, label="10 window size maf", color="magenta")
plt.legend(loc="upper left")
plt.show()
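# Rough guide to what these window sizes do (a sketch, using Fs = 250 Hz from above): an
# N-point moving average has its first spectral null at Fs/N and an approximate -3 dB
# cutoff near 0.443*Fs/sqrt(N**2 - 1), i.e. roughly 22 Hz for N=5 and 11 Hz for N=10.
import math
for N in (5, 10):
    print(N, "first null:", 250 / N, "Hz, approx -3 dB:", 0.443 * 250 / math.sqrt(N**2 - 1), "Hz")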
for i in x_m:
print(i)
plt.plot([i for i in range(len(x_m))], x_m)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
    Dense,
    Conv2D,
    Flatten,
    MaxPool2D,
    Dropout,
    BatchNormalization,
    AveragePooling2D,
)
from sklearn.utils import shuffle
from tensorflow.keras.optimizers import Adam
import random
train = pd.read_csv("/kaggle/input/Kannada-MNIST/train.csv")
test = pd.read_csv("/kaggle/input/Kannada-MNIST/test.csv")
validation = pd.read_csv("/kaggle/input/Kannada-MNIST/Dig-MNIST.csv")
def prepare_data(features, target=None):
"""
here is a helper function to resize training data to (28,28,1)
and to categorize the target variable
"""
# shuffle data first
features, target = shuffle(features, target)
x = features.values.reshape((len(features), 28, 28, 1))
y = to_categorical(target)
print(y.shape)
# normalize
x = x / 255.0
return (x, y)
X, y = train.loc[:, train.columns != "label"], train.label
X_val, y_val = validation.loc[:, validation.columns != "label"], validation.label
print(
"We have {} training examples and {} validation examples.".format(
X.shape[0], X_val.shape[0]
)
)
print(
"X_train: {} | y_train: {} |\nX_val: {} | y_val: {}".format(
X.shape, y.shape, X_val.shape, y_val.shape
)
)
x_train, y_train = prepare_data(X, y)
x_val, y_val = prepare_data(X_val, y_val)
print(
"X_train: {} | y_train: {} |\nX_val: {} | y_val: {}".format(
x_train.shape, y_train.shape, x_val.shape, y_val.shape
)
)
# # data augmentation
# Initialising the ImageDataGenerator class.
# We will pass in the augmentation parameters in the constructor.
datagen = ImageDataGenerator(zoom_range=0.15, rotation_range=2)
model = Sequential()
# LeNet-style architecture
# Input image dimensions: 28x28x1
# 1. Conv2D - 6 filters, kernel 5x5, strides=1,1, padding=valid
model.add(
Conv2D(
filters=6,
kernel_size=(5, 5),
strides=(1, 1),
padding="valid",
activation="relu",
input_shape=(28, 28, 1),
)
)
model.add(Dropout(0.2))
model.add(BatchNormalization())
# 2. Max Pool - kernel - 2x2, strides = 2,2
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="valid"))
# 3. Conv2d - kernel - 5x5x16, strides = 1,1
model.add(
Conv2D(
filters=16,
kernel_size=(5, 5),
strides=(1, 1),
padding="valid",
activation="relu",
)
)
model.add(Dropout(0.2))
model.add(BatchNormalization())
# 4. Max Pool - kernel - 2x2, strides = 2,2
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="valid"))
# # 5. Convolution - kernel - 5x5x120 ,strides = 1,1
# model.add(Conv2D(filters=120,kernel_size=(4,4),strides=(1,1),padding='valid',activation='relu'))
# model.add(BatchNormalization())
model.add(Flatten())
# 6. Fully connected layer - 84 nodes
model.add(Dense(84, activation="relu"))
# 7. Output layer - 10 nodes
model.add(Dense(10, activation="softmax"))
# Compile
optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
model.compile(
optimizer=optimizer, loss=["categorical_crossentropy"], metrics=["accuracy"]
)
# # Train
# model.fit(x_train,y_train,epochs=10,validation_data=(x_val,y_val))
model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val))
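# The ImageDataGenerator created above is never used in training; a minimal sketch of how
# the same model could instead be fit on augmented batches (batch_size=64 is an arbitrary
# choice, not from the original notebook):
# model.fit(
#     datagen.flow(x_train, y_train, batch_size=64),
#     epochs=10,
#     validation_data=(x_val, y_val),
# )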
test = pd.read_csv("/kaggle/input/Kannada-MNIST/test.csv")
test_features = test.iloc[0:, test.columns != "id"]
test_features = test_features.values.reshape((len(test_features), 28, 28, 1))
test_features = test_features.astype("float")
test_features = test_features / 255.0
predictions = model.predict(test_features)
predictions = np.argmax(predictions, axis=1)
predictions.shape
final_df = pd.DataFrame({"id": test["id"], "label": predictions})
final_df.head()
final_df.to_csv("submission.csv", index=False)
|
# # CFG Python Data Challenge - Analyse Sales Data
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# pd.options.display.float_format = '{:,}'.format
# pd.set_option('precision', 2)
pd.options.display.float_format = "{:,.2f}".format
fpath = "/kaggle/input/sales-data/sales_dataset (3).csv"
df = pd.read_csv(fpath)
display(df.head())
display(df.describe())
print(df.shape)
print(df.info())
# Calculate total sales for each product
df["Total Sales"] = df["Sale Price"] * df["Quantity Sold"]
df_sorted = df.sort_values("Total Sales", ascending=False)
display(df_sorted)
total_sales = sum(df["Total Sales"])
print("Total sales across all products for the year: £{:,}.".format(total_sales))
category_price_mean = df.groupby("Category").mean()[["Sale Price"]]
category_price_mean.columns = ["Sale Price Mean"]
category_price_median = df.groupby("Category").median()[["Sale Price"]]
category_price_median.columns = ["Sale Price Median"]
category_price_std = df.groupby("Category").std()[["Sale Price"]]
category_price_std.columns = ["Sale Price STD"]
category_sale_count = df.groupby("Category").sum()[["Quantity Sold"]]
category_sale_count.columns = ["Quantity Sold"]
category_df = pd.concat(
[
category_price_mean,
category_price_median,
category_price_std,
category_sale_count,
],
axis=1,
)
display(category_df.sort_values("Sale Price Mean", ascending=False))
# month with the highest and lowest sales
sales_by_month = (
df.groupby("Month")
.sum()[["Total Sales"]]
.sort_values("Total Sales", ascending=False)
)
display(sales_by_month)
# February was the month with the highest sales, October had the lowest sales.
# Would be nice to have an area plot of total sales, stacking each category (a sketch follows below).
# Sort months Jan - Dec
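# A minimal sketch of both ideas, assuming the 'Month' column holds full month names
# (e.g. 'January'); if it holds numbers or abbreviations, the categories list would change:
month_order = ["January", "February", "March", "April", "May", "June",
               "July", "August", "September", "October", "November", "December"]
monthly_by_category = (
    df.assign(Month=pd.Categorical(df["Month"], categories=month_order, ordered=True))
    .groupby(["Month", "Category"])["Total Sales"]
    .sum()
    .unstack(fill_value=0)
)
monthly_by_category.plot(
    kind="area", stacked=True, figsize=(12, 6), title="Total Sales by Month and Category"
)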
# customers with highest purchases
purchases_by_customer = df.groupby("Customer Name").sum()[
["Total Sales", "Quantity Sold"]
]
display(purchases_by_customer.sort_values("Total Sales", ascending=False))
# display(purchases_by_customer.sort_values('Quantity Sold',ascending = False))
# Write results of analysis to csv file
# Visualise results
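# A minimal sketch of the two TODOs above, writing the summary tables out and plotting one
# of them (file names are arbitrary choices):
category_df.to_csv("category_summary.csv")
sales_by_month.to_csv("sales_by_month.csv")
purchases_by_customer.to_csv("purchases_by_customer.csv")
sales_by_month.plot(kind="bar", figsize=(10, 5), legend=False, title="Total Sales by Month")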
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Import all required library
import pandas as pd
import numpy as np
import os
# to save model
import pickle
# Import visualization modules
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
data = pd.read_csv(
"/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv"
)
data.head(5)
data.describe()
# checking if there is any NULL data
missing_values = data.isnull()
missing_values.head(5)
# checking via Heat Map
sns.heatmap(data=missing_values, yticklabels=False, cbar=False, cmap="viridis")
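# The notebook stops here, but the imports above (train_test_split, LogisticRegression,
# classification_report, pickle) suggest a churn model was planned. A minimal sketch under
# that assumption, using a few numeric columns and the 'Churn' label; column names are
# assumed from the standard Telco churn CSV:
churn_df = data.copy()
churn_df["TotalCharges"] = pd.to_numeric(churn_df["TotalCharges"], errors="coerce")
churn_df = churn_df.dropna(subset=["TotalCharges"])
X_churn = churn_df[["tenure", "MonthlyCharges", "TotalCharges"]]
y_churn = (churn_df["Churn"] == "Yes").astype(int)
X_tr, X_te, y_tr, y_te = train_test_split(
    X_churn, y_churn, test_size=0.2, random_state=42
)
log_reg = LogisticRegression(max_iter=1000)
log_reg.fit(X_tr, y_tr)
print(classification_report(y_te, log_reg.predict(X_te)))
with open("churn_model.pkl", "wb") as outfile:
    pickle.dump(log_reg, outfile)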
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from scipy import stats
DATA_DIR = "/kaggle/input/house-prices-advanced-regression-techniques/"
train_df = pd.read_csv(DATA_DIR + "train.csv")
test_df = pd.read_csv(DATA_DIR + "test.csv")
train_df
test_df
sns.distplot(train_df["SalePrice"])
fig = plt.figure()
res = stats.probplot(train_df["SalePrice"], plot=plt)
train_df["SalePrice"].describe()
lowerbound, upperbound = np.percentile(train_df["SalePrice"], [0.5, 99.5])
print(lowerbound, upperbound)
train_df = train_df.drop(
train_df[
(train_df["SalePrice"] < lowerbound) | (train_df["SalePrice"] > upperbound)
].index
)
train_df["SalePrice"].describe()
# SCALE TARGET VARIABLE
train_df["SalePrice"] = np.log1p(train_df["SalePrice"])
corr_matrix = train_df.corr()
corr_matrix.sort_values(by="SalePrice", inplace=True, axis=1, ascending=False)
plt.figure(figsize=(25, 25))
sns.heatmap(corr_matrix, square=True, annot=True, fmt="0.2f")
sns.scatterplot(x=train_df["OverallQual"], y=train_df["SalePrice"])
train_df = train_df.drop(train_df[train_df["OverallQual"] <= 2].index)
train_df = train_df.drop(
train_df[(train_df["OverallQual"] == 10) & (train_df["SalePrice"] < 12.5)].index
)
train_df = train_df.drop(
train_df[(train_df["OverallQual"] == 4) & (train_df["SalePrice"] > 12.3)].index
)
train_df = train_df.drop(
train_df[(train_df["OverallQual"] == 7) & (train_df["SalePrice"] < 11.5)].index
)
sns.scatterplot(x=train_df["OverallQual"], y=train_df["SalePrice"])
sns.scatterplot(x=train_df["GrLivArea"], y=train_df["SalePrice"])
train_df = train_df.drop(
train_df[(train_df["GrLivArea"] > 3300) & (train_df["SalePrice"] < 12.5)].index
)
sns.scatterplot(x=train_df["GarageCars"], y=train_df["SalePrice"])
sns.scatterplot(x=train_df["GarageArea"], y=train_df["SalePrice"])
train_df = train_df.drop(train_df[train_df["GarageArea"] > 1230].index)
sns.scatterplot(x=train_df["GarageYrBlt"], y=train_df["SalePrice"], alpha=0.6)
train_df["GarageTotal"] = train_df["GarageArea"] * train_df["GarageCars"]
test_df["GarageTotal"] = test_df["GarageArea"] * test_df["GarageCars"]
sns.scatterplot(x=train_df["GarageTotal"], y=train_df["SalePrice"])
train_df = train_df.drop(train_df[train_df["GarageTotal"] > 3750].index)
train_df = train_df.drop(
train_df[(train_df["SalePrice"] < 11.7) & (train_df["GarageTotal"] > 2000)].index
)
train_df.drop(["GarageArea", "GarageCars"], axis=1, inplace=True)
test_df.drop(["GarageArea", "GarageCars"], axis=1, inplace=True)
sns.scatterplot(x=train_df["TotalBsmtSF"], y=train_df["SalePrice"], alpha=0.6)
train_df = train_df.drop(train_df[train_df["TotalBsmtSF"] > 3000].index)
train_df = train_df.drop(
train_df[(train_df["SalePrice"] < 11.1) & (train_df["TotalBsmtSF"] > 1000)].index
)
sns.scatterplot(x=train_df["YearBuilt"], y=train_df["SalePrice"], alpha=0.6)
sns.scatterplot(x=train_df["FullBath"], y=train_df["SalePrice"], alpha=0.6)
train_df = train_df.drop(
train_df[(train_df["SalePrice"] < 11.2) & (train_df["FullBath"] == 2)].index
)
train_df = train_df.drop(
train_df[(train_df["SalePrice"] > 12.8) & (train_df["FullBath"] <= 1)].index
)
sns.scatterplot(x=train_df["YearRemodAdd"], y=train_df["SalePrice"], alpha=0.6)
train_df["RemodToSold"] = train_df["YrSold"] - train_df["YearRemodAdd"]
test_df["RemodToSold"] = test_df["YrSold"] - test_df["YearRemodAdd"]
sns.scatterplot(x=train_df["RemodToSold"], y=train_df["SalePrice"], alpha=0.6)
train_df.drop(["YrSold", "YearRemodAdd"], axis=1, inplace=True)
test_df.drop(["YrSold", "YearRemodAdd"], axis=1, inplace=True)
sns.scatterplot(x=train_df["Fireplaces"], y=train_df["SalePrice"], alpha=0.6)
sns.scatterplot(x=train_df["MasVnrArea"], y=train_df["SalePrice"], alpha=0.6)
train_df = train_df.drop(
train_df[(train_df["SalePrice"] < 12.5) & (train_df["MasVnrArea"] > 1000)].index
)
train_df = train_df.drop(
train_df[(train_df["SalePrice"] < 11.5) & (train_df["MasVnrArea"] > 500)].index
)
# # Categorical Features
cat_features = [f for f in train_df.columns if train_df[f].dtype == "object"]
cat_features
def analyzeCategoricalFeature(x, y):
f, axes = plt.subplots(1, 3, figsize=(20, 5))
f.suptitle(x)
axes[0].set_title("box plot")
axes[0].tick_params(axis="x", labelrotation=45)
sns.boxplot(x=train_df[x], y=train_df[y], ax=axes[0])
axes[1].set_title("stirp plot")
axes[1].tick_params(axis="x", labelrotation=45)
sns.stripplot(
x=train_df[x],
y=train_df[y],
jitter=0.4,
alpha=0.5,
marker="D",
size=5,
ax=axes[1],
)
axes[2].set_title("frequency plot")
axes[2].tick_params(axis="x", labelrotation=45)
sns.countplot(x=train_df[x], ax=axes[2])
cat_to_drop = [
"Street",
"Alley",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Condition1",
"Condition2",
"BldgType",
"RoofStyle",
"RoofMatl",
"Exterior2nd",
"BsmtFinType2",
"Heating",
"Functional",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
]
# for f in cat_features:
# analyzeCategoricalFeature(f, 'SalePrice')
train_df.drop(cat_to_drop, axis=1, inplace=True)
test_df.drop(cat_to_drop, axis=1, inplace=True)
# missing data cleaning
not_missing_cols = [f for f in train_df.columns if train_df[f].isna().sum() == 0]
not_missing_cols
missing_cols = [f for f in train_df.columns if train_df[f].isna().sum() > 0]
train_df[missing_cols].isna().sum().sort_values()
test_missing_cols = [f for f in test_df.columns if test_df[f].isna().sum() > 0]
test_df[test_missing_cols].isna().sum().sort_values()
# missing value imputing
combined = pd.concat([train_df.drop("SalePrice", axis=1), test_df])
missing = combined.isna().sum() > 0
missing_features = missing[missing == True].index
print(missing_features)
for feature in missing_features:
if combined[feature].dtype == "object":
combined[feature] = combined.groupby(["Neighborhood", "OverallQual"])[
feature
].transform(
lambda x: x.fillna(x.value_counts().index[0])
if (len(x.value_counts().index) > 0)
else None
)
else:
combined[feature] = combined.groupby(["Neighborhood", "OverallQual"])[
feature
].transform(lambda x: x.fillna(x.mean()))
missing = combined.isna().sum() > 0
missing_features = missing[missing == True].index
print(missing_features)
for feature in missing_features:
if combined[feature].dtype == "object":
combined[feature] = combined.groupby(["Neighborhood"])[feature].transform(
lambda x: x.fillna(x.value_counts().index[0])
if (len(x.value_counts().index) > 0)
else None
)
else:
combined[feature] = combined.groupby(["Neighborhood"])[feature].transform(
lambda x: x.fillna(x.mean())
)
combined.isna().sum().any()
features_to_encode = [f for f in train_df.columns if train_df[f].dtype == "object"]
features_to_encode
# Categorical Feature Encoding
def getObjectColumnsList(df):
return [cname for cname in df.columns if df[cname].dtype == "object"]
def PerformOneHotEncoding(df, columnsToEncode):
return pd.get_dummies(df, columns=columnsToEncode)
cat_cols = getObjectColumnsList(combined)
combined = PerformOneHotEncoding(combined, features_to_encode)
combined
# split again
train_df_final = combined.iloc[0 : train_df.shape[0]].copy()
# df_train.loc[:, "SalePrice"] = np.log(train.SalePrice)
test_df_final = combined.iloc[train_df.shape[0] : :].copy()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
train_df_final, train_df["SalePrice"], test_size=0.2, random_state=0
)
print(X_train.count())
print(y_train.count())
# BUILDING MODEL
import xgboost as xgb
# from sklearn.linear_model import Lasso
# models
model_xgb = xgb.XGBRegressor(n_estimators=800, learning_rate=0.25)
# predictors = ['OverallQual', 'GrLivArea', 'GarageTotal', 'GarageYrBlt', 'TotalBsmtSF', 'FullBath', 'RemodToSold', 'Fireplaces', 'MasVnrArea']
model_xgb.fit(pd.DataFrame(train_df_final), train_df["SalePrice"])
prediction = np.expm1(model_xgb.predict(pd.DataFrame(test_df_final)))
prediction
from sklearn.model_selection import GridSearchCV
parameters = [
{
"n_estimators": [200, 400, 600, 800],
"max_depth": [3, 4, 5, 6],
"learning_rate": [0.001, 0.01, 0.1, 1],
"booster": ["gbtree", "gblinear", "dart"],
"gamma": [0.001, 0.01, 0.1, 1, 10],
"reg_alpha": [0.001, 0.01, 0.1, 1, 10],
"reg_lambda": [0.001, 0.01, 0.1, 1, 10],
}
]
# parameters = [
# {
# 'n_estimators': [200],
# 'max_depth': [1, 3],
# 'learning_rate': [0.001, 0.01, 0.1],
# 'booster': ['gbtree', 'gblinear', 'dart'],
# 'gamma': [0.001, 0.01],
# 'reg_alpha': [0.001, 0.01],
# 'reg_lambda': [0.001]
# }
# ]
grid_search = GridSearchCV(
estimator=model_xgb,
param_grid=parameters,
scoring="neg_mean_squared_error",
cv=10,
n_jobs=-1,
)
grid_search = grid_search.fit(pd.DataFrame(train_df_final), train_df["SalePrice"])
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
print("best accuracy", best_accuracy)
print("best parameters", best_parameters)
submission = pd.DataFrame({"Id": test_df["Id"], "SalePrice": prediction})
submission.to_csv("submission.csv", index=False)
|
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:center;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">[S03E12] 🚀 Stacking Tuned Models ⚙️🔧
# # Table of Contents
# - [1. Loading and Inspecting Data](#loading-data)
# - [2. Adversarial Validation](#adv-validation)
# - [3. Exploratory Data Analysis](#eda)
# - [4. Feature Engineering](#feat-eng)
# - [5. Modelling](#modelling)
# - [6. Tuning Models with Optuna](#tuning)
# - [6.1. CatBoostClassifier](#tuning-catboost)
# - [6.2. RandomForestClassifier](#tuning-rf)
# - [6.3. Logistic Regression](#tuning-logreg)
# - [7. Ensembling Models](#Ensembling)
# - [8. Making Final Predictions - Tuned CatBoost Model](#predictions1)
# - [9. Making Final Predictions - Ensemble Model](#predictions2)
# - [10. Final Scores on Public Leaderboard](#final)
# Importing libraries
# Data Handling
import pandas as pd
import numpy as np
# Data Visualization
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
# Machine Learning Classification Models
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.ensemble import (
AdaBoostClassifier,
RandomForestClassifier,
StackingClassifier,
)
from sklearn.ensemble import StackingClassifier
# Preprocessing imports
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.model_selection import (
StratifiedKFold,
cross_val_score,
KFold,
train_test_split,
)
# Metrics imports
from sklearn.metrics import roc_auc_score, roc_curve, auc
# Tuning
import optuna
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">1. Loading and Inspecting Data
# > 📝 | This competition's data was synthetically generated from the Kidney Stone Prediction based on Urine Analysis dataset.
# The goal of this competition is to build a model that outputs the probability that a person has kidney stones based on the components of that person's urine. The model will be evaluated on the AUC score.
# Load Competition Data
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
# Loading original Data
original_df = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
# Defining inspection function
def inspect_data(df):
print(f"\n the dataset has {df.shape[0]} rows and {df.shape[1]} attributes ")
print(f"\n Null Values:\n{df.isnull().sum()}")
print(f"\n Duplicates: {df.duplicated().sum()}")
print(f"\n Data Types:\n{df.dtypes}")
inspect_data(train)
inspect_data(test)
inspect_data(original_df)
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">2. Adversarial Validation
# > 📝 | The Adversarial Validation is an important step to decide whether it is adequate to merge the train dataframe and the original dataframe based on how different they are from each other.
# We're going to use a classifier to see how well it can distinguish data samples originating from the original dataset versus the train dataset. If our classifier has an AUC score close to 0.5, we can infer that there is not much difference between the two datasets, and it's safe to merge them.
# Labeling data samples from the training set as 'is_train' = 1
train["is_train"] = 1
train
# Labeling data samples from the original set as 'is_train' = 0
original_df["is_train"] = 0
original_df
# Creating a new dataset by concatenating the train set and the original set
av_df = pd.concat([train, original_df], axis=0)
av_df = av_df.sample(frac=1, random_state=42) # Shuffling data
av_df
av_df["is_train"].value_counts() # Counting is_train values
# > 💡 | There's a strong class imbalance in our dataset. 414 data samples are from the train set, while only 79 come from the original dataset.
# We're going to use StratifiedKFold to perform cross-validation for this adversarial validation, since it is better suited to strong class imbalance.
X = av_df.drop(["is_train", "id"], axis=1) # Independent variables
y = av_df.is_train # Target variable
# Initiating XGBClassifier to perform a binary logistic classification
xgb_model = XGBClassifier(objective="binary:logistic", random_state=42)
# Initiating StratifiedKFold
cv = StratifiedKFold(
n_splits=5, shuffle=True, random_state=42 # 5 Folds # Shuffling data samples
)
# Performing Cross-Validation
cross_val_scores = []
for i, (train_av, test_av) in enumerate(cv.split(X, y)):
X_train, X_test = X.iloc[train_av], X.iloc[test_av]
y_train, y_test = y.iloc[train_av], y.iloc[test_av]
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict_proba(X_test)[:, 1]
score = roc_auc_score(y_test, y_pred)
cross_val_scores.append(score)
print(f"Fold {i+1}, AUC Score = {score:.3f}")
# Computing the mean AUC scores
mean_auc_score = np.mean(cross_val_scores)
# Plotting AUC-ROC curve
fpr, tpr, _ = roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.4f)" % mean_auc_score)
plt.plot([0, 1], [0, 1], linestyle="--", color="gray", label="Random Guess")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend()
plt.show()
# > 💡 | The classifier can't distinguish between data samples from the original and train sets. This is a good indicator that these datasets have extremely similar distributions, and we can merge them to form a larger training set.
# Creating new training set by concatenating the original_df and train set
train_df = pd.concat([original_df, train], axis=0)
train_df
train_df.drop(
["id", "is_train"], axis=1, inplace=True
) # Removing 'id' and 'is_train' columns
train_df
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">3. Exploratory Data Analysis
# > 📝 | Our training dataset has the following attributes:
# .gravity: Gravity measures the density of the urine compared to water. A higher gravity in this case can be an indicator of substances in the urine that can lead to the increased risk of stone formation.
# .ph: The ph measures the acidity of the urine. Overall, both a low ph and a high ph can lead to increased risk of stone formation.
# .osmo: The osmolarity is proportional to the concentration of molecules in the urine.
# .cond: The conductivity measures the ability of urine to conduct an electrical current. A higher level of conductivity may also be an indicator of increased risk for stone formation.
# .urea: This is the measure of the concentration of urea, which is a waste product of protein metabolism, in urine.
# .calc: A high concentration of calcium in the urine can be a contributor to the formation of kidney stones.
# > 📝 | The first thing we're going to analyze is the distribution of the target variable.
# Counting values in 'target' variable
target_count = train["target"].value_counts()
# Creating a new dataframe containing the values of each label
values = pd.DataFrame({"target": target_count.index, "count": target_count.values})
# Plotting a pie plot
fig = px.pie(
values,
values="count",
names="target",
template="ggplot2",
title="Target Variable Distribution",
)
fig.update_traces(hole=0.4)
fig.show()
# > 💡 | Most patients, about 55.6% of them, do not have kidney stones.
# Even though we have more samples of people without kidney stones, the class imbalance isn't very strong here.
# Listing columns for data analysis
cols = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
# Plotting histogram for each attribute
for col in cols:
plt.figure()
sns.histplot(data=train_df, x=col, kde=True)
plt.title(f"{col} distribution")
plt.show()
# > 💡 | The attributes are not normally distributed, which indicates we may need to transform them to a Gaussian-like distribution.
# Plotting boxplots
for col in cols:
plt.figure()
sns.boxplot(data=train_df, x=col)
plt.title(f"{col} boxplot")
plt.show()
# > 💡 | There are outliers in gravity and ph, which we may have to deal with later on.
# Plotting pairplots to observe relationship among features
sns.pairplot(train_df[cols], kind="reg")
plt.show()
# Plotting correlation heatmap
plt.figure(figsize=(12, 8))
corr = train_df.corr()
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, annot=True, mask=mask)
plt.show()
# > 💡 | Strongest correlations:
# . osmo and urea (0.82)
# . osmo and cond (0.73)
# . urea and gravity (0.66)
# Boxplots by target
for col in cols:
fig = px.box(
train_df,
y="urea",
x="target",
color="target",
title=f"Boxplots of {col} by Target",
template="ggplot2",
height=600,
)
fig.show()
# > 💡 | Overall, patients with kidney stones present higher levels of concentration of gravity, osmo, calc, urea, and higher ph than those who are not suffering with kidney stones. We found a relevant pattern.
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">4. Feature Engineering
# > 📝 | To increase the predictive powers of our models, I am going to add the following features to our datasets:
# . osmo-to-urea-ratio: Osmo divided by urea.
# . osmo-to-cond-diff: Osmo subtracted by cond.
# . calc-to-ph-ratio: Calc divided by ph.
# . osmo-to-urea-diff: Osmo subtracted by urea.
# . ph-category: A categorical feature that is going to classify any ph below 6 as “acidic”, ph between 6 and 8 as “neutral”, and ph above 8 as “basic”.
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">5. Modelling
# Independent feature
X = train_df.drop("target", axis=1)
y = train_df.target # Target variable
# Splitting data
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, random_state=42
) # 70% of data for training
# > 📝 | The FeatureEngineering class below performs a set of transformations on the data that are going to be added to a Pipeline.
# First, we create the new features by performing operations on the input data. Then, we use RobustScaler, which is robust to outliers, to standardize the data. The next step is to use QuantileTransformer to map the data to a Gaussian-like distribution. Lastly, the transform returns the X variables transformed, standardized, and with the new features added.
#
# Creating FeatureEngineering class
class FeatureEngineering(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# New Features
X["osmo-to-urea-ratio"] = X["osmo"] / X["urea"]
X["osmo-to-cond-dff"] = X["osmo"] - X["cond"]
X["calc-to-ph-ratio"] = X["calc"] / X["ph"]
X["osmo-to-urea-diff"] = X["osmo"] - X["urea"]
X["ph_category"] = pd.cut(
X["ph"], bins=[-np.inf, 6, 8, np.inf], labels=["acidic", "neutral", "basic"]
)
dummies = pd.get_dummies(X["ph_category"])
X.drop("ph_category", axis=1, inplace=True)
# Standardizing data
scaler = RobustScaler()
X_scaled = scaler.fit_transform(X.values)
X = pd.DataFrame(X_scaled, index=X.index, columns=X.columns)
# Normalizing Distribution
transformer = QuantileTransformer(
output_distribution="normal", n_quantiles=X.shape[0]
)
X_normalized = transformer.fit_transform(X.values)
X = pd.DataFrame(X_normalized, index=X.index, columns=X.columns)
X = pd.concat([X, dummies], axis=1)
return X
# > 📝 | In the code below, we create a function to remove outliers according to the Interquartile Range (IQR). The reason I didn't add this step to the pipeline is that we only drop outlier rows from the data used here for training and evaluation; we wouldn't perform this kind of row removal in a production environment.
#
# Creating outlier removal function using the IQR method
def outlier_removal(col):
Q1 = np.percentile(col, 25)
Q3 = np.percentile(col, 75)
IQR = Q3 - Q1
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
return col[(col > lower_bound) & (col < upper_bound)]
# Creating list of columns with outliers
outlier_cols = ["gravity", "ph"]
# Applying 'outlier_removal' function in the columns above
X_train[outlier_cols] = outlier_removal(X_train[outlier_cols])
X_test[outlier_cols] = outlier_removal(X_test[outlier_cols])
# Removing from the y sets the same samples (outliers) removed from the X sets
outlier_mask_train = X_train.notna().all(axis=1)
outlier_mask_test = X_test.notna().all(axis=1)
X_train = X_train[outlier_mask_train]
y_train = y_train[outlier_mask_train]
X_test = X_test[outlier_mask_test]
y_test = y_test[outlier_mask_test]
# Printing shape of X_train and y_train to certify they have the same length
print(X_train.shape)
print(y_train.shape)
# Creating Pipeline
pipeline = Pipeline([("feature_engineering", FeatureEngineering())])
# Applying Pipeline to X_train and X_test
X_train = pipeline.fit_transform(X_train)
X_test = pipeline.transform(X_test)
X_train # Visualizing results
# > 📝 | Now I'm going to create a list called models that is going to receive some classifiers.
# After that, we're going to iterate over each classifier in models, fit them to the training data, and perform probability predictions on the test data. We're then going to print the AUC score for each classifier.
#
# Creating a 'models' list
models = [
LogisticRegression(random_state=42),
XGBClassifier(random_state=42),
LGBMClassifier(random_state=42),
CatBoostClassifier(random_state=42, verbose=False),
AdaBoostClassifier(random_state=42),
RandomForestClassifier(random_state=42),
]
# Iterating through models in the list
for i in models:
i.fit(X_train, y_train) # Fitting data
y_pred = i.predict_proba(X_test)[:, 1] # Predicting probabilities
auc_score = roc_auc_score(y_test, y_pred) # Evaluating
print(f"{type(i).__name__}: AUC Score = {auc_score:.3f}") # Printing results
# > 💡 | The top three best classifiers were:
# 1. CatBoostClassifier (AUC = 0.829)
# 2. RandomForestClassifier (AUC = 0.824)
# 3. LogisticRegression (AUC = 0.809)
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">6. Tuning Models with Optuna
# > 📝 | In the next cells of code, we're going to perform a hyperparameter optimization in all our three best models to find the most optimal parameters for higher AUC scores.
# #
# <div style="padding:10px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">6.1. CatBoostClassifier
# Defining trial function
def tuning_catboost(trial):
# Parameters optimization
n_estimators = trial.suggest_int("n_estimators", 50, 1000, step=50)
max_depth = trial.suggest_int("max_depth", 2, 10)
learning_rate = trial.suggest_float("learning_rate", 0.01, 0.5, log=True)
l2_leaf_reg = trial.suggest_float("l2_leaf_reg", 1, 10, log=True)
# Initializing the CatBoost model with the parameters above
catboost = CatBoostClassifier(
n_estimators=n_estimators,
max_depth=max_depth,
learning_rate=learning_rate,
l2_leaf_reg=l2_leaf_reg,
random_state=42,
verbose=False,
)
catboost.fit(X_train, y_train) # Training
y_pred = catboost.predict_proba(X_test)[:, 1] # Validating
auc_score = roc_auc_score(y_test, y_pred)
return auc_score # Returning score
study_catboost = optuna.create_study(
direction="maximize"
) # Creating study trying to maximize results
study_catboost.optimize(tuning_catboost, n_trials=500) # Running optimization search
catboost_best_params = study_catboost.best_params # Obtaining the best parameters
catboost_best_score = study_catboost.best_value # Obtaining the best score
# Printing best parameters and best score
print(f"Best params: {catboost_best_params}")
print(f"Best score: {catboost_best_score:.3f}")
# Creating tuned CatBoost Model
tuned_cb = CatBoostClassifier(
**catboost_best_params, # Adding the best parameters to the model
verbose=False,
random_state=42,
)
tuned_cb.fit(X_train, y_train)
y_pred = tuned_cb.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, y_pred)
# Plotting the AUC-ROC curve and printing the AUC score
fpr, tpr, _ = roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score)
plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
print(f"Tuned CatBoostClassifier AUC Score = {auc_score:.3f}")
# #
# <div style="padding:10px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">6.2. RandomForestClassifier
def tuning_rf(trial):
n_estimators = trial.suggest_int("n_estimators", 50, 1000, step=50)
max_depth = trial.suggest_int("max_depth", 2, 50)
min_samples_split = trial.suggest_int("min_samples_split", 2, 20)
min_samples_leaf = trial.suggest_int("min_samples_leaf", 1, 20)
rf = RandomForestClassifier(
n_estimators=n_estimators,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
random_state=42,
)
rf.fit(X_train, y_train)
y_pred = rf.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, y_pred)
return auc_score
study = optuna.create_study(direction="maximize")
study.optimize(tuning_rf, n_trials=500)
rf_best_params = study.best_params
rf_best_score = study.best_value
print(f"Best params: {rf_best_params}")
print(f"Best score: {rf_best_score:.3f}")
tuned_rf = RandomForestClassifier(**rf_best_params, random_state=42)
tuned_rf.fit(X_train, y_train)
y_pred = tuned_rf.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score)
plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
print(f"Tuned RandomForestClassifier AUC Score = {auc_score:.3f}")
# #
# <div style="padding:10px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">6.3. Logistic Regression
def tuning_logistic(trial):
C = trial.suggest_float("C", 1e-3, 1e3, log=True)
penalty = trial.suggest_categorical("penalty", ["none", "l1", "l2"])
if penalty == "l1":
solver = "saga"
else:
solver = "lbfgs"
logistic = LogisticRegression(C=C, penalty=penalty, solver=solver, random_state=42)
logistic.fit(X_train, y_train)
y_pred = logistic.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, y_pred)
return auc_score
study_logistic = optuna.create_study(direction="maximize")
study_logistic.optimize(tuning_logistic, n_trials=500)
logistic_best_params = study_logistic.best_params
logistic_best_score = study_logistic.best_value
print(f"Best params: {logistic_best_params}")
print(f"Best score: {logistic_best_score:.3f}")
tuned_logreg = LogisticRegression(
**logistic_best_params, random_state=42, solver="saga"
)
tuned_logreg.fit(X_train, y_train)
y_pred = tuned_logreg.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score)
plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend()
plt.show()
print(f"Tuned LogisticRegression AUC Score = {auc_score:.3f}")
# > 💡 | The tunings worked quite well and we've been able to achieve even higher AUC scores for our models.
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">7. Ensembling Models
# > 📝 | Now we're going to use Sklearn's StackingClassifier to create a meta model by ensembling the tuned CatBoost, the tuned RandomForest, and the tuned LogisticRegression all together.
# Creating meta model
ensemble_model = StackingClassifier(
estimators=[
("CatBoost", tuned_cb),
("RandomForest", tuned_rf),
("LogisticRegression", tuned_logreg),
],
cv=5,
)
ensemble_model.fit(X_train, y_train)
y_pred = ensemble_model.predict_proba(X_test)[:, 1]
auc_score = roc_auc_score(y_test, y_pred)
print(f"\nEnsemble model AUC score = {auc_score:.3f}\n")
fpr, tpr, _ = roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, label="ROC Curve (AUC = %0.3f)" % auc_score)
plt.plot([0, 1], [0, 1], linestyle="--", color="grey", label="Random Guess")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve - Ensemble Model")
plt.legend()
plt.show()
# > 💡 | We have two different models with the highest scores:
# Tuned CatBoostClassifier, with an AUC score of 0.849
# Ensemble Model with an AUC score of 0.844
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">8. Making Final Predictions - Tuned CatBoost Model
# Visualizing test set
test
test = test.set_index("id")  # Setting 'id' as the index
test = pipeline.transform(test) # Passing the test set through the pipeline
y_pred = tuned_cb.predict_proba(test)[:, 1] # Performing Predictions
y_pred
# Loading submission dataframe
submission = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
submission
# Replacing values in 'target' column by the values in y_pred
submission["target"] = y_pred
submission
# Saving submission as csv
submission.to_csv("submission.csv", index=False)
# #
# <div style="padding:20px;
# color:white;
# margin:10;
# font-size:200%;
# text-align:left;
# display:fill;
# border-radius:5px;
# background-color:#191970;
# overflow:hidden;
# font-weight:700">9. Making Final Predictions - Ensemble Model
test # Visualizing test dataframe again
# Making predictions
y_pred = ensemble_model.predict_proba(test)[:, 1]
y_pred
# Replacing values
submission["target"] = y_pred
submission
# Saving to CSV file
submission.to_csv("submission.csv", index=False)
|
# # Australian Bushfire - Map analyis
# The Australian bushfire has led to massive loss of wildlife and forest area and has even caused human casualties, including firefighters from the U.S. It has also affected the air quality in nearby cities such as Sydney and Melbourne. We will take a look at fire data obtained from NASA's MODIS and VIIRS satellite instruments.
# What is covered -
# - Regions with Highest recorded fire radiation in a day
# - Dates on which bushfires were at peak
# - Timeline of bushfire - barplot
# - Heat map with time - for Australian bushfire
# - Canberra fire over last 10 days
# - Kangaroo island fire
# Note :
# - The notebook may take some time to load; load it in Firefox for faster results.
# - Also, since the loading time is high, we will only consider data for the last 2 months - Dec 1, 2019 to Jan 31, 2020.
# ## Install dependencies and set file path
# dependencies
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # plotting
import seaborn as sns  # for beautiful visualization
import folium
from folium import plugins
# set file path
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
folium.__version__
folium.branca.__version__
# ## Load the data
fire_nrt_m6 = pd.read_csv(
"../input/australian-bush-fire-satellite-data-nasa/fire_archive_M6_101673.csv"
)
fire_archive_m6 = pd.read_csv(
"../input/australian-bush-fire-satellite-data-nasa/fire_archive_M6_101673.csv"
)
fire_nrt_v1 = pd.read_csv(
"../input/australian-bush-fire-satellite-data-nasa/fire_nrt_V1_101674.csv"
)
fire_archive_v1 = pd.read_csv(
"../input/australian-bush-fire-satellite-data-nasa/fire_archive_V1_101674.csv"
)
type(fire_nrt_v1)
# Since VIIRS provides finer spatial resolution (375 m), we will be using VIIRS for further visualization and analysis.
# # Merge archive and nrt data
# Archive data spans Sept 1, 2019 to Dec 31, 2019.
# NRT data spans Jan 1, 2020 to Jan 31, 2020.
# We will merge both datasets.
df_merged = pd.concat([fire_archive_v1, fire_nrt_v1], sort=True)
data = df_merged
data.head()
data.info()
# We will be concentrating particularly on frp (fire radiation power), which can be used to detect bushfires
# ## Filter the data
# We will consider only 4 fields - latitude, longitude, acq_date and frp (fire radiation power) - for this analysis.
df_filter = data.filter(["latitude", "longitude", "acq_date", "frp"])
df_filter.head()
# - **Also, since most of the fire activity happened after November, and the complete data takes time to load in this notebook, we will filter the data between Dec 1, 2019 and Jan 31, 2020**
df = df_filter[df_filter["acq_date"] >= "2019-12-01"]
df.head()
# ## Regions with Highest recorded fire radiation in a day
data_topaffected = df.sort_values(by="frp", ascending=False).head(10)
data_topaffected
# By reverse geocoding we can obtain the locations(Mentioned in Conclusion at the end).
# **Below is the map marking the regions which were highest affected in a day**
# Create a map
m = folium.Map(
location=[-35.0, 144], control_scale=True, zoom_start=3, attr="text some"
)
df_copy = data_topaffected.copy()
# loop through data to create a Marker for each affected location
for i in range(0, len(df_copy)):
folium.Marker(
location=[df_copy.iloc[i]["latitude"], df_copy.iloc[i]["longitude"]],
# popup=popup,
tooltip="frp: "
+ str(df_copy.iloc[i]["frp"])
+ "<br/> date: "
+ str(df_copy.iloc[i]["acq_date"]),
icon=folium.Icon(color="red", icon="fire", prefix="fa"),
).add_to(m)
m
# ## Dates on which bushfires were at peak
dfdate = df[["acq_date", "frp"]].set_index("acq_date")
dfdate_highest = dfdate.groupby("acq_date").sum().sort_values(by="frp", ascending=False)
dfdate_highest.head(10)
# ## Timeline of bushfire - barplot
# - Note : this may take some time to execute
#
plt.figure(figsize=(10, 5))
sns.set_palette("pastel")
ax = sns.barplot(x="acq_date", y="frp", data=df)
for ind, label in enumerate(ax.get_xticklabels()):
if ind % 10 == 0: # every 10th label is kept
label.set_visible(True)
else:
label.set_visible(False)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.xlabel("Date")
plt.ylabel("FRP (fire radiation power)")
plt.title("time line of bushfire in Australia")
plt.tight_layout()
# - The above barplot represents the progress of the fire from Dec 1, 2019 to Jan 31, 2020
# - You can notice three big spikes after December 30, representing the highest frp activity
# ## Heat map with time - for Australian bushfire
# Note : The play button layer might not display properly in Chrome, due to an ongoing issue with folium.HeatMapWithTime. **Use Firefox**.
from folium.plugins import HeatMapWithTime
# A small function to get heat map with time given the data
def getmap(ip_data, location, zoom, radius):
# get day list
dfmap = ip_data[["acq_date", "latitude", "longitude", "frp"]]
df_day_list = []
for day in dfmap.acq_date.sort_values().unique():
df_day_list.append(
dfmap.loc[
dfmap.acq_date == day, ["acq_date", "latitude", "longitude", "frp"]
]
.groupby(["latitude", "longitude"])
.sum()
.reset_index()
.values.tolist()
)
# Create a map using folium
m = folium.Map(location, zoom_start=zoom, tiles="Stamen Terrain")
# creating heatmap with time
HeatMapWithTime(
df_day_list,
index=list(dfmap.acq_date.sort_values().unique()),
auto_play=True,
radius=radius,
gradient={0.2: "blue", 0.4: "lime", 0.6: "orange", 1: "red"},
min_opacity=0.5,
max_opacity=0.8,
use_local_extrema=True,
).add_to(m)
return m
getmap(df, [-27, 132], 3.5, 3)
# - The above map gives heatmap with time
# - Play it at higher fps to increase speed
# ## Canberra fire over last 10 days
# Note : The play button layer might not display properly in Chrome, due to an ongoing issue with folium.HeatMapWithTime. **Use Firefox**.
# df tail for the latest data
df_10days = df.tail(21500)
# Using getmap function to obtain map from above, location set to canberra
getmap(df_10days, [-35.6, 149.12], 8, 3)
# - You can see the red spot appearing in Canberra over last 4 days, indicating fire activity
#
# ## Kangaroo Island fire
# Note : The play button layer might not display properly in Chrome, due to an ongoing issue with folium.HeatMapWithTime. **Use Firefox**.
# Using getmap function to obtain map from above, location set to kangaroo island
getmap(df, [-36, 137.22], 8.5, 3)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv(
"/kaggle/input/political-social-media-posts/political_social_media.csv",
usecols=[7, 20],
names=["bias", "text"],
encoding="ISO-8859-1",
)
data.loc[data["bias"] == "neutral", "bias"] = 1
data.loc[data["bias"] == "partisan", "bias"] = 0
data.head()
import re
def preprocess_text(text):
# Remove URLs
text = re.sub(r"http\S+", "", text)
# Remove mentions
text = re.sub(r"@\w+", "", text)
# Remove hashtags
text = re.sub(r"#\w+", "", text)
# Remove special characters and digits
text = re.sub(r"[^a-zA-Z\s]", "", text)
# Convert to lowercase
text = text.lower()
return text
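# Quick sanity check of the cleaning function on a made-up example tweet (illustrative only):
print(preprocess_text("RT @SenExample: Vote YES on H.R. 1234! https://example.com #budget"))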
# Apply the preprocessing function to the 'text' column
data["cleaned_text"] = data["text"].apply(preprocess_text)
# Display the first few rows of the dataset with the cleaned text
data.head()
text = data["cleaned_text"].to_numpy()
X = text[1:]
y = data["bias"].values
y = y[1:]
print(X[:2])
print(y[:2])
print(np.unique(y))
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
# Split the dataset into train, validation, and test sets
X_train, X_temp, y_train, y_temp = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_val, X_test, y_val, y_test = train_test_split(
X_temp, y_temp, test_size=0.5, random_state=42
)
# Create the vectorizer
cv = CountVectorizer()
# Fit the vectorizer on the training data
X_train_cv = cv.fit_transform(X_train)
# Transform the validation and test data using the vectorizer
X_val_cv = cv.transform(X_val)
X_test_cv = cv.transform(X_test)
y_train = np.array(y_train, dtype=int)
y_test = np.array(y_test, dtype=int)
# first I will try to use the logistic regression classifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# instantiate the classifier
log_reg_classifier = LogisticRegression(max_iter=1000)
log_reg_classifier.fit(X_train_cv, y_train)
# Make predictions on the test set
y_test_pred_log_reg = log_reg_classifier.predict(X_test_cv)
# Calculate and report the accuracy
log_reg_accuracy = accuracy_score(y_test, y_test_pred_log_reg)
print(f"Logistic Regression accuracy: {log_reg_accuracy:.4f}")
from sklearn.svm import SVC
# Instantiate the SVM classifier
svm_classifier = SVC()
# Train the classifier
svm_classifier.fit(X_train_cv, y_train)
# Make predictions on the test set
y_test_pred_svm = svm_classifier.predict(X_test_cv)
# Calculate and report the accuracy
svm_accuracy = accuracy_score(y_test, y_test_pred_svm)
print(f"SVM accuracy: {svm_accuracy:.4f}")
from sklearn.naive_bayes import MultinomialNB
# Instantiate the Naive Bayes classifier
nb_classifier = MultinomialNB()
# Train the classifier
nb_classifier.fit(X_train_cv, y_train)
# Make predictions on the test set
y_test_pred_nb = nb_classifier.predict(X_test_cv)
# Calculate and report the accuracy
nb_accuracy = accuracy_score(y_test, y_test_pred_nb)
print(f"Naive Bayes accuracy: {nb_accuracy:.4f}")
from sklearn.metrics import precision_score, recall_score, f1_score
def print_metrics(y_true, y_pred, classifier_name):
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
print(f"{classifier_name} Precision: {precision:.4f}")
print(f"{classifier_name} Recall: {recall:.4f}")
print(f"{classifier_name} F1-score: {f1:.4f}")
print()
# SVM Metrics
print_metrics(y_test, y_test_pred_svm, "SVM")
# Naive Bayes Metrics
print_metrics(y_test, y_test_pred_nb, "Naive Bayes")
# logistic Regression Metrics
print_metrics(y_test, y_test_pred_log_reg, "Logistic Regression")
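# The validation split above (X_val_cv, y_val) is never actually used; a minimal sketch of
# using it to compare the three classifiers before looking at the test set (an optional
# extra, not in the original notebook):
y_val = np.array(y_val, dtype=int)
for name, clf in [
    ("Logistic Regression", log_reg_classifier),
    ("SVM", svm_classifier),
    ("Naive Bayes", nb_classifier),
]:
    val_acc = accuracy_score(y_val, clf.predict(X_val_cv))
    print(f"{name} validation accuracy: {val_acc:.4f}")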
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# ### Read in data
# Read in training data
train = pd.read_csv("/kaggle/input/chapman-cs530-redwinequality/train.csv")
train.head()
# Read in testing data
test = pd.read_csv("/kaggle/input/chapman-cs530-redwinequality/test.csv")
test.head()
# ### Create submissions
# Read in sample submissions
sample_submission = pd.read_csv(
"/kaggle/input/chapman-cs530-redwinequality/sample_submission.csv"
)
sample_submission.head()
# Create a dummy submission that has as many entries as the test set.
y_pred = (
np.random.rand(test.shape[0]) * 10
) # Create random numbers from 0-10 as dummy solution
sample_submission.loc[
:, "Predicted"
] = y_pred # Change the Predicted column to your prediction
sample_submission.head()
sample_submission.to_csv(
"your_submission.csv", header=True, index=False
) # Save the header but not the index
|
import os
import warnings
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import log_loss
warnings.filterwarnings("ignore")
# ## Data Preprocessing
tourney_result = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyDetailedResults.csv"
)
tourney_seed = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv"
)
tourney_result = pd.merge(
tourney_result,
tourney_seed,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Seed": "Seed1"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result = pd.merge(
tourney_result,
tourney_seed,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Seed": "Seed2"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result
def get_seed(x):
return int(x[1:3])
tourney_result["Seed1"] = tourney_result["Seed1"].map(lambda x: get_seed(x))
tourney_result["Seed2"] = tourney_result["Seed2"].map(lambda x: get_seed(x))
tourney_result
season_result = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonDetailedResults.csv"
)
season_result.columns
season_win_result = season_result[
[
"Season",
"WTeamID",
"WScore",
"WFGM",
"WFGA",
"WFGM3",
"WFGA3",
"WFTM",
"WFTA",
"WOR",
"WDR",
"WAst",
"WTO",
"WStl",
"WBlk",
"WPF",
"DayNum",
"NumOT",
]
]
season_lose_result = season_result[
[
"Season",
"LTeamID",
"LScore",
"LFGM",
"LFGA",
"LFGM3",
"LFGA3",
"LFTM",
"LFTA",
"LOR",
"LDR",
"LAst",
"LTO",
"LStl",
"LBlk",
"LPF",
"DayNum",
"NumOT",
]
]
season_win_result.rename(
columns={
"WTeamID": "TeamID",
"WScore": "Score",
"WFGM": "FGM",
"WFGA": "FGA",
"WDR": "DR",
"WFGA3": "FGA3",
"WFGM3": "FGM3",
"WFTM": "FTM",
"WFTA": "FTA",
"WOR": "OR",
"WAst": "Ast",
"WTO": "TO",
"WStl": "Stl",
"WBlk": "Blk",
"WPF": "PF",
},
inplace=True,
)
season_lose_result.rename(
columns={
"LTeamID": "TeamID",
"LScore": "Score",
"LFGM": "FGM",
"LFGA": "FGA",
"LDR": "DR",
"LFGA3": "FGA3",
"LFGM3": "FGM3",
"LFTM": "FTM",
"LFTA": "FTA",
"LOR": "OR",
"LAst": "Ast",
"LTO": "TO",
"LStl": "Stl",
"LBlk": "Blk",
"LPF": "PF",
},
inplace=True,
)
season_result = pd.concat((season_win_result, season_lose_result)).reset_index(
drop=True
)
season_result
season_score = (
season_result.groupby(["Season", "TeamID"])[
[
"Score",
"FGM",
"FGA",
"DR",
"FGA3",
"FGM3",
"FTM",
"FTA",
"OR",
"Ast",
"TO",
"Stl",
"Blk",
"PF",
]
]
.sum()
.reset_index()
)
season_score
tourney_result = pd.merge(
tourney_result,
season_score,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Score": "ScoreT1"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result = pd.merge(
tourney_result,
season_score,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
tourney_result.rename(columns={"Score": "ScoreT2"}, inplace=True)
tourney_result = tourney_result.drop("TeamID", axis=1)
tourney_result
columns = tourney_result.columns
columns
columns = [
"WTeamID",
"LTeamID",
"Seed1",
"Seed2",
"ScoreT1",
"FGM_x",
"FGA_x",
"DR_x",
"FGA3_x",
"FGM3_x",
"FTM_x",
"FTA_x",
"OR_x",
"Ast_x",
"TO_x",
"Stl_x",
"Blk_x",
"PF_x",
"ScoreT2",
"FGM_y",
"FGA_y",
"DR_y",
"FGA3_y",
"FGM3_y",
"FTM_y",
"FTA_y",
"OR_y",
"Ast_y",
"TO_y",
"Stl_y",
"Blk_y",
"PF_y",
]
tourney_win_result = tourney_result[columns].copy()
tourney_loss_result = tourney_result[columns].copy()
tourney_loss_result["Seed1"] = tourney_win_result["Seed2"]
tourney_loss_result["Seed2"] = tourney_win_result["Seed1"]
tourney_loss_result["ScoreT1"] = tourney_win_result["ScoreT2"]
tourney_loss_result["ScoreT2"] = tourney_win_result["ScoreT1"]
for c in [
"FGM",
"FGA",
"DR",
"FGA3",
"FGM3",
"FTM",
"FTA",
"OR",
"Ast",
"TO",
"Stl",
"Blk",
"PF",
]:
tourney_loss_result[c + "_x"] = tourney_win_result[c + "_y"]
tourney_loss_result[c + "_y"] = tourney_win_result[c + "_x"]
tourney_win_result["result"] = 1
tourney_loss_result["result"] = 0
tourney_result = pd.concat((tourney_win_result, tourney_loss_result)).reset_index(
drop=True
)
for c in [
"FGM",
"FGA",
"DR",
"FGA3",
"FGM3",
"FTM",
"FTA",
"OR",
"Ast",
"TO",
"Stl",
"Blk",
"PF",
]:
tourney_result[c + "diff"] = tourney_result[c + "_y"] - tourney_result[c + "_x"]
tourney_result["result"]
tourney_result["Seed_diff"] = tourney_result["Seed1"] - tourney_result["Seed2"]
tourney_result["ScoreT_diff"] = tourney_result["ScoreT1"] - tourney_result["ScoreT2"]
tourney_result
test_df = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv"
)
test_df["Season"] = test_df["ID"].map(lambda x: int(x[:4]))
test_df["WTeamID"] = test_df["ID"].map(lambda x: int(x[5:9]))
test_df["LTeamID"] = test_df["ID"].map(lambda x: int(x[10:14]))
test_df = pd.merge(
test_df,
tourney_seed,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Seed": "Seed1"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df = pd.merge(
test_df,
tourney_seed,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Seed": "Seed2"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df = pd.merge(
test_df,
season_score,
left_on=["Season", "WTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Score": "ScoreT1"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df = pd.merge(
test_df,
season_score,
left_on=["Season", "LTeamID"],
right_on=["Season", "TeamID"],
how="left",
)
test_df.rename(columns={"Score": "ScoreT2"}, inplace=True)
test_df = test_df.drop("TeamID", axis=1)
test_df["Seed1"] = test_df["Seed1"].map(lambda x: get_seed(x))
test_df["Seed2"] = test_df["Seed2"].map(lambda x: get_seed(x))
for c in [
"FGM",
"FGA",
"DR",
"FGA3",
"FGM3",
"FTM",
"FTA",
"OR",
"Ast",
"TO",
"Stl",
"Blk",
"PF",
]:
test_df[c + "diff"] = test_df[c + "_y"] - test_df[c + "_x"]
test_df["Seed_diff"] = test_df["Seed1"] - test_df["Seed2"]
test_df["ScoreT_diff"] = test_df["ScoreT1"] - test_df["ScoreT2"]
test_df = test_df.drop(["ID", "Pred", "Season"], axis=1)
test_df
MAX_EMBINT = max(tourney_result.WTeamID.unique()) + 1
scaler = StandardScaler()
icolumns = [
"LTeamID",
"WTeamID",
"Seed2",
"Seed1",
"ScoreT2",
"FGM_y",
"FGA_y",
"DR_y",
"FGA3_y",
"FGM3_y",
"FTM_y",
"FTA_y",
"OR_y",
"Ast_y",
"TO_y",
"Stl_y",
"Blk_y",
"PF_y",
"ScoreT1",
"FGM_x",
"FGA_x",
"DR_x",
"FGA3_x",
"FGM3_x",
"FTM_x",
"FTA_x",
"OR_x",
"Ast_x",
"TO_x",
"Stl_x",
"Blk_x",
"PF_x",
]
X, y = tourney_result[columns].values, tourney_result["result"].values
X_test = test_df[columns].values
X_itest = test_df[icolumns].values
# columns 0 and 1 hold the team IDs (fed to the embedding layers), so only the numeric features are scaled
X[:, 2:] = scaler.fit_transform(X[:, 2:])
X_test[:, 2:] = scaler.transform(X_test[:, 2:])
X_itest[:, 2:] = scaler.transform(X_itest[:, 2:])
# ## Keras Model
import tensorflow as tf
import tensorflow_addons as tfa
def mish(x):
    # Mish activation: x * tanh(softplus(x)); defined here but not used by the model below
    return x * tf.keras.backend.tanh(tf.keras.backend.softplus(x))
def get_model():
feature_inp = tf.keras.layers.Input((30,), name="FeatureInput")
id1_inp = tf.keras.layers.Input((1,), name="ID1Input")
id2_inp = tf.keras.layers.Input((1,), name="ID2Input")
emb = tf.keras.layers.Embedding(MAX_EMBINT, 2, input_length=1)
e1 = tf.keras.layers.Flatten()(emb(id1_inp))
e2 = tf.keras.layers.Flatten()(emb(id2_inp))
e1 = tf.keras.layers.Dropout(0.5)(e1)
e2 = tf.keras.layers.Dropout(0.5)(e2)
x = tf.keras.layers.Dense(128, activation="relu")(feature_inp)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(256, activation="relu")(x)
x = tf.keras.layers.Dropout(0.5)(x)
e = tf.keras.layers.Concatenate()([e1, e2])
e = tf.keras.layers.Dense(32, activation="relu")(e)
e = tf.keras.layers.Dropout(0.5)(e)
x = tf.keras.layers.Concatenate()([x, e])
x = tf.keras.layers.Dense(256, activation="relu")(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(128, activation="relu")(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model([feature_inp, id1_inp, id2_inp], x)
model.compile(
optimizer=tfa.optimizers.RectifiedAdam(lr=3e-3),
loss="binary_crossentropy",
metrics=["binary_crossentropy"],
)
return model
cv = StratifiedKFold(n_splits=20, shuffle=True)
losses = []
nn_predicts = []
for i, (train_ind, valid_ind) in enumerate(cv.split(X, y)):
tf.keras.backend.clear_session()
X_train, X_valid = X[train_ind], X[valid_ind]
y_train, y_valid = y[train_ind], y[valid_ind]
model = get_model()
if i == 0:
print(model.summary())
er = tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=25, restore_best_weights=True
)
model.fit(
[X_train[:, 2:], X_train[:, 0].astype("int32"), X_train[:, 1].astype("int32")],
y_train,
epochs=256,
batch_size=64,
validation_data=[
[
X_valid[:, 2:],
X_valid[:, 0].astype("int32"),
X_valid[:, 1].astype("int32"),
],
y_valid,
],
verbose=0,
callbacks=[er],
)
preds = model.predict(
[X_valid[:, 2:], X_valid[:, 0].astype("int32"), X_valid[:, 1].astype("int32")]
)
print(f"Fold {i}: {log_loss(y_valid, preds)}")
test_pred = (
0.5
* model.predict(
[X_test[:, 2:], X_test[:, 0].astype("int32"), X_test[:, 1].astype("int32")]
)
+ 0.5
- 0.5
* model.predict(
[
X_itest[:, 2:],
X_itest[:, 0].astype("int32"),
X_itest[:, 1].astype("int32"),
]
)
)
nn_predicts.append(test_pred)
# Take the average probability over the 20 folds
nn_predicts = np.asarray(nn_predicts)
nn_predicts = np.mean(nn_predicts, axis=0)
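# The per-fold test prediction above blends the model's two views of each matchup:
# p = 0.5 * P(team1 wins) + 0.5 * (1 - P(team2 wins)), where the second term uses the
# column-swapped features (icolumns). A tiny numeric illustration with made-up probabilities:
p_forward, p_reverse = 0.62, 0.35
print(0.5 * p_forward + 0.5 * (1 - p_reverse))  # -> 0.635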
import seaborn as sns
sns.distplot(nn_predicts)
submission_df = pd.read_csv(
"../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv"
)
submission_df["Pred"] = nn_predicts
submission_df.to_csv("submission.csv", index=False)
|
# ## Exploratory Data Analysis on Corona Virus
# ## What is a Corona Virus?
# As listed on WHO website, Coronaviruses (CoV) are a large family of viruses that cause illness ranging from the common cold to more severe diseases such as Middle East Respiratory Syndrome (MERS-CoV) and Severe Acute Respiratory Syndrome (SARS-CoV). A novel coronavirus (nCoV) is a new strain that has not been previously identified in humans.
# Common signs of infection include respiratory symptoms, fever, cough, shortness of breath and breathing difficulties. In more severe cases, infection can cause pneumonia, severe acute respiratory syndrome, kidney failure and even death.
# ## Objective:
# Since the coronavirus outbreak is increasing day by day, we can explore trends in the given data and try to predict the future.
# ## Dataset Source: https://www.kaggle.com/sudalairajkumar/novel-corona-virus-2019-dataset
# ## Exploratory Data Analysis
# Let's perform EDA on the dataset.
# importing all necessary libraries
import pandas as pd
import numpy as np
from datetime import date
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
import pycountry
import plotly.graph_objects as go
# Reading the dataset
coronaVirus_df = pd.read_csv(
"/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv",
index_col="Last Update",
parse_dates=["Last Update"],
)
coronaVirus_df.tail()
coronaVirus_df.shape
# ### Data Cleaning and Transformation
# 1. Check for missing values and fill them
# 2. Change the data type of the Last Update column and modify other columns if required.
# 3. Remove the 'Sno' column as it is not required.
# Checking missing values and transforming data
coronaVirus_df.isnull().values.any()
coronaVirus_df.isnull().sum()
# replacing null values in Province/State with Country names
coronaVirus_df["Province/State"].fillna(coronaVirus_df["Country"], inplace=True)
coronaVirus_df.drop(["Sno"], axis=1, inplace=True)
# creating new columns for year, month and time, which will be helpful for further computation
coronaVirus_df["year"] = pd.DatetimeIndex(coronaVirus_df["Date"]).year
coronaVirus_df["month"] = pd.DatetimeIndex(coronaVirus_df["Date"]).month
coronaVirus_df["time"] = pd.DatetimeIndex(coronaVirus_df["Date"]).time
coronaVirus_df.head()
# > ### Latest update on the number of confirmed, recovered and death cases across the globe
# We are trying to analyze the number of cases reported.
# A look at the different cases - confirmed, death and recovered
print("Globally Confirmed Cases: ", coronaVirus_df["Confirmed"].sum())
print("Global Deaths: ", coronaVirus_df["Deaths"].sum())
print("Globally Recovered Cases: ", coronaVirus_df["Recovered"].sum())
# It is seen that a total of 123360 confirmed cases have been reported, 2646 deaths have been confirmed and 3284 people have successfully fought the virus and are showing signs of recovery. The data is from 22nd Jan to 4th Feb 2020.
# It is important to analyze the latest scenario as of the last update so that we can predict future numbers.
coronaVirus_df[["Confirmed", "Deaths", "Recovered"]].sum().plot(kind="bar")
# ### Geographical Widespread of CoronaVirus
# Using the given data, here are a few questions which we are going to answer:
# 1. Total number of countries which are affected by the virus
# 2. Number of confirmed, recovered and death cases reported country wise
# 3. Number of confirmed cases reported State/Province wise
# 4. Top 5 affected countries
# 5. Top 5 countries which are least affected
# 6. Distribution of the virus in the India and US populations
# Total Number Of countries which are affected by the virus
coronaVirus_df.Country.unique()
# Number of confirmed cases reported Country wise
global_confirmed_cases = coronaVirus_df.groupby("Country").sum().Confirmed
global_confirmed_cases.sort_values(ascending=False)
global_death_cases = coronaVirus_df.groupby("Country").sum().Deaths
global_death_cases.sort_values(ascending=False)
global_recovered_cases = coronaVirus_df.groupby("Country").sum().Recovered
global_recovered_cases.sort_values(ascending=False)
# plotting graphs for total Confirmed, Death and Recovery cases
plt.rcParams["figure.figsize"] = (12, 9)
ax1 = coronaVirus_df[["Date", "Confirmed"]].groupby(["Date"]).sum().plot()
ax1.set_ylabel("Total Number of Confirmed Cases")
ax1.set_xlabel("Date")
ax2 = coronaVirus_df[["Date", "Deaths", "Recovered"]].groupby(["Date"]).sum().plot()
ax2.set_ylabel("Recovered and Deaths Cases")
ax2.set_xlabel("Date")
fig = px.scatter_matrix(coronaVirus_df, dimensions=["Confirmed"], color="Date")
fig.show()
fig = px.scatter_matrix(
coronaVirus_df, dimensions=["Recovered", "Deaths"], color="Date"
)
fig.show()
# Let's look at the various Provinces/States affected
data_countryprovince = coronaVirus_df.groupby(["Country", "Province/State"]).sum()
data_countryprovince.sort_values(by="Confirmed", ascending=False)
# Top Affected countries
top_affected_countries = global_confirmed_cases.sort_values(ascending=False)
top_affected_countries.head(5)
# Finding countries which are relatively safe due to fewer reported cases
top_unaffected_countries = global_confirmed_cases.sort_values(ascending=True)
top_unaffected_countries.head(5)
# The countries listed above are the least affected, meaning that relative to other countries very few cases have been reported. These countries should take all measures to prevent the virus from spreading.
# ### Plotting cases confirmed in China
# Mainland China
China_data = coronaVirus_df[coronaVirus_df["Country"] == "China"]
China_data
x = China_data.groupby("Province/State")["Confirmed"].sum().sort_values().tail(15)
x.plot(kind="barh", color="#86bf91")
plt.xlabel("Confirmed case Count", labelpad=14)
plt.ylabel("States/Province", labelpad=14)
plt.title("Confirmed cases count in China states", y=1.02)
# > ### Geographical Distribution in India and US
# > Now let's understand the distribution of the virus in the US population
US_data = coronaVirus_df[coronaVirus_df["Country"] == "US"]
US_data
x = (
US_data.groupby("Province/State")["Confirmed"]
.sum()
.sort_values(ascending=False)
.tail(20)
)
x
x.plot(kind="barh", color="#86bf91")
plt.xlabel("Confirmed case Count", labelpad=14)
plt.ylabel("States", labelpad=14)
plt.title("Confirmed cases count in US states", y=1.02)
India_data = coronaVirus_df[coronaVirus_df["Country"] == "India"]
India_data
import plotly.express as px
# India_data = px.data.gapminder().query("country == 'India'")
fig = px.bar(US_data, x="Province/State", y="Confirmed")
fig.show()
# ## Time Series Analysis
# It is important to understand the correlation between time and the number of reported cases.
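# A minimal numeric check of that relationship (an illustrative addition, not from the original
# notebook): aggregate the reported Confirmed count per day and correlate it with a day index.
daily = coronaVirus_df.copy()
daily["Date"] = pd.to_datetime(daily["Date"])
daily = daily.groupby("Date")["Confirmed"].sum().sort_index()
print(np.corrcoef(np.arange(len(daily)), daily.values)[0, 1])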
# Using plotly.express
import plotly.express as px
import pandas as pd
fig = px.line(coronaVirus_df, x="Date", y="Confirmed")
fig.show()
fig = px.line(coronaVirus_df, x="Date", y="Deaths")
fig.show()
import pandas as pd
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=coronaVirus_df["Date"],
y=coronaVirus_df["Confirmed"],
name="Confirmed",
line_color="deepskyblue",
opacity=0.8,
)
)
fig.add_trace(
go.Scatter(
x=coronaVirus_df["Date"],
y=coronaVirus_df["Recovered"],
name="Recovered",
line_color="dimgray",
opacity=0.8,
)
)
fig.add_trace(
go.Scatter(
x=coronaVirus_df["Date"],
y=coronaVirus_df["Deaths"],
name="Deaths",
line_color="red",
opacity=0.8,
)
)
# Use date string to set xaxis range
fig.update_layout(
xaxis_range=["2020-01-22", "2020-02-03"], title_text="Cases over time"
)
fig.show()
import pandas as pd
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=coronaVirus_df["Date"],
y=coronaVirus_df["Recovered"],
name="Recovered",
line_color="deepskyblue",
opacity=0.8,
)
)
fig.add_trace(
go.Scatter(
x=coronaVirus_df["Date"],
y=coronaVirus_df["Deaths"],
name="Deaths",
line_color="red",
opacity=0.8,
)
)
# Use date string to set xaxis range
fig.update_layout(
xaxis_range=["2020-01-22 00:00:00", "2020-02-03 23:59:59"],
    title_text="Recovered vs Deaths over time",
)
fig.show()
import pandas as pd
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=coronaVirus_df.time,
y=coronaVirus_df["Confirmed"],
name="Confirmed",
line_color="deepskyblue",
opacity=0.8,
)
)
# Use date string to set xaxis range
fig.update_layout(
xaxis_range=["2020-01-31", "2020-02-03"], title_text="Confirmed Cases over time"
)
fig.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # READ THE DATA
df = pd.read_csv("/kaggle/input/tmdb-movie-metadata/tmdb_5000_movies.csv")
df.head()
df["original_language"] = df["original_language"].apply(lambda x: 1 if x == "en" else 0)
df["original_language"].unique()
df.rename(columns={"original_language": "English"}, inplace=True)
# # Figuring out how to parse string data as lists in Python
a = df["production_countries"][0]
a
import ast
a = ast.literal_eval(a)
a
df["release_date"] = df["release_date"].fillna("1992-09-04")
df["release_date"].isna().sum()
df["release_date"] = pd.to_datetime(df["release_date"])
df["release_date"] = df["release_date"].apply(lambda x: int(x.year))
df["release_date"].head()
df
# # DROP NON-ESSENTIAL FEATURES
df.drop(
[
"homepage",
"id",
"keywords",
"original_title",
"overview",
"status",
"tagline",
"title",
"English",
],
axis=1,
inplace=True,
)
df["production_companies"] = df["production_companies"].apply(
lambda x: ast.literal_eval(x)
)
df["production_companies"] = df["production_companies"].apply(lambda x: len(x))
df["production_companies"].head()
df["genres"] = df["genres"].apply(lambda x: ast.literal_eval(x))
df["genres"] = df["genres"].apply(lambda x: len(x))
df["genres"].head()
# # Some columns need the length of the list instead of the whole list
df["production_countries"] = df["production_countries"].apply(
lambda x: ast.literal_eval(x)
)
df["production_countries"] = df["production_countries"].apply(lambda x: len(x))
df["production_countries"].head()
df["spoken_languages"] = df["spoken_languages"].apply(lambda x: ast.literal_eval(x))
df["spoken_languages"] = df["spoken_languages"].apply(lambda x: len(x))
df["spoken_languages"].head()
# # RENAMING SOME COLUMNS
# df.rename(columns={"spoken_languages": "Number of spoken_languages"},inplace=True)
# df.rename(columns={"production_countries": "Number of countries produced in"},inplace=True)
# df.rename(columns={"production_companies": "Number of producers"},inplace=True)
# # Filling nan values and changing datatypes
df["runtime"] = df["runtime"].fillna(df["runtime"].mean())
df["popularity"] = df["popularity"].apply(lambda x: int(x))
df["runtime"] = df["runtime"].apply(lambda x: int(x))
df
# # REPLACING 0s
df["production_companies"] = df["production_companies"].replace(0, 1)
df["production_countries"] = df["production_countries"].replace(0, 1)
quant = 0.0156
df["revenue"] = df["revenue"].replace(0, df["revenue"].quantile(quant))
df["budget"] = df["budget"].replace(0, df["budget"].quantile(quant))
df["popularity"] = df["popularity"].replace(0, df["popularity"].quantile(quant))
df["runtime"] = df["runtime"].replace(0, df["runtime"].quantile(quant))
df["spoken_languages"] = df["spoken_languages"].replace(0, 1)
# df.drop(['runtime'], axis=1,inplace=True)
df.info()
df.columns
# # Creating features and target label
X = df.drop(["revenue"], axis=1)
y = df["revenue"]
df.describe()
# # Scaling the data
from sklearn.preprocessing import MaxAbsScaler
scaler = MaxAbsScaler()
X = scaler.fit_transform(X)
# y=scaler.fit_transform(y)
# # Trying PCA
# import matplotlib.pyplot as plt
# from sklearn.decomposition import PCA
# pca = PCA()
# principalComponents = pca.fit_transform(X)
# plt.figure()
# plt.plot(np.cumsum(pca.explained_variance_ratio_))
# plt.xlabel('Number of Components')
# plt.ylabel('Variance (%)') #for each component
# plt.title('Explained Variance')
# plt.show()
# pca = PCA(n_components=5)
# X = pca.fit_transform(X)
# # Importing the regression Model
from sklearn.ensemble import RandomForestRegressor as forest
clf = forest(max_depth=40, max_features=0.4, n_estimators=45, random_state=42)
# # Splitting the data for validation after training
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
# # Training and evaluating the model
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# # Saving pickles
import pickle
filename = "RandomForest_model.pickle"
pickle.dump(clf, open(filename, "wb"))
filename_scaler = "scaler_model.pickle"
pickle.dump(scaler, open(filename_scaler, "wb"))
# # HYPER PARAMETER TUNING
##Hyper parameter tuning
# n_estimators = [int(x) for x in np.linspace(start = 40, stop = 120, num = 10)]
# # Number of features to consider at every split
# max_features = ['auto', 'sqrt']
# # Maximum number of levels in tree
# max_depth = [int(x) for x in np.linspace(10, 75, num = 10)]
# max_depth.append(None)
# # Minimum number of samples required to split a node
# min_samples_split = [2, 5, 10]
# # Minimum number of samples required at each leaf node
# min_samples_leaf = [1, 2, 4]
# random_grid = {'n_estimators': n_estimators,
# 'max_features': max_features,
# 'max_depth': max_depth,
# 'min_samples_split': min_samples_split,
# 'min_samples_leaf': min_samples_leaf,
# }
# from sklearn.model_selection import GridSearchCV
# grid_search = GridSearchCV(estimator=clf,param_grid=random_grid,cv=2,n_jobs =-1,verbose = 3)
# grid_search.fit(X_train, y_train)
# grid_search.best_params_
# [Parallel(n_jobs=-1)]: Using backend LokyBackend with 4 concurrent workers.
# [Parallel(n_jobs=-1)]: Done 24 tasks | elapsed: 49.3s
# [Parallel(n_jobs=-1)]: Done 120 tasks | elapsed: 6.5min
# [Parallel(n_jobs=-1)]: Done 280 tasks | elapsed: 15.5min
# [Parallel(n_jobs=-1)]: Done 504 tasks | elapsed: 26.2min
# [Parallel(n_jobs=-1)]: Done 792 tasks | elapsed: 33.7min
# /opt/conda/lib/python3.6/site-packages/joblib/externals/loky/process_executor.py:706: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.
# "timeout or by a memory leak.", UserWarning
# [Parallel(n_jobs=-1)]: Done 1144 tasks | elapsed: 53.7min
|
# ### Content Based Recommendation System
import pandas as pd
df = pd.read_csv("../input/content-based-recomm-sys/movies_metadata.csv")
import os
os.listdir("../input")
df.head()
df["tagline"].fillna("")
df["description"] = df["overview"] + df["tagline"]
# df['description'] = df['description'].fillna('')
df.shape
df.dropna(subset=["description"], inplace=True)
df.drop_duplicates(subset=["title"], inplace=True)
df.shape
df = df.reset_index(drop=True)
from sklearn.feature_extraction.text import TfidfVectorizer
tf = TfidfVectorizer(
analyzer="word", ngram_range=(1, 3), min_df=0, stop_words="english"
)
tfidf_matrix = tf.fit_transform(df["description"])
print(tfidf_matrix)
tfidf_matrix.shape
from sklearn.metrics.pairwise import linear_kernel
cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
cosine_similarities.shape
cosine_similarities[0].shape
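# Side note: TfidfVectorizer L2-normalises each row by default, so the dot products returned by
# linear_kernel above are already cosine similarities. A quick check on one row (a sketch,
# assuming row 0 has a non-empty description):
import numpy as np
print(np.isclose(np.linalg.norm(tfidf_matrix[0].toarray()), 1.0))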
# df = df.reset_index()
titles = df["title"]
indices = pd.Series(df.index, index=df["title"])
def recommend(title):
idx = indices[title]
sim_scores = list(enumerate(cosine_similarities[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[1:31]
movie_indices = [i[0] for i in sim_scores]
return titles.iloc[movie_indices]
recommend("The Godfather").head(10)
recommend("The Dark Knight Rises").head(10)
|
# # Session 3: Exploratory Data Analysis
# First we import relevant libraries.
import pandas as pd
import numpy as np
import matplotlib
# Then we load the file as a data table into the variable `df`.
df = pd.read_csv("../input/salary-dataset/salary_dataset.csv")
# We view the first five records (through `df.head()`) to get a sense of what data (and specifically, data types) are in the table.
df.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from sklearn import (
feature_extraction,
linear_model,
model_selection,
preprocessing,
metrics,
)
import pandas as pd
sample_submission = pd.read_csv("../input/nlp-getting-started/sample_submission.csv")
test = pd.read_csv("../input/nlp-getting-started/test.csv")
train = pd.read_csv("../input/nlp-getting-started/train.csv")
def model_evaluator(model, train_data, target_data, test_data):
    # Note: cross_val_score clones and refits the model internally; test_data is not used here.
    model.fit(train_data, target_data)
    score = model_selection.cross_val_score(
        model, train_data, target_data, cv=3, scoring="f1"
    )
    return score
# exploring data
train.head()
count_vectorizer = feature_extraction.text.CountVectorizer()
clf = linear_model.RidgeClassifier()
train_vectors = count_vectorizer.fit_transform(train.text[:])
target_vectors = train.target[:]
model_selection.cross_val_score(
clf, X=train_vectors, y=target_vectors, cv=3, scoring="f1"
)
clf = linear_model.RidgeClassifier()
train_vectors = count_vectorizer.fit_transform(train.text[:])
target_vectors = train.target[:]
test_vectors = count_vectorizer.transform(test.text[:])
clf.fit(train_vectors, target_vectors)
test_preds = clf.predict(test_vectors)
a = pd.DataFrame({"id": test.id, "target": test_preds})
a.to_csv("submission.csv", index=False)
train_vectors.sum(axis=1)[:10]
print(train.text[1])
tfidf = feature_extraction.text.TfidfTransformer()
train_tfidf = tfidf.fit_transform(train_vectors)
test_tfidf = tfidf.transform(test_vectors)
a = model_evaluator(clf, train_vectors, target_vectors, test_vectors)
print("cross validation score for count_vectorized data:", a.mean())
a = model_evaluator(clf, train_tfidf, target_vectors, test_tfidf)
print("cross validation score for TF_IDF data:", a.mean())
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import seaborn as sns
file_path = "../input/prestigious-awards-in-india-from-1954-to-2013/Bharat Ratna and Padma Awards.csv"
data = pd.read_csv(file_path, index_col="YEAR")
# Replacing the values
data.replace(to_replace="Awards Not Announced", value="0", inplace=True)
data.info()
# Converting the object datatype to integer datatype
data["BHARAT RATNA"] = data["BHARAT RATNA"].astype(int)
data["PADMA VIBHUSHAN"] = data["PADMA VIBHUSHAN"].astype(int)
data["PADMA BHUSHAN"] = data["PADMA BHUSHAN"].astype(int)
data["PADMA SHRI"] = data["PADMA SHRI"].astype(int)
data["TOTAL"] = data["TOTAL"].astype(int)
data.info()
plt.figure(figsize=(18, 10))
plt.title("National Honours from 1954-2013")
sns.lineplot(data=data)
# counting total number of awardees for each award
x1 = data["PADMA SHRI"].sum()
x2 = data["PADMA BHUSHAN"].sum()
x3 = data["PADMA VIBHUSHAN"].sum()
x4 = data["BHARAT RATNA"].sum()
count = {
"PADMA SHRI": [x1],
"PADMA BHUSHAN": [x2],
"PADMA VIBHUSHAN": [x3],
"BHARAT RATNA": [x4],
}
count = pd.DataFrame(count)
# Set the width and height of the figure
plt.figure(figsize=(20, 6))
# Add title
plt.title("Comparison of each category")
sns.barplot(data=count)
# Add label for vertical axis
plt.ylabel("Number of people awarded")
|
# In this notebook I will analyze the well-known Titanic case.
# An exploratory data analysis will be performed, as a step prior to the prediction process, to determine the features (variables) most relevant to surviving (or not) this tragedy.
# Importing the libraries needed for the data analysis
import pandas as pd  # Dataset creation and manipulation
import numpy as np  # Matrix manipulation
import matplotlib.pyplot as plt  # Data plotting and visualization
# Creating the dataset from the csv file
dados = pd.read_csv("../input/titanic/train_and_test2.csv")
# Initial view of the dataset for a first analysis
dados.head()
# We can see that the table above has many columns containing only the value 0.
# In addition, some columns will not add value to the exploratory analysis and will be removed from the dataset.
# Removing columns that will not be used in the analysis
dados = dados.drop(
columns=[
"Passengerid",
"zero",
"zero.1",
"zero.2",
"zero.3",
"zero.4",
"zero.5",
"zero.6",
"zero.7",
"zero.8",
"zero.9",
"zero.10",
"zero.11",
"zero.12",
"zero.13",
"zero.14",
"zero.15",
"zero.16",
"zero.17",
"zero.18",
]
)
import pandas as pd
train_and_test2 = pd.read_csv("../input/titanic/train_and_test2.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import sys
sys.path.append("/kaggle/input/test-modules3/")
# https://github.com/qubvel/classification_models/tree/master/classification_models/models
import keras
from models_factory import ModelsFactory
class KerasModelsFactory(ModelsFactory):
@staticmethod
def get_kwargs():
return {
"backend": keras.backend,
"layers": keras.layers,
"models": keras.models,
"utils": keras.utils,
}
Classifiers = KerasModelsFactory()
ResNet18, preprocess_input = Classifiers.get("resnet18")
model = ResNet18((224, 224, 3))
test_batch = np.random.rand(32, 224, 224, 3)
predict = model.predict(test_batch)
predict
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pathlib, os, random, mplcyberpunk
import splitfolders
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.layers import (
BatchNormalization,
Dense,
Dropout,
Flatten,
MaxPool2D,
Conv2D,
Activation,
)
base_path = (
r"/kaggle/input/tuberculosis-tb-chest-xray-dataset/TB_Chest_Radiography_Database/"
)
base_path = pathlib.Path(base_path)
base_path
splitfolders.ratio(
base_path, output="X_ray_Imgs", seed=123, ratio=(0.7, 0.15, 0.15), group_prefix=None
)
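# splitfolders writes train/val/test subfolders (70%/15%/15% of each class) under ./X_ray_Imgs,
# which the image generators below read from.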
# necessary libraries
import os
import pandas as pd
# visualizations libraries
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.image import imread
# tensorflow libraries
import tensorflow as tf
from tensorflow import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras import optimizers
from tensorflow.keras.utils import load_img
from tensorflow.keras.utils import img_to_array
# model evaluation libraries
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from mlxtend.plotting import plot_confusion_matrix
from glob import glob
import os
import numpy as np
import pandas as pd
import random
from skimage.io import imread
import matplotlib.pyplot as plt
from keras.applications.resnet import ResNet50
from keras.applications.resnet import preprocess_input, decode_predictions
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import (
ImageDataGenerator,
img_to_array,
load_img,
)
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, GlobalMaxPooling2D
batch_size = 20
img_height, img_width = 200, 200
input_shape = (img_height, img_width, 3)
datagen = ImageDataGenerator(rescale=1 / 255)
train_data = datagen.flow_from_directory(
"X_ray_Imgs/train",
target_size=(200, 200),
batch_size=batch_size,
class_mode="categorical",
subset="training",
)
test_data = datagen.flow_from_directory(
"X_ray_Imgs/test",
target_size=(200, 200),
batch_size=batch_size,
class_mode="categorical",
shuffle=False,
)
val_data = datagen.flow_from_directory(
"X_ray_Imgs/val/",
target_size=(200, 200),
batch_size=batch_size,
class_mode="categorical",
shuffle=False,
)
class_name = train_data.class_indices
class_names = list(class_name.keys())
class_name
num_classes = 2
model = Sequential()
model.add(
ResNet50(
input_shape=(200, 200, 3), include_top=False, pooling="avg", weights="imagenet"
)
)
model.add(Dense(num_classes, activation="softmax"))
model.layers[0].trainable = False
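# With the ResNet50 base frozen, only the final Dense(2) head is trained below; the ImageNet
# weights act as a fixed feature extractor.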
model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(
train_data,
steps_per_epoch=train_data.samples // train_data.batch_size,
validation_data=val_data,
validation_steps=val_data.samples // val_data.batch_size,
epochs=10,
verbose=1,
)
history.history.keys()
epochs = range(1, len(history.history["accuracy"]) + 1)
plt.plot(epochs, history.history["accuracy"], color="purple")
plt.plot(epochs, history.history["val_accuracy"], color="pink")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.title("Accuracy plot")
plt.legend(["train_acc", "val_acc"])
plt.show()
plt.plot(epochs, history.history["loss"], color="purple")
plt.plot(epochs, history.history["val_loss"], color="pink")
plt.xlabel("epochs")
plt.ylabel("loss")
plt.title("Loss plot")
plt.legend(["train_loss", "val_loss"])
plt.show()
import numpy as np
prediction = model.predict(
val_data, steps=np.ceil(val_data.samples / val_data.batch_size), verbose=1
)
prediction = prediction > 0.5
prediction
val_labels = val_data.classes
val_labels
pred1 = [i[0] for i in prediction]
pred2 = [i[1] for i in prediction]
cm = confusion_matrix(val_data.classes, pred2)
plot_confusion_matrix(cm, figsize=(5, 5))
print(accuracy_score(val_data.classes, pred2))
print(classification_report(val_data.classes, pred2))
# Importing the metrics package from sklearn library
from sklearn import metrics
# Creating the confusion matrix
cm = metrics.confusion_matrix(val_data.classes, pred2)
# Assigning columns names
cm_df = pd.DataFrame(
cm,
columns=["Predicted Negative", "Predicted Positive"],
index=["Actual Negative", "Actual Positive"],
)
# Showing the confusion matrix
cm_df
# Creating a function to report confusion metrics
def confusion_metrics(conf_matrix):
# save confusion matrix and slice into four pieces
TP = conf_matrix[1][1]
TN = conf_matrix[0][0]
FP = conf_matrix[0][1]
FN = conf_matrix[1][0]
print("True Positives:", TP)
print("True Negatives:", TN)
print("False Positives:", FP)
print("False Negatives:", FN)
# calculate accuracy
conf_accuracy = float(TP + TN) / float(TP + TN + FP + FN)
# calculate mis-classification
conf_misclassification = 1 - conf_accuracy
# calculate the sensitivity
conf_sensitivity = TP / float(TP + FN)
# calculate the specificity
conf_specificity = TN / float(TN + FP)
    # calculate precision
    conf_precision = TP / float(TP + FP)
# calculate f_1 score
conf_f1 = 2 * (
(conf_precision * conf_sensitivity) / (conf_precision + conf_sensitivity)
)
print("-" * 50)
print(f"Accuracy: {round(conf_accuracy,2)}")
print(f"Mis-Classification: {round(conf_misclassification,2)}")
print(f"Sensitivity: {round(conf_sensitivity,2)}")
print(f"Specificity: {round(conf_specificity,2)}")
print(f"Precision: {round(conf_precision,2)}")
print(f"f_1 Score: {round(conf_f1,2)}")
confusion_metrics(cm)
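# Illustrative run on a small hypothetical confusion matrix (made-up numbers, not from the model
# above), laid out as [[TN, FP], [FN, TP]]:
confusion_metrics(np.array([[50, 10], [5, 35]]))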
from sklearn.metrics import roc_curve, auc
fpr, tpr, threshold = roc_curve(val_data.classes, pred2)
auc_resnet = auc(fpr, tpr)
plt.figure(figsize=(5, 5), dpi=100)
plt.plot(fpr, tpr, linestyle="-", label="Resnet50 (auc = %0.3f)" % auc_resnet)
plt.xlabel("False Positive Rate -->")
plt.ylabel("True Positive Rate -->")
plt.legend()
plt.title("ROC curve")
plt.show()
import gradio as gr
class_names = ["Normal", "Tuberculosis"]
def predict_image(img1):
img1 = img1.reshape(200, 200, -1)
img1 = tf.keras.utils.img_to_array(img1)
img1 = np.expand_dims(img1, axis=0)
img1 = img1 / 255
prediction = model.predict(img1).flatten()
print(prediction)
for i in range(2):
print(class_names[i], float(prediction[i]))
return {class_names[i]: float(prediction[i]) for i in range(2)}
image = gr.inputs.Image(shape=(200, 200))
label = gr.outputs.Label(num_top_classes=2)
gr.Interface(
fn=predict_image, inputs=image, outputs=label, interpretation="default"
).launch(debug="True")
# predicting an image
from keras.preprocessing import image
import numpy as np
image_path = "/kaggle/input/tuberculosis-tb-chest-xray-dataset/TB_Chest_Radiography_Database/Tuberculosis/Tuberculosis-2.png"
new_img = tf.keras.utils.load_img(image_path, target_size=(200, 200))
img = tf.keras.utils.img_to_array(new_img)
img = np.expand_dims(img, axis=0)
img = img / 255
print("Following is our prediction:")
prediction = model.predict(img)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
d = prediction.flatten()
j = d.max()
# print(d)
# print(j)
for index, item in enumerate(d):
if item == j:
print(item)
print(j)
class_name = class_names[index]
print(class_names[index])
# Another way
# img_class = model.predict_classes(img)
# img_prob = model.predict_proba(img)
# print(img_class ,img_prob )
# plotting the image with the predicted class name
plt.figure(figsize=(4, 4))
plt.imshow(new_img)
plt.axis("off")
plt.title(class_name)
plt.show()
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
df_2017 = pd.read_csv("../input/utmb_results_2017.csv")
df_2018 = pd.read_csv("../input/utmb_results_2018.csv")
df_2019 = pd.read_csv("../input/utmb_results_2019.csv")
df_2017.head()
df_2018.head()
df_2019.head()
df_2017["Year"] = 2017
df_2018["Year"] = 2018
df_2019["Year"] = 2019
df = pd.concat([df_2017, df_2018, df_2019]) # combine the three dataframes
df = df.drop(["Unnamed: 0"], axis=1)
df.columns
df.info()
df.head()
def convert_to_minutes(row):
return sum(i * j for i, j in zip(map(float, row.split(":")), [60, 1, 1 / 60]))
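# Quick sanity check of the conversion above (an illustrative time string, not from the results):
# "20:45:30" -> 20*60 + 45 + 30/60 = 1245.5 minutes
print(convert_to_minutes("20:45:30"))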
df["Minutes"] = df["Time"].apply(convert_to_minutes)
df["Minutes"] = df["Minutes"].round(2)
df.head()
plt.figure(num=None, figsize=(8, 6), dpi=80)
plt.hist(df["Minutes"], alpha=0.5)
plt.title("2017 & 2018 & 2019 UTMB Times", fontsize=18, fontweight="bold")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Time (min)", fontsize=18)
plt.ylabel("Frequency", fontsize=18)
plt.show()
df.describe()
plt.figure(figsize=(8, 6), dpi=80)
sns.boxplot(x="Year", y="Minutes", data=df)
plt.title("UTMB Results by Year", fontsize=18, fontweight="bold")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Year", fontsize=18)
plt.ylabel("Minutes", fontsize=18)
plt.figure(figsize=(8, 6), dpi=80)
sns.violinplot(x="Year", y="Minutes", data=df, inner="quartile")
plt.title("UTMB Results by Year", fontsize=18, fontweight="bold")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Year", fontsize=18)
plt.ylabel("Minutes", fontsize=18)
plt.savefig("UTMB ViolinPlot.png")
# Add Gender
df["Cat."].value_counts()
df[df["Cat."].str.contains("H", regex=False) == False]
df.loc[df["Cat."].str.contains("H", regex=False) == False, "Gender"] = "Female"
df.loc[df["Cat."].str.contains("H", regex=False) == True, "Gender"] = "Male"
df
plt.figure(figsize=(12, 10), dpi=80)
sns.swarmplot(x="Year", y="Minutes", hue="Gender", data=df)
plt.title("UTMB Results by Year", fontsize=18, fontweight="bold")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Year", fontsize=18)
plt.ylabel("Minutes", fontsize=18)
df.loc[df["Cat."].str.contains("E", regex=False) == True, "Age group"] = "22-39"
df.loc[df["Cat."].str.contains("1", regex=False) == True, "Age group"] = "40-49"
df.loc[df["Cat."].str.contains("2", regex=False) == True, "Age group"] = "50-59"
df.loc[df["Cat."].str.contains("3", regex=False) == True, "Age group"] = "60-69"
df.loc[df["Cat."].str.contains("4", regex=False) == True, "Age group"] = "70"
# subset only men's results
men = df.loc[df["Gender"] == "Male"]
# plot violin plot by age group
plt.figure(figsize=(8, 6), dpi=80)
sns.violinplot(
x="Age group", y="Minutes", data=men, color="lightblue", inner="quartile"
)
plt.title("Mens UTMB Results by Age", fontsize=18, fontweight="bold")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Age Groups", fontsize=18)
plt.ylabel("Minutes", fontsize=18)
plt.savefig("UTMB Mens SwarmPlot.png")
# subset only women's results
women = df.loc[df["Gender"] == "Female"]
# plot violin and swarm plots by age categories
plt.figure(figsize=(8, 6), dpi=80)
sns.violinplot(
x="Age group", y="Minutes", data=women, color="lightblue", inner="quartile"
)
sns.swarmplot(x="Age group", y="Minutes", data=women, color="darkblue")
plt.title("Womens UTMB Results by Age", fontsize=18, fontweight="bold")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Age Groups", fontsize=18)
plt.ylabel("Minutes", fontsize=18)
plt.savefig("UTMB Womens SwarmPlot.png")
df[df["Gender"] == "Female"]["Age group"].value_counts()
group_times = women["Minutes"].where(women["Age group"] == "22-39").dropna()
group_times
# subset my age group results
group_times = women["Minutes"].where(women["Age group"] == "22-39").dropna()
# 25, 50 and 75 percentiles for total time, converted to hours
np.round(np.percentile(group_times, [25, 50, 75]) / 60, 1)
# 25, 50 and 75 percentiles converted to per-km pace in minutes (dividing by the 171 km course length)
np.round(np.percentile(group_times, [25, 50, 75]) / 171, 1)
import plotly.graph_objects as go
fig = go.Figure(
data=[
go.Table(
header=dict(
values=["Finish", "Total time, hours", "Pace required, min/km"]
),
cells=dict(
values=[
["In the top 25%", "In the top 50%", "In the top 75%"],
np.round(np.percentile(group_times, [25, 50, 75]) / 60, 1),
np.round(np.percentile(group_times, [25, 50, 75]) / 171, 1),
]
),
)
]
)
fig.show()
df["Nationality"].value_counts()
df["Nationality"].value_counts().to_dict().items()
items = df["Nationality"].value_counts().to_dict().items()
# Keep only nationalities that appear more than n times; the rest will be grouped together
n = 80
nations = df[df["Nationality"].isin([key for key, val in items if val > n])][
"Nationality"
].value_counts()
nations
nations["rest"] = (
df[df["Nationality"].isin([key for key, val in items if val < n])]["Nationality"]
.value_counts()
.sum()
)
nations
nations.tolist()
labels = nations.index.tolist()
counts = nations.tolist()
fig1, ax1 = plt.subplots(figsize=(13, 13))
ax1.pie(counts, labels=labels, autopct="%1.1f%%", startangle=30)
ax1.axis("equal")
plt.show()
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.model_selection import (
train_test_split,
KFold,
cross_val_score,
StratifiedKFold,
)
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, PassiveAggressiveClassifier
from sklearn.ensemble import (
GradientBoostingClassifier,
RandomForestClassifier,
ExtraTreesRegressor,
)
from sklearn.decomposition import PCA
from sklearn.feature_selection import RFE
from sklearn.metrics import accuracy_score
from sklearn.mixture import GaussianMixture
from category_encoders import LeaveOneOutEncoder, BinaryEncoder, TargetEncoder
import time
import logging
sample_submission = pd.read_csv("../input/cat-in-the-dat-ii/sample_submission.csv")
test = pd.read_csv("../input/cat-in-the-dat-ii/test.csv")
train = pd.read_csv("../input/cat-in-the-dat-ii/train.csv")
def replace_nans(dataframe):
    for each in dataframe.columns:
        if each == "id":
            continue
        # fill numeric columns with the mode and non-numeric columns with "UNKNOWN"
        if dataframe[each].dtype != "object" and dataframe[each].dtype != "datetime64":
            dataframe.loc[:, each] = dataframe[each].fillna(dataframe[each].mode()[0])
        else:
            dataframe.loc[:, each] = dataframe[each].fillna("UNKNOWN")
    return dataframe
def encoder(dataframe, columns, enc_type="bin"):
if enc_type == "bin":
for col in columns:
unique = dataframe[col].unique()
dataframe.loc[:, col] = dataframe[col].apply(
lambda x: 1 if x == unique[0] else (0 if x == unique[1] else None)
)
if enc_type == "ord":
encoder = OrdinalEncoder(dtype=np.int16)
for col in columns:
dataframe.loc[:, col] = encoder.fit_transform(
np.array(dataframe[col]).reshape(-1, 1)
)
return dataframe
def rank_features(estimator, X_train, y_train):
selector = RFE(estimator, 10, step=1)
selector = selector.fit(X_train, y_train)
return selector.ranking_
def fitter(clf, X_train, X_test, y_train, y_test):
print("training ", clf)
y_train = np.array([[target] for target in y_train])
y_test = np.array([[target] for target in y_test])
clf.fit(X_train, y_train)
# predictions = clf.predict(X_test)
# print('accuracy:', accuracy_score(y_test, predictions))
    try:
        print("score:", clf.score(X_test, y_test))
    except Exception:
        print(clf.best_score_)
return clf
def main_2():
data = train
data = replace_nans(data)
submission_data = replace_nans(test)
print(data.columns)
nom_cols = ["nom_0", "nom_1", "nom_2"]
ord_cols = ["ord_3", "ord_4", "ord_5"]
bin_cols = ["bin_3", "bin_4"]
ord_encoder = OrdinalEncoder()
for enc in ord_cols + nom_cols:
data[enc] = ord_encoder.fit_transform(np.array(data[enc]).reshape(-1, 1))
submission_data[enc] = ord_encoder.fit_transform(
np.array(submission_data[enc]).reshape(-1, 1)
)
for enc in ["nom_3", "nom_4"]:
enc1 = pd.get_dummies(data[enc], prefix=enc)
data.drop(columns=enc, inplace=True)
data = pd.concat([data, enc1], axis=1)
for enc in ["nom_3", "nom_4"]:
enc1 = pd.get_dummies(submission_data[enc], prefix=enc)
submission_data.drop(columns=enc, inplace=True)
submission_data = pd.concat([submission_data, enc1], axis=1)
target = data["target"]
data = data.drop("target", axis=1)
# for enc in ['nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9']:
loo_enc = LeaveOneOutEncoder(
cols=["nom_5", "nom_6", "nom_7", "nom_8", "nom_9"], return_df=True
)
loo_enc.fit(data, target)
data = loo_enc.transform(data)
# print(data)
# submission_data[enc] = target_enc.transform(submission_data[enc].values.reshape(-1, 1))
submission_data = loo_enc.transform(submission_data)
data = encoder(data, ["ord_1", "ord_2"], enc_type="ord")
data = encoder(data, bin_cols, enc_type="bin")
submission_data = encoder(submission_data, ["ord_1", "ord_2"], enc_type="ord")
submission_data = encoder(submission_data, bin_cols, enc_type="bin")
time_features = ["day", "month"]
for feature in time_features:
data[feature + "_sin"] = np.sin(
(2 * np.pi * data[feature]) / max(data[feature])
)
data[feature + "_cos"] = np.cos(
(2 * np.pi * data[feature]) / max(data[feature])
)
data.drop(time_features, axis=1, inplace=True)
for feature in time_features:
submission_data[feature + "_sin"] = np.sin(
(2 * np.pi * submission_data[feature]) / max(submission_data[feature])
)
submission_data[feature + "_cos"] = np.cos(
(2 * np.pi * submission_data[feature]) / max(submission_data[feature])
)
submission_data.drop(time_features, axis=1, inplace=True)
# X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.25)
clf_2 = LogisticRegression(solver="saga", verbose=1)
clf_3 = GradientBoostingClassifier(
n_estimators=100, verbose=1, learning_rate=0.05, max_depth=7
)
clf_4 = PassiveAggressiveClassifier(verbose=1)
clf_5 = RandomForestClassifier(
n_estimators=500, verbose=2
) # , criterion='entropy')
clf_6 = ExtraTreesRegressor(n_estimators=500, bootstrap=False, n_jobs=2, verbose=1)
# clf_6 = GradientBoostingRegressor(n_estimators=500, learning_rate=0.1, verbose=1)
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(data):
X_train, X_test = data.values[train_index], data.values[test_index]
y_train, y_test = target.values[train_index], target.values[test_index]
clf_3.fit(X_train, y_train)
print(clf_3.score(X_test, y_test))
predictions = clf_3.predict_proba(submission_data.values)
predictions = [x[1] for x in predictions]
# print(predictions)
submission_data = pd.read_csv("../input/cat-in-the-dat-ii/test.csv")
submission_data["target"] = predictions
submission_data = pd.concat(
[submission_data["id"], submission_data["target"]], axis=1
)
submission_data.to_csv("submission.csv", index=False)
main_2()
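# Side note on the cyclical day/month encoding inside main_2(): mapping a periodic feature m with
# period P to (sin(2*pi*m/P), cos(2*pi*m/P)) keeps the two ends of the cycle close together.
# A small illustration for months (not part of the pipeline above):
for m in (1, 6, 12):
    print(m, np.round(np.sin(2 * np.pi * m / 12), 3), np.round(np.cos(2 * np.pi * m / 12), 3))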
|
# Importing the necessary libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Let's look at the data.
train = pd.DataFrame(pd.read_csv("/kaggle/input/mushroom-classification/mushrooms.csv"))
train.columns
# Renaming the columns
train.columns = [
"class",
"cap_shape",
"cap_surface",
"cap_color",
"bruises",
"odor",
"gill_attachment",
"gill_spacing",
"gill_size",
"gill_color",
"stalk_shape",
"stalk_root",
"stalk_surface_above_ring",
"stalk_surface_below_ring",
"stalk_color_above_ring",
"stalk_color_below_ring",
"veil_type",
"veil_color",
"ring_number",
"ring_type",
"spore_print_color",
"population",
"habitat",
]
# Checking for missing values
train.head(5)
train.isna().sum()
# train['stalk-root'].unique()
train.dtypes
# Let's map Categorical variables
mapping = [
{"e": 1, "p": 0},
{"b": 0, "c": 1, "x": 2, "f": 3, "k": 4, "s": 5},
{"f": 0, "g": 1, "y": 2, "s": 3},
{"n": 0, "b": 1, "c": 2, "g": 3, "r": 4, "p": 5, "u": 6, "e": 7, "w": 8, "y": 9},
{"t": 1, "f": 0},
{"a": 1, "l": 2, "c": 3, "y": 4, "f": 5, "m": 6, "n": 0, "p": 7, "s": 8},
{"a": 0, "d": 1, "f": 2, "n": 3},
{"c": 0, "w": 1, "d": 2},
{"b": 0, "n": 1},
{
"k": 0,
"n": 1,
"b": 2,
"h": 3,
"g": 4,
"r": 5,
"o": 6,
"p": 7,
"u": 8,
"e": 9,
"w": 10,
"y": 11,
},
{"e": 0, "t": 1},
{"b": 0, "c": 1, "u": 2, "e": 3, "z": 4, "r": 5, "?": 6},
{"f": 0, "y": 1, "k": 2, "s": 3},
{"f": 0, "y": 1, "k": 2, "s": 3},
{"n": 0, "b": 1, "c": 2, "g": 3, "o": 4, "p": 5, "e": 5, "w": 6, "y": 7},
{"n": 0, "b": 1, "c": 2, "g": 3, "o": 4, "p": 5, "e": 6, "w": 7, "y": 8},
{"p": 0, "u": 1},
{"n": 0, "o": 1, "w": 2, "y": 3},
{"n": 0, "o": 1, "t": 2},
{"c": 4, "e": 1, "f": 2, "l": 3, "n": 0, "p": 5, "s": 6, "z": 7},
{"k": 0, "n": 1, "b": 2, "h": 3, "r": 4, "o": 5, "u": 6, "w": 7, "y": 8},
{"a": 0, "c": 1, "n": 2, "s": 3, "v": 4, "y": 5},
{"g": 0, "l": 1, "m": 2, "p": 3, "u": 4, "w": 5, "d": 6},
]
len(mapping), len(train.columns)
for i in range(len(train.columns)):
train[train.columns[i]] = train[train.columns[i]].map(mapping[i]).astype(int)
# Data types have changed from object to int
train.shape
# Separating the dependent variable from the predictors and splitting the dataset
x = train.iloc[:, 1:]
y = train.iloc[:, 0]
x_tr, x_ts, y_tr, y_ts = train_test_split(x, y, test_size=0.2)
# Predictions using Logistic regression
lr = LogisticRegression()
lr.fit(x_tr, y_tr)
accuracy_score(y_ts, lr.predict(x_ts)), confusion_matrix(y_ts, lr.predict(x_ts))
# Predictions using KNN
kn = KNeighborsClassifier(n_neighbors=5, metric="minkowski", p=2)
kn.fit(x_tr, y_tr)
accuracy_score(y_ts, kn.predict(x_ts)), confusion_matrix(y_ts, kn.predict(x_ts))
# Linear SVM
svm = SVC(kernel="linear", random_state=0)
svm.fit(x_tr, y_tr)
ysvc_pred = svm.predict(x_ts)
accuracy_score(y_ts, ysvc_pred), confusion_matrix(y_ts, ysvc_pred)
# Kernel SVM
kersvm = SVC(kernel="rbf", random_state=0)
kersvm.fit(x_tr, y_tr)
yksvm_pred = kersvm.predict(x_ts)
accuracy_score(y_ts, yksvm_pred), confusion_matrix(y_ts, yksvm_pred)
# Naive Bayes
gnb = GaussianNB()
gnb.fit(x_tr, y_tr)
ygnb_pred = gnb.predict(x_ts)
accuracy_score(y_ts, ygnb_pred), confusion_matrix(y_ts, ygnb_pred)
# Decision Trees
dct = DecisionTreeClassifier(random_state=0)
dct.fit(x_tr, y_tr)
ydct_pred = dct.predict(x_ts)
accuracy_score(y_ts, ydct_pred), confusion_matrix(y_ts, ydct_pred)
# Random Forest
rf = RandomForestClassifier(random_state=0, n_estimators=100)
rf.fit(x_tr, y_tr)
yrf_pred = rf.predict(x_ts)
accuracy_score(y_ts, yrf_pred), confusion_matrix(y_ts, yrf_pred)
# XGBoost
xgb = XGBClassifier()
xgb.fit(x_tr, y_tr)
y_xgb = xgb.predict(x_ts)
accuracy_score(y_ts, y_xgb), confusion_matrix(y_ts, y_xgb)
x_tr.shape
# For the task submission, using an ANN
classifier = Sequential()
classifier.add(Dense(64, activation="relu", input_dim=22))
# classifier.add(Dense(output_dim=1,init='uniform',activation='relu'))
classifier.add(Dense(1, activation="sigmoid"))
classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
classifier.fit(x_tr, y_tr, batch_size=10, epochs=100)
y_pred = classifier.predict(x_ts)
y_pred = y_pred > 0.5
confusion_matrix(y_ts, y_pred)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
train.head()
print(f"Train has {train.shape[0]} rows and {train.shape[1]} columns")
print(f"Test has {test.shape[0]} rows and {test.shape[1]} columns")
train.info()
test.info()
train = train.fillna(0)
test = test.fillna(0)
print(train.isna().sum())
print(test.isna().sum())
def objtypelist(df):
objecttype = []
for col in df.columns:
if df[col].dtype == np.float64 or df[col].dtype == np.int64:
pass
else:
objecttype.append(col)
return objecttype
train_obj = objtypelist(train)
test_obj = objtypelist(test)
from sklearn.preprocessing import LabelEncoder
lb_make = LabelEncoder()
train.corr()["SalePrice"].sort_values()
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(20, 10))
sns.heatmap(train.corr(), annot=True, cmap="Reds")
plt.show()
X = train[["OverallQual"]]
y = train["SalePrice"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, random_state=100
)
from sklearn.linear_model import LinearRegression
mod = LinearRegression()
mod.fit(X_train, y_train)
mod.intercept_, mod.coef_
from sklearn.metrics import r2_score, mean_squared_error
y_train_pred = mod.predict(X_train)
r2_score(y_train, y_train_pred)
test["SalePrice"] = -90334.15280801835 + 44467.70101543 * test["OverallQual"]
test["SalePrice"]
my_submission = pd.DataFrame({"Id": test.Id, "SalePrice": test["SalePrice"]})
my_submission.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # data visualization library
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# context
# I am interested in looking at airbnb price trends by date and availability.
# Are the booked reservations on average cheaper than the available ones?
# what do the price clusters look like over time? Do the clusters change over time?
# is there a time of year that is cheaper to book?
# I want to understand when is the cheapest time to book
df = pd.read_csv("/kaggle/input/airbnb-nov-2019-cal/calendar.csv") # read csv file
df.head() # check the first five rows to see how data looks
df_dp = df[
    ["date", "price"]
]  # clean up the data to isolate the two variables we are interested in
df_dp.head()  # check the first five rows to see how the data looks
# kaggle got annoyed with me and made me include the following lines of code
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# First, convert the columns that hold numbers into numeric values so they can be easily plotted
# change the dates from string to datetime data types
x_numbers = pd.to_datetime(df_dp["date"])
# remove the $ from the price and convert price from string to float datatype
y_numbers = df_dp["price"].replace(r"[\$,]", "", regex=True).astype(float)
# create a scatter plot with date and price
plt.scatter(x_numbers, y_numbers, s=5)
# show scatter plot
plt.show()
# it is interesting to see the different pricing tiers and their consistency over time
# I was curious how a line graph would look.
# Turns out it tells us nothing because there are way too many price points at each moment in time
# Pretty hilarious
plt.plot(x_numbers, y_numbers)
# A histogram is probably more useful, although I have no idea how to select the number of bins
# I am just going to play with it and increase the bin count until I can see a good amount of detail
# limit the x axis to zoom in on histogram
plt.xlim([0, 1200])
# label the axes for clarity
plt.ylabel("number of listings")
plt.xlabel("price ($)")
# give plot a title
plt.title("Seattle Airbnb Prices 2019 - 2021")
# plot the histogram to see the price distribution across the 2000 Airbnbs
plt.hist(y_numbers, bins=3000, alpha=0.5)
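# A hedged alternative to picking the bin count by hand: numpy can suggest one via
# the Freedman-Diaconis rule (the number it proposes depends on the data spread).
fd_edges = np.histogram_bin_edges(y_numbers.dropna(), bins="fd")
print(f"Freedman-Diaconis suggests roughly {len(fd_edges) - 1} bins")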
# I wonder if more expensive places in Seattle are less likely to be booked?
# first I will create a new data frame that only includes booking status and price info
df_booked = df[["available", "price"]]
df_booked
# But this df_booked data frame needs to be split
# I want to separate the free rooms (indicated as f in the available column)
# from the taken rooms (indicated as t in the available column)
# I will store the newly split data in their own data frames to easily get each pricing average
# filter rows for 't' which indicates the room is taken, using a boolean expression
df_t = df_booked[df_booked["available"] == "t"]
t_price_numbers = df_t["price"].replace(r"[\$,]", "", regex=True).astype(float)
df_t.tail()
# filter rows for 'f' which indicates the room is free, using a boolean expression
df_f = df_booked[df_booked["available"] == "f"]
f_price_numbers = df_f["price"].replace(r"[\$,]", "", regex=True).astype(float)
df_f.tail()
# now that the numbers have been converted to floats
# I want to find the means of taken and free airbnbs and graph them
# define names and values for bar graph
names = ["booked", "available"]
# label the y axis for clarity
plt.ylabel("average price ($)")
# values are the means of both booked and available prices
values = [t_price_numbers.mean(), f_price_numbers.mean()]
# plot the graph and show it
plt.bar(names, values)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score
df = pd.read_csv("/kaggle/input/ckdisease/kidney_disease.csv")
df.head()
# Map text to 1/0 and do some cleaning
df[["htn", "dm", "cad", "pe", "ane"]] = df[["htn", "dm", "cad", "pe", "ane"]].replace(
to_replace={"yes": 1, "no": 0}
)
df[["rbc", "pc"]] = df[["rbc", "pc"]].replace(to_replace={"abnormal": 1, "normal": 0})
df[["pcc", "ba"]] = df[["pcc", "ba"]].replace(
to_replace={"present": 1, "notpresent": 0}
)
df[["appet"]] = df[["appet"]].replace(to_replace={"good": 1, "poor": 0, "no": np.nan})
df["classification"] = df["classification"].replace(
to_replace={"ckd": 1.0, "ckd\t": 1.0, "notckd": 0.0, "no": 0.0}
)
df.rename(columns={"classification": "class"}, inplace=True)
# Further cleaning
df["pe"] = df["pe"].replace(
to_replace="good", value=0
) # Not having pedal edema is good
df["appet"] = df["appet"].replace(to_replace="no", value=0)
df["cad"] = df["cad"].replace(to_replace="\tno", value=0)
df["dm"] = df["dm"].replace(to_replace={"\tno": 0, "\tyes": 1, " yes": 1, "": np.nan})
df.drop("id", axis=1, inplace=True)
df.head()
df2 = df.dropna(axis=0)
df2["class"].value_counts()
df2 = df2.apply(pd.to_numeric)  # assign the result so the numeric conversion takes effect
df2.dtypes
for i in range(0, df2.shape[1]):
if df2.dtypes[i] == "object":
print(df2.columns[i], "<--- having object datatype")
df2["pcv"] = df2.pcv.astype(float)
df2["wc"] = df2.wc.astype(float)
df2["rc"] = df2.rc.astype(float)
df2["dm"] = df2.dm.astype(float)
df2.dtypes
df2["class"] = df2["class"].astype(int)
X = df2.drop("class", axis=1)
X = StandardScaler().fit_transform(X)
y = df2["class"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=0, stratify=df2["class"]
)
model = SVC()
parameters = [
{"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
{"kernel": ["linear"], "C": [1, 10, 100, 1000]},
]
grid = GridSearchCV(estimator=model, param_grid=parameters, cv=5)
grid.fit(X_train, y_train)
roc_auc = np.around(
np.mean(cross_val_score(grid, X_test, y_test, cv=5, scoring="roc_auc")), decimals=4
)
print("Score: {}".format(roc_auc))
model1 = RandomForestClassifier(n_estimators=1000)
tuned_parameters = [
{
"n_estimators": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
"max_depth": [2, 3, 4, 5, 6, None],
"class_weight": [None, {0: 0.33, 1: 0.67}, "balanced"],
"random_state": [42],
}
]
clf = GridSearchCV(model1, tuned_parameters, cv=10, scoring="roc_auc")
clf.fit(X_train, y_train)
# note: this scores the untuned 1000-tree forest (model1), not the grid-searched estimator above
score1 = np.mean(cross_val_score(model1, X_test, y_test, cv=5, scoring="roc_auc"))
np.around(score1, decimals=4)
df2 = df.dropna(axis=0)
no_na = df2.index.tolist()
some_na = df.drop(no_na).apply(lambda x: pd.to_numeric(x, errors="coerce"))
some_na = some_na.fillna(0) # Fill up all Nan by zero.
clf_best = clf.best_estimator_
X_test = some_na.iloc[:, :-1]
y_test = some_na["class"]
y_true = y_test
lr_pred = clf_best.predict(X_test)
print(classification_report(y_true, lr_pred))
confusion = confusion_matrix(y_test, lr_pred)
print("Confusion Matrix:")
print(confusion)
score2 = accuracy_score(y_true, lr_pred)
print("Score: %3f" % score2)
model2 = KNeighborsClassifier()
model2.fit(X_train, y_train)
score3 = np.around(
np.mean(cross_val_score(model2, X_test, y_test, cv=5, scoring="roc_auc")),
decimals=4,
)
print("Score : {}".format(score3))
model3 = LogisticRegression()
parameters = {"C": [0.001, 0.01, 0.1, 1, 10, 100]}
grid = GridSearchCV(estimator=model3, param_grid=parameters, cv=5)
grid.fit(X_train, y_train)
score4 = np.around(
np.mean(cross_val_score(model3, X_test, y_test, cv=5, scoring="roc_auc")),
decimals=4,
)
print("Score : {}".format(score4))
names = []
scores = []
names.extend(["RF", "KNN", "LR"])
scores.extend([score2, score3, score4])
alg = pd.DataFrame({"Score": scores}, index=names)
print("Most Accurate : \n{}".format(alg.loc[alg["Score"].idxmax()]))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# LOAD LIBRARIES
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
import pandas as pd
sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv")
# PREPARE DATA FOR NEURAL NETWORK
Y_train = train["label"]
X_train = train.drop(labels=["label"], axis=1)
X_train = X_train / 255.0
X_test = test / 255.0
X_train = X_train.values.reshape(-1, 28, 28, 1)
X_test = X_test.values.reshape(-1, 28, 28, 1)
Y_train = to_categorical(Y_train, num_classes=10)
import matplotlib.pyplot as plt
# PREVIEW IMAGES
plt.figure(figsize=(15, 4.5))
for i in range(30):
plt.subplot(3, 10, i + 1)
plt.imshow(X_train[i].reshape((28, 28)), cmap=plt.cm.binary)
plt.axis("off")
plt.subplots_adjust(wspace=-0.1, hspace=-0.1)
plt.show()
# CREATE MORE IMAGES VIA DATA AUGMENTATION
datagen = ImageDataGenerator(
rotation_range=10, zoom_range=0.10, width_shift_range=0.1, height_shift_range=0.1
)
# PREVIEW AUGMENTED IMAGES
X_train3 = X_train[9,].reshape((1, 28, 28, 1))
Y_train3 = Y_train[9,].reshape((1, 10))
plt.figure(figsize=(15, 4.5))
for i in range(30):
plt.subplot(3, 10, i + 1)
X_train2, Y_train2 = datagen.flow(X_train3, Y_train3).next()
plt.imshow(X_train2[0].reshape((28, 28)), cmap=plt.cm.binary)
plt.axis("off")
if i == 9:
X_train3 = X_train[11,].reshape((1, 28, 28, 1))
if i == 19:
X_train3 = X_train[18,].reshape((1, 28, 28, 1))
plt.subplots_adjust(wspace=-0.1, hspace=-0.1)
plt.show()
# BUILD CONVOLUTIONAL NEURAL NETWORKS
nets = 15
model = [0] * nets
for j in range(nets):
model[j] = Sequential()
model[j].add(Conv2D(32, kernel_size=3, activation="relu", input_shape=(28, 28, 1)))
model[j].add(BatchNormalization())
model[j].add(Conv2D(32, kernel_size=3, activation="relu"))
model[j].add(BatchNormalization())
model[j].add(
Conv2D(32, kernel_size=5, strides=2, padding="same", activation="relu")
)
model[j].add(BatchNormalization())
model[j].add(Dropout(0.4))
model[j].add(Conv2D(64, kernel_size=3, activation="relu"))
model[j].add(BatchNormalization())
model[j].add(Conv2D(64, kernel_size=3, activation="relu"))
model[j].add(BatchNormalization())
model[j].add(
Conv2D(64, kernel_size=5, strides=2, padding="same", activation="relu")
)
model[j].add(BatchNormalization())
model[j].add(Dropout(0.4))
model[j].add(Conv2D(128, kernel_size=4, activation="relu"))
model[j].add(BatchNormalization())
model[j].add(Flatten())
model[j].add(Dropout(0.4))
model[j].add(Dense(10, activation="softmax"))
# COMPILE WITH ADAM OPTIMIZER AND CROSS ENTROPY COST
model[j].compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
# DECREASE LEARNING RATE EACH EPOCH
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95**x)
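# The lambda above decays the learning rate exponentially (1e-3 * 0.95**epoch):
# about 1e-3 at epoch 0, ~6.0e-4 at epoch 10, and ~3.6e-4 at epoch 20.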
# TRAIN NETWORKS
history = [0] * nets
epochs = 20
for j in range(nets):
X_train2, X_val2, Y_train2, Y_val2 = train_test_split(
X_train, Y_train, test_size=0.1
)
history[j] = model[j].fit_generator(
datagen.flow(X_train2, Y_train2, batch_size=66),
epochs=epochs,
steps_per_epoch=X_train2.shape[0] // 66,
validation_data=(X_val2, Y_val2),
callbacks=[annealer],
verbose=0,
)
print(
"CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(
j + 1,
epochs,
max(history[j].history["accuracy"]),
max(history[j].history["val_accuracy"]),
)
)
# ENSEMBLE PREDICTIONS AND SUBMIT
results = np.zeros((X_test.shape[0], 10))
for j in range(nets):
results = results + model[j].predict(X_test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("submission_digit.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pycountry
import plotly.express as px
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
df.drop("Sno", axis=1, inplace=True)
df.head()
df.info()
countries = {}
for country in pycountry.countries:
countries[country.name] = country.alpha_3
df["iso_alpha"] = df["Country"].map(countries.get)
data = df.groupby("iso_alpha").sum().reset_index()
fig_Confirmed = px.choropleth(
data,
locations="iso_alpha",
color="Confirmed",
hover_name="iso_alpha",
color_continuous_scale=px.colors.sequential.Plasma,
)
fig_Confirmed.show()
fig_Deaths = px.choropleth(
data,
locations="iso_alpha",
color="Deaths",
hover_name="iso_alpha",
color_continuous_scale=px.colors.sequential.Plasma,
)
fig_Deaths.show()
fig_Recovered = px.scatter_geo(
data,
locations="iso_alpha",
color="Recovered",
hover_name="iso_alpha",
size="Recovered",
)
fig_Recovered.show()
|
# # Titanic Competition
# This is the notebook of my first all alone, no tutorials, Kaggle competition.
# It will be used for the study of data cleaning, exploratory analysis, data visualization, and machine learning models.
# Feel free to make any comments, suggestions and criticisms.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Reading and separating Data
from sklearn.model_selection import train_test_split
# Reading Data
X = pd.read_csv("../input/titanic/train.csv")
X_test_full = pd.read_csv("../input/titanic/test.csv")
# Putting target variable in 'y' and dropping that column from X_train
y = X["Survived"]
X = X.drop(["Survived"], axis=1)
# Breaking off X in train and validation datasets
X_train_full, X_valid_full, y_train, y_valid = train_test_split(
X, y, train_size=0.8, test_size=0.2, random_state=0
)
# ## Preprocessing
# ### Exploring and treating missing data
# Here I check whether there are any missing values in the dataset, counting them by column to choose how to treat them.
# Number of missing values in each column of X_train_full
X_train_full.isnull().sum()
# Number of missing values in each column of X (train + valid)
X.isnull().sum()
# Now we know that only 3 features have missing values: 'Age' (Age of the passengers), Cabin (Cabin number of the passenger) and Embarked (port of embarkation).
# We will treat these 3 columns differently according to the type of data they represent (numerical or categorical) and to the number of missing values they have.
# 1. The **Cabin** data could be useful for finding a correlation between cabin location and survival rate, since people in the cabins nearest to the rescue boats could have a higher chance of surviving (assuming they were in their cabins when the ship started to sink). However, as a starting point, I chose to drop the Cabin column, since more than 77% of its values are missing.
# 1. The NA values in the **Embarked** column will be treated with an imputer that fills in the most frequent value (which is S for Southampton, with 72% of the entries). Since only two values are missing, this treatment will not have a huge impact on the model. Another possibility would be to drop those two rows from the dataset. Beyond that, a closer look at those two entries could possibly let us infer the missing values.
# 1. The NA values in the **Age** column are the most problematic: there is a large number of missing values (177), but not a proportion big enough to justify discarding the column (177 out of 891 is approximately 20%). Moreover, based on common sense, age could play a big role in survival rate, since children and the elderly could have had more difficulty reaching the rescue boats, or, on the other hand, been given priority boarding. Having said that, the first treatment of the missing values will be mean imputation, bearing in mind that a more refined method would likely bring better results.
# ### Strategy to deal with categorical data
# The dataset has 5 categorical columns: 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'. The Cabin column was already ruled out because of the amount of missing values it had. The question here is what strategy to use for the other categorical columns. At first glance, and for the sake of simplicity in this first try, I will drop the 'Name' and 'Ticket' columns and use One Hot Encoding for the rest.
# * The 'Name' column, based on common sense, probably wouldn't have a big impact on survival rate (although the title before the name could indicate some sort of socio-economic differentiation).
# * The 'Ticket' column has very high cardinality (681 unique entries out of 891 samples) and couldn't reasonably be treated with One Hot Encoding, so I ruled it out as well in this first try.
# ### Preprocessor code
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
# Dropping columns from train and validation sets
drop_columns = ["Cabin", "Name", "Ticket"]
X_train = X_train_full.drop(drop_columns, axis=1)
X_valid = X_valid_full.drop(drop_columns, axis=1)
# Creating the transformers
numeric_transformer = SimpleImputer(strategy="mean")
categoric_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("onehot", OneHotEncoder()),
]
)
categoric_cols = ["Sex", "Embarked"]
# Creating the pipeline with ColumnTransformer
preprocessor = ColumnTransformer(
transformers=[
("imputer_numeric", numeric_transformer, ["Age"]),
("imputer_categoric", categoric_transformer, categoric_cols),
]
)
# Fitting the preprocessor before the ML model
# preprocessor.fit(X_train)
# X_train = preprocessor.transform(X_train)
# X_valid = preprocessor.transform(X_valid)
# ## Machine Learning Model
# On a first try, I will use the Logistic Regression model.
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import accuracy_score
# def get_accuracy(n_estimators, X_train, X_valid, y_train, y_valid):
# model = RandomForestRegressor(n_estimators=n_estimators, random_state=0)
# func_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
# func_pipeline.fit(X_train,y_train)
# preds_val = func_pipeline.predict(X_valid)
# score = accuracy_score(y_valid, preds_val)
# return(score)
# Defining Model
model = LogisticRegression()
# Bundle preprocessing and modeling code in a Pipeline
my_pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
my_pipeline.fit(X_train, y_train)
preds_val = my_pipeline.predict(X_valid)
# Accuracy of the prediction
accuracy_score(y_valid, preds_val)
# ## Output
X_test = X_test_full.drop(["Name", "Cabin", "Ticket"], axis=1)
preds_test = my_pipeline.predict(X_test)
# Save test predictions to file
output = pd.DataFrame({"PassengerId": X_test_full.index, "Survived": preds_test})
output.to_csv("submission.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv(
"/kaggle/input/2019-coronavirus-dataset-01212020-01262020/2019_nC0v_20200121_20200126_cleaned.csv"
)
data.head()
data.describe()
data["Country"] = data["Country"].replace("Mainland China", "China")
data
df_countries = (
data.groupby(["Country", "Date last updated"])
.sum()
.sort_values("Suspected", ascending=False)
)
df_countries
|
# # BERT with KFold
# ## References
# * https://www.kaggle.com/xhlulu/disaster-nlp-keras-bert-using-tfhub
# * https://qiita.com/koshian2/items/81abfc0a75ea99f726b9
# We will use the official tokenization script created by the Google team
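# (Hedged note) `import tokenization` below expects tokenization.py to be present in
# the working directory; in the referenced kernel it is fetched from the TensorFlow
# models repository, e.g.
#   !wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py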
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, LSTM, Bidirectional
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow_hub as hub
import tokenization
def bert_encode(texts, tokenizer, max_len=512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[: max_len - 2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence)
tokens += [0] * pad_len
pad_masks = [1] * len(input_sequence) + [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, max_len=512):
def inner_build_model():
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
clf_output = Bidirectional(LSTM(128))(sequence_output)
# clf_output = sequence_output[:, 0, :]
out = Dense(1, activation="sigmoid")(clf_output)
model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
model.compile(
Adam(lr=2e-6), loss="binary_crossentropy", metrics=["accuracy", "mse"]
)
return model
return inner_build_model
module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1"
# module_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1'
bert_layer = hub.KerasLayer(module_url, trainable=True)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
train["token_len"] = train["text"].apply(lambda x: len(tokenizer.tokenize(x)))
test["token_len"] = test["text"].apply(lambda x: len(tokenizer.tokenize(x)))
token_max_len = max(train["token_len"].max(), test["token_len"].max()) + 2
display(token_max_len)
from sklearn.model_selection import train_test_split, KFold, cross_val_score
def get_kfold_sets(train, k=5):
    kf = KFold(n_splits=k, shuffle=True)
    # iterate over the train/validation indices produced by KFold
    for train_idx, valid_idx in kf.split(train.text.values, train.target.values):
        train_texts, valid_texts = train.text.values[train_idx], train.text.values[valid_idx]
        train_labels, valid_labels = train.target.values[train_idx], train.target.values[valid_idx]
        train_input = bert_encode(train_texts, tokenizer, max_len=token_max_len)
        valid_input = bert_encode(valid_texts, tokenizer, max_len=token_max_len)
        yield train_input, train_labels, valid_input, valid_labels
def get_train_sets(train):
train_input = bert_encode(train.text.values, tokenizer, max_len=token_max_len)
train_labels = train.target.values
return train_input, train_labels
test_input = bert_encode(test.text.values, tokenizer, max_len=token_max_len)
from sklearn.metrics import f1_score
from keras.callbacks import Callback
class F1Callback(Callback):
def __init__(self, model, X_val, y_val):
self.model = model
self.X_val = X_val
self.y_val = y_val
def on_epoch_end(self, epoch, logs):
pred = self.model.predict(self.X_val)
f1_val = f1_score(self.y_val, np.round(pred))
print("\n f1_val = ", f1_val)
model_template = build_model(bert_layer, max_len=token_max_len)()
model_template.summary()
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import clone_model
def run_cross_validation(train, k=3, epochs=10, batch_size=16):
f1_vals = []
models = []
i = 0
for train_input, train_labels, valid_input, valid_labels in get_kfold_sets(
train, k=k
):
model = clone_model(model_template)
model.compile(
Adam(lr=2e-6), loss="binary_crossentropy", metrics=["accuracy", "mse"]
)
train_history = model.fit(
train_input,
train_labels,
validation_data=(valid_input, valid_labels),
epochs=epochs,
batch_size=batch_size,
callbacks=[
EarlyStopping(patience=1, monitor="val_mse", mode="min", verbose=True)
],
)
pred = model.predict(valid_input)
f1_val = f1_score(valid_labels, np.round(pred))
print(f"f1-val: {f1_val}")
f1_vals.append(f1_val)
models.append(model)
df = pd.DataFrame(train_history.history)
df["f1-val"] = f1_val
df.to_csv(f"history_{i}.csv")
i += 1
return np.array(f1_vals).mean(), models
k = 5
f1_val, models = run_cross_validation(train, k=k)
print(f"f1-mean: {f1_val}")
train_input, train_labels = get_train_sets(train)
def calc_best_threshold(pred, labels):
    f1_vals = []
    ts = []
    for t in np.arange(0.1, 1, 0.1):
        f1_val = f1_score(labels, [1 if p >= t else 0 for p in pred])
        f1_vals.append(f1_val)
        ts.append(t)
    return ts[np.argmax(f1_vals)]
best_ts = []
for model in models:
train_pred = model.predict(train_input)
tmp = calc_best_threshold(train_pred, train_labels)
best_ts.append(tmp)
print(f"best ts: {best_ts}")
test_preds = []
for model in models:
test_pred = model.predict(test_input)
test_preds.append(test_pred)
test_preds = np.array(test_preds)
print(test_preds.shape)
test_size = test_preds.shape[1]
mean_pred = []
for s in range(test_size):
tmp = []
for i in range(k):
# tmp.append(test_preds[i][s][0].round())
tmp.append(1 if test_preds[i][s][0] >= best_ts[i] else 0)
mean = np.mean(tmp)
mean_pred.append(mean)
mean_pred = np.array(mean_pred)
print(mean_pred.shape)
print(mean_pred[20:])
print(mean_pred[:20])
submission["target"] = mean_pred.round().astype(int)
submission.to_csv("submission.csv", index=False)
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/covid19-in-usa/us_counties_covid19_daily.csv")
df
# **Data Cleaning**
# Here we will make Day Date & year seperate Columns for better understanding
df["Date"] = pd.to_datetime(df["date"])
df["Year"] = df["Date"].dt.year
df["Month"] = df["Date"].dt.month
df["Day"] = df["Date"].dt.day
df1 = df.drop("date", axis=1)
df1
# In this section we will see what our dataset is all about: its columns & column datatypes.
df1.info()
df1.columns
df1["county"].unique()
df1["state"].unique()
df1["state"].shape
df1["Year"].unique()
df1["Month"].unique()
df1["Day"].unique()
df1.head(2)
sns.countplot(x="state", data=df1.head(100))
plt.show()
# From the graph we can conclude that, among these rows, California has the highest count (42)
plt.figure(figsize=(10, 12))
plt.subplots_adjust(wspace=0.3)
plt.subplot(221)
sns.barplot(x="state", y="cases", data=df1.head(50))
plt.xticks(rotation=90)
plt.subplot(222)
sns.barplot(x="county", y="cases", data=df1.head(50))
plt.xticks(rotation=90)
# * From the above graph of cases by county, we can conclude that San Francisco has the maximum cases (2)
# * From the above graph of deaths by state, we can conclude that the maximum deaths are in Washington (3.5)
df1["county"].unique()
plt.figure(figsize=(15, 8))
sns.barplot(x="county", y="deaths", data=df1.head(550))
plt.xticks(rotation=90)
# * From the above graph of deaths by county, we can conclude that San Francisco has the maximum deaths (2)
plt.figure(figsize=(15, 10))
sns.barplot(x="state", y="deaths", data=df1)
plt.xticks(rotation=90)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# ### Loading Dataset
# importing required libraraies.
import pandas as pd
import numpy as np
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
# ### Exploring the dataset
#
train_df.head()
train_df.describe()
train_df.shape
train_df.isnull()
train_df.isnull().sum()
missing_val_count = train_df.isnull().sum().sum()
print(f"The number of missing values in the dataset is: {missing_val_count}")
# > Dropping columns with missing values can make sense since there are over 50 % of NAN values in the columns
# ### Cleaning the Data set
#
X_full = train_df.drop(columns=["Cabin"])
X_test = test_df.drop(columns=["Cabin"])
X_full.head()
Age_interpolated = X_full["Age"].interpolate(method="linear")
X_test_age = X_test["Age"].interpolate(method="linear")
Age_interpolated
# assign the interpolated Age column back into the data frames
final_df = X_full.assign(Age=Age_interpolated)
final_X_test = X_test.assign(Age=X_test_age)
final_df.tail()
final_df.columns
# ### creating target and features
features_name = [
"PassengerId",
"Pclass",
"Name",
"Sex",
"Age",
"SibSp",
"Parch",
"Ticket",
"Fare",
"Embarked",
]
X = final_df[features_name]
y = final_df.Survived
# ### Dealing with categorical columns
X_num_col = [col for col in X.columns if X[col].dtypes in ["int64", "float64"]]
X_test_num_col = [
col
for col in final_X_test.columns
if final_X_test[col].dtypes in ["int64", "float64"]
]
X_cat_col = [col for col in X.columns if X[col].dtypes == "object"]
print(X_cat_col)
X_test_cat_col = [
col for col in final_X_test.columns if final_X_test[col].dtypes == "object"
]
from sklearn.preprocessing import OneHotEncoder
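# The notebook ends with the import above. A minimal, hedged sketch (not the
# author's code) of how the encoder could be applied to the low-cardinality
# categorical columns found earlier; high-cardinality columns such as Name and
# Ticket would normally be dropped or handled differently.
low_card_cols = [col for col in X_cat_col if X[col].nunique() < 10]
ohe = OneHotEncoder(handle_unknown="ignore")
encoded = ohe.fit_transform(X[low_card_cols].fillna("missing")).toarray()
print(low_card_cols, encoded.shape)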
|
# # NY Rental Properties Pricing
# # Dataset is obtained from kaggle
# https://www.kaggle.com/datasets/ivanchvez/ny-rental-properties-pricing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/ny-rental-properties-pricing/NY Realstate Pricing.csv")
df
df.shape
df.head()
df.columns
df.info()
df.describe()
df.isnull().sum()
df.isnull()
df.isnull().sum().sum()
len(df["F1"].unique())
df["room_type"].value_counts()
df.drop(["F1", "id"], axis=1, inplace=True)
df.shape
for col in df.columns:
unique_vals = df[col].unique()
print(f"Unique values in column {col}: {unique_vals}")
for column in df.columns:
unique_values = df[column].nunique()
value_counts = df[column].value_counts()
print(f"Column name: {column}")
print(f"Number of unique values: {unique_values}")
print(f"Value counts:\n{value_counts}\n")
df.duplicated().sum()
# Histogram of price
plt.figure(figsize=(10, 8))
sns.histplot(x="price", data=df, bins=50)
plt.title("Histogram of Price")
plt.show()
# Bar chart of room type
plt.figure(figsize=(10, 8))
sns.countplot(x="room_type", data=df)
plt.title("Count of Room Types")
plt.show()
# Scatterplot of price vs minimum nights
plt.figure(figsize=(10, 8))
sns.scatterplot(x="minimum_nights", y="price", data=df)
plt.title("Price vs Minimum Nights")
plt.show()
# Correlation matrix
corr = df.corr()
plt.figure(figsize=(10, 8))
sns.heatmap(corr, annot=True, cmap="coolwarm")
plt.title("Correlation Matrix")
plt.show()
from autoviz import data_cleaning_suggestions
data_cleaning_suggestions(df)
df.drop(["latitude", "longitude"], axis=1, inplace=True)
for i in df.columns:
if df[i].dtype == "object":
df[i] = df[i].astype("category").cat.codes
df.dtypes
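# Hedged note: .cat.codes assigns an arbitrary integer order to categories such as
# room_type, which the linear models below will treat as ordinal; one-hot encoding
# (e.g. pd.get_dummies) would avoid that at the cost of more columns.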
x = df.drop(columns=["price"])
y = df["price"]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
from sklearn.linear_model import LinearRegression
# Instantiate the LinearRegression model to predict the price
linear = LinearRegression()
# fit the model to the train data
linear.fit(x_train, y_train)
linear_pred = linear.predict(x_test)
linear_pred
acc_lr = linear.score(x_test, y_test)
print(linear.score(x_test, y_test))
print(linear.score(x_train, y_train))
sns.distplot(df["price"], bins=70)
plt.show()
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder
X = df.drop(columns=["price"])
y = df["price"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
from sklearn.metrics import classification_report
# Define the models
models = {
"Linear Regression": LinearRegression(),
"Ridge Regression": Ridge(alpha=1),
"Lasso Regression": Lasso(alpha=1),
"Decision Tree Regression": DecisionTreeRegressor(max_depth=5),
"Random Forest Regression": RandomForestRegressor(n_estimators=100, max_depth=5),
}
# Train and evaluate the models
for name, model in models.items():
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# Evaluate the model
r2 = r2_score(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
print("Model:", name)
print("R^2:", r2)
print("Mean Squared Error:", mse)
print("Mean Absolute Error:", mae)
# Plot the results
plt.figure(figsize=(10, 6))
plt.title(f"{name} - Actual vs. Predicted")
plt.scatter(y_test, y_pred)
plt.xlabel("Actual")
plt.ylabel("Predicted")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import keras
from keras.preprocessing.image import *
from keras.layers import *
from keras.models import Sequential
from keras.callbacks import *
face_mask_detection_dir = "../input/face-mask-detection/images"
with_without_mask_train = "../input/withwithout-mask/maskdata/maskdata/train"
with_without_mask_test = "../input/withwithout-mask/maskdata/maskdata/test"
with_mask_train_dir = os.path.join(with_without_mask_train, "with_mask")
without_mask_train_dir = os.path.join(with_without_mask_train, "without_mask")
with_mask_test_dir = os.path.join(with_without_mask_test, "with_mask")
without_mask_test_dir = os.path.join(with_without_mask_test, "without_mask")
categories = os.listdir(with_without_mask_train)
labels = [i for i in range(len(categories))]
label_dict = dict(zip(categories, labels))  # map each category name to an integer label
print(label_dict)
print(categories)
print(labels)
img_size = 100
data = []
target = []
for category in categories:
folder_path = os.path.join(with_without_mask_train, category)
img_names = os.listdir(folder_path)
for img_name in img_names:
img_path = os.path.join(folder_path, img_name)
img = cv2.imread(img_path)
try:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (img_size, img_size))
data.append(resized)
target.append(label_dict[category])
except Exception as e:
print("Exception:", e)
data = np.array(data) / 255.0
data = np.reshape(data, (data.shape[0], img_size, img_size, 1))
target = np.array(target)
from keras.utils import np_utils
new_target = np_utils.to_categorical(target)
np.save("data", data)
np.save("target", new_target)
data = np.load("data.npy")
target = np.load("target.npy")
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(Conv2D(200, (3, 3), input_shape=data.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# The first CNN layer followed by Relu and MaxPooling layers
model.add(Conv2D(100, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# The second convolution layer followed by Relu and MaxPooling layers
model.add(Flatten())
model.add(Dropout(0.5))
# Flatten layer to stack the output convolutions from second convolution layer
model.add(Dense(50, activation="relu"))
# Dense layer of 50 neurons
model.add(Dense(2, activation="softmax"))
# The Final layer with two outputs for two categories
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
checkpoint = ModelCheckpoint(
"model-{epoch:03d}.model",
monitor="val_loss",
verbose=0,
save_best_only=True,
mode="auto",
)
history = model.fit(
data, target, epochs=20, callbacks=[checkpoint], validation_split=0.2
)
plt.plot(history.history["accuracy"], "r", label="training accuracy")
plt.plot(history.history["val_accuracy"], label="validation accuracy")
plt.xlabel("# epochs")
plt.ylabel("loss")
plt.legend()
plt.show()
categories = os.listdir(with_without_mask_test)
labels = [i for i in range(len(categories))]
label_dict = dict(zip(categories, labels))  # map each category name to an integer label
print(label_dict)
print(categories)
print(labels)
img_size = 100
data_test = []
target_test = []
for category in categories:
folder_path = os.path.join(with_without_mask_test, category)
img_names = os.listdir(folder_path)
for img_name in img_names:
img_path = os.path.join(folder_path, img_name)
img = cv2.imread(img_path)
try:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (img_size, img_size))
data_test.append(resized)
target_test.append(label_dict[category])
except Exception as e:
print("Exception:", e)
data_test = np.array(data_test) / 255.0
data_test = np.reshape(data_test, (data_test.shape[0], img_size, img_size, 1))
target_test = np.array(target_test)
from keras.utils import np_utils
new_target_test = np_utils.to_categorical(target_test)
np.save("data_test", data_test)
np.save("target_test", new_target_test)
data_test = np.load("data.npy")
target_test = np.load("target.npy")
print(model.evaluate(data_test, target_test))
# As you can see the model has approximately 97% accuracy!
from keras.models import load_model
import cv2
import numpy as np
# model = load_model('model-017.model')
# use the cascade file bundled with OpenCV so the path resolves
face_clsfr = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)
# note: camera index 2 assumes a third video device (0 is usually the default webcam);
# this block only runs with a local camera, not inside a Kaggle kernel
source = cv2.VideoCapture(2)
labels_dict = {0: "MASK", 1: "NO MASK"}
color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}
while True:
ret, img = source.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_clsfr.detectMultiScale(gray, 1.3, 5)
for x, y, w, h in faces:
        face_img = gray[y : y + h, x : x + w]
resized = cv2.resize(face_img, (100, 100))
normalized = resized / 255.0
reshaped = np.reshape(normalized, (1, 100, 100, 1))
result = model.predict(reshaped)
label = np.argmax(result, axis=1)[0]
cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)
cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)
cv2.putText(
img,
labels_dict[label],
(x, y - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.8,
(255, 255, 255),
2,
)
cv2.imshow("LIVE", img)
key = cv2.waitKey(1)
if key == 27:
break
cv2.destroyAllWindows()
source.release()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
import torchvision
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as T
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, TensorDataset, random_split
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
import matplotlib.pyplot as plt
import seaborn as sn
# Support libraries
import numpy as np
import pandas as pd
import itertools
import os
import random
from PIL import Image
class ParticleSeg(Dataset):
def __init__(
self,
root_dir: str,
img_transforms: object = None,
mask_transforms: object = None,
):
super().__init__()
self.root_dir = root_dir
self.transforms_mask = mask_transforms
self.transforms_img = img_transforms
self.mask_dir = os.path.join(root_dir, "segmaps")
self.img_dir = os.path.join(root_dir, "images")
self.file_names = os.listdir(self.mask_dir)
self.mask_paths = [
os.path.join(self.mask_dir, mask_name) for mask_name in self.file_names
]
self.img_paths = [
os.path.join(self.img_dir, img_name) for img_name in self.file_names
]
def __len__(self):
return len(self.file_names)
def __getitem__(self, ix: int):
seed = np.random.randint(2022)
random.seed(seed)
torch.manual_seed(seed)
mask_path, img_path = self.mask_paths[ix], self.img_paths[ix]
mask, img = Image.open(mask_path), Image.open(img_path).convert("RGB")
if self.transforms_img is not None:
img = self.transforms_img(img)
random.seed(seed)
torch.manual_seed(seed)
if self.transforms_mask is not None:
mask = self.transforms_mask(mask)
mask, img = mask.float(), img.float()
mask[mask > 0] = 1
# img = img / 255
return img, mask
img_transforms = T.Compose(
[
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
T.Resize((244, 244)),
]
)
mask_transforms = T.Compose([T.ToTensor(), T.Resize((244, 244))])
dataset = ParticleSeg(
root_dir="/kaggle/input/electron-microscopy-particle-segmentation",
img_transforms=img_transforms,
mask_transforms=mask_transforms,
)
img, mask = dataset[10]  # __getitem__ returns (img, mask)
fig, ax = plt.subplots(nrows=1, ncols=2)
pos1 = ax[0].imshow(img.permute((1, 2, 0)))
pos2 = ax[1].imshow(mask.permute((1, 2, 0)))
dataset_train, dataset_val = random_split(
dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8 * len(dataset))]
)
print(len(dataset_train), len(dataset_val))
dataloader_train = DataLoader(dataset_train, batch_size=16)
dataloader_val = DataLoader(dataset_val, batch_size=16)
def createDeepLabv3(outputchannels=1):
model = models.segmentation.deeplabv3_resnet50(pretrained=True, progress=True)
model.classifier = DeepLabHead(2048, outputchannels)
return model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = createDeepLabv3(1).to(device)
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
epochs = 50
def train(model, dataloader):
model.train()
train_loss = 0
for data in dataloader:
optimizer.zero_grad()
images, masks = data[0].to(device), data[1].to(device)
out = model(images)["out"]
loss = criterion(out, masks)
train_loss += loss.item()
loss.backward()
optimizer.step()
return train_loss / len(dataloader)
def test(model, dataloader):
model.eval()
test_loss = 0
for data in dataloader:
images, masks = data[0].to(device), data[1].to(device)
out = model(images)["out"]
loss = criterion(out, masks)
test_loss += loss.item()
return test_loss / len(dataloader)
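# A hedged addition (not in the original script): BCE loss alone does not show how
# well the predicted masks overlap the ground truth, so here is a simple IoU helper
# that could be run on the validation loader after training.
def iou_score(model, dataloader, threshold=0.5):
    model.eval()
    intersection, union = 0.0, 0.0
    with torch.no_grad():
        for images, masks in dataloader:
            images, masks = images.to(device), masks.to(device)
            preds = (torch.sigmoid(model(images)["out"]) > threshold).float()
            intersection += (preds * masks).sum().item()
            union += ((preds + masks) > 0).float().sum().item()
    return intersection / union if union > 0 else 0.0
# example (after training): print("Val IoU:", iou_score(model, dataloader_val))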
for epoch in range(epochs):
train_loss = train(model, dataloader_train)
test_loss = test(model, dataloader_val)
print(
"Epoch [{}/{}], Train loss: {:.4f}, Val loss {:.4f}".format(
epoch + 1, epochs, train_loss, test_loss
)
)
dataiter = iter(dataloader_train)
images, masks = next(dataiter)
images = images.to(device)
output = model(images)["out"]
plt.imshow(images[15].cpu().permute((1, 2, 0)))
plt.show()
plt.imshow(output[15].cpu().detach().permute((1, 2, 0)))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
digit_recon_tran_csv = pd.read_csv(
"/kaggle/input/digit-recognizer/train.csv", dtype=np.float32
)
digit_recon_test_csv = pd.read_csv(
"/kaggle/input/digit-recognizer/test.csv", dtype=np.float32
)
print("tran dataset size: ", digit_recon_tran_csv.size, "\n")
print("test dataset size: ", digit_recon_test_csv.size, "\n")
# print(digit_recon_tran_csv.head(1))
# print(digit_recon_tran_csv.head(1).label)
tran_label = digit_recon_tran_csv.label.values
tran_image = (
digit_recon_tran_csv.loc[:, digit_recon_tran_csv.columns != "label"].values / 255
) # normalization
test_image = digit_recon_test_csv.values / 255
print("train label size: ", tran_label.shape)
print("train image size: ", tran_image.shape)
print("test image size: ", test_image.shape)
from sklearn.model_selection import train_test_split
train_image, valid_image, train_label, valid_label = train_test_split(
tran_image, tran_label, test_size=0.2, random_state=42
) #
print("train size: ", train_image.shape)
print("valid size: ", valid_image.shape)
import torch
import torch.nn as nn
import numpy as np
import torchvision
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
print(torch.__version__)
class MNIST_data(Dataset):
"""MNIST dtaa set"""
def __init__(
self,
data,
transform=transforms.Compose(
[
transforms.ToPILImage(),
transforms.RandomAffine(30, (0.1, 0.1)),
transforms.ToTensor(),
]
),
):
if len(data) == 1:
# test data
self.X = data[0].reshape(-1, 28, 28)
self.y = None
else:
# training data
self.X = data[0].reshape(-1, 28, 28)
            self.y = data[1].astype(np.int64)
self.transform = transform
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.y is not None:
return self.transform(self.X[idx]), self.y[idx]
else:
return self.transform(self.X[idx])
# test mnist dataset
import matplotlib.pyplot as plt
test_mnist_data = MNIST_data((train_image, train_label))
test_mnist_loader = torch.utils.data.DataLoader(
dataset=test_mnist_data, batch_size=1, shuffle=True
)
for batch_idx, (images, labels) in enumerate(test_mnist_loader):
plt.imshow(images.view(28, 28).numpy())
plt.axis("off")
plt.title(str(labels.numpy()))
plt.show()
break
# visual
import matplotlib.pyplot as plt
plt.imshow(test_image[10].reshape(28, 28))
plt.axis("off")
plt.show()
batch_size = 64 # 2^5=64
train_dataset = MNIST_data((train_image, train_label))
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=batch_size, shuffle=True
)
# use a plain transform for validation so the RandomAffine augmentation is not applied
valid_dataset = MNIST_data(
    (valid_image, valid_label),
    transform=transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()]),
)
valid_loader = torch.utils.data.DataLoader(
    dataset=valid_dataset, batch_size=batch_size, shuffle=False
)
class YANNet(nn.Module):
def __init__(self):
super(YANNet, self).__init__()
self.conv = nn.Sequential(
# size: 28*28
nn.Conv2d(
1, 8, 3, 1, 1
), # in_channels out_channels kernel_size stride padding
nn.ReLU(),
nn.Conv2d(8, 16, 3, 1, 1),
nn.ReLU(),
nn.MaxPool2d(2),
# size: 14*14
nn.Conv2d(16, 16, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(16, 8, 3, 1, 1),
nn.ReLU(),
nn.MaxPool2d(2),
)
self.fc = nn.Sequential(
# size: 7*7
nn.Linear(8 * 7 * 7, 256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(256, 256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(256, 10),
)
def forward(self, img):
x = self.conv(img)
o = self.fc(x.view(x.shape[0], -1))
return o
model = YANNet()
error = nn.CrossEntropyLoss()
if torch.cuda.is_available():
model = model.cuda()
error = error.cuda()
optim = torch.optim.SGD(model.parameters(), lr=0.1)
# exp_lr_scheduler = lr_scheduler.StepLR(optim, step_size=10, gamma=0.1)
num_epoc = 10
from torch.autograd import Variable
for epoch in range(num_epoc):
epoc_train_loss = 0.0
epoc_train_corr = 0.0
epoc_valid_corr = 0.0
print("Epoch:{}/{}".format(epoch, num_epoc))
model.train()
# exp_lr_scheduler.step()
for batch_idx, (images, labels) in enumerate(train_loader):
if torch.cuda.is_available():
images = images.cuda()
labels = labels.cuda()
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
optim.zero_grad()
loss = error(outputs, labels)
loss.backward()
optim.step()
epoc_train_loss += loss.data
outputs = torch.max(outputs.data, 1)[1]
epoc_train_corr += torch.sum(outputs == labels.data)
with torch.no_grad():
model.eval()
for batch_idx, (images, labels) in enumerate(valid_loader):
if torch.cuda.is_available():
images = images.cuda()
labels = labels.cuda()
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
outputs = torch.max(outputs.data, 1)[1]
epoc_valid_corr += torch.sum(outputs == labels.data)
print(
"loss is :{:.4f},Train Accuracy is:{:.4f}%,Test Accuracy is:{:.4f}%".format(
epoc_train_loss / len(train_dataset),
100 * epoc_train_corr / len(train_dataset),
100 * epoc_valid_corr / len(valid_dataset),
)
)
model = model.cpu()
model.eval()
plt.imshow(test_image[100].reshape(28, 28))
plt.axis("off")
plt.show()
one_test = test_image[100]
one_test = torch.from_numpy(one_test).view(1, 1, 28, 28)
one_output = model(one_test)
print(torch.max(one_output.data, 1)[1].numpy())
digit_recon_submission_csv = pd.read_csv(
"/kaggle/input/digit-recognizer/sample_submission.csv", dtype=np.float32
)
print(digit_recon_submission_csv.head(10))
print(test_image.shape)
test_results = np.zeros((test_image.shape[0], 2), dtype="int32")
print(test_results.shape)
for i in range(test_image.shape[0]):
one_image = torch.from_numpy(test_image[i]).view(1, 1, 28, 28)
one_output = model(one_image)
test_results[i, 0] = i + 1
test_results[i, 1] = torch.max(one_output.data, 1)[1].numpy()
print(test_results.shape)
Data = {"ImageId": test_results[:, 0], "Label": test_results[:, 1]}
DataFrame = pd.DataFrame(Data)
DataFrame.to_csv("submission.csv", index=False, sep=",")
|
#
# # Table of Contents
# 1. [Introduction](#introduction)
# 1. [Install libraries and packages](#install_libraries_and_packages)
# 1. [Import libraries](#import_libraries)
# 1. [Configure hyper-parameters](#configure_hyper_parameters)
# 1. [Define useful classes](#define_useful_classes)
# 1. [Start the inference process](#start_the_inference_process)
# 1. [Save the submission](#save_the_submission)
# 1. [Conclusion](#conclusion)
# # Introduction
# So, I have successfully trained a classifier using a bunch of datasets listed in the [*Other useful datasets*](https://www.kaggle.com/c/deepfake-detection-challenge/discussion/128954) discussion and uploaded it to Kaggle as an external [*dataset*](https://www.kaggle.com/phunghieu/dfdcmultifacef5-resnet18). Now, let's use this model to run inference on all videos in the test set and complete this end-to-end solution by submitting the final result to the host :).
# If you do not know how to train the classifier, please follow this [*link*](https://www.kaggle.com/phunghieu/dfdc-multiface-training).
# ---
# ## Multiface's general diagram
# 
# ---
# ## Implementation
# I will loop through all test videos and try to get face images using the same strategy I applied to the validation process in the [*DFDC-Multiface-Training*](https://www.kaggle.com/phunghieu/dfdc-multiface-training) kernel. The only difference is that, instead of having well-prepared data, I must run a face detector, the same one I used to prepare the training dataset in the [*Data Preparation*](https://www.kaggle.com/phunghieu/deepfake-detection-face-extractor) kernel, to extract faces directly from the frames of each input video.
# If I fail to get enough faces from a video, I will mark it as `invalid` and assign a `default predicted value` (probability) to this video near the end of the notebook.
# ---
# ## Pipeline
# This end-to-end solution includes 3 steps:
# 1. [*Data Preparation*](https://www.kaggle.com/phunghieu/deepfake-detection-face-extractor)
# 1. [*Training*](https://www.kaggle.com/phunghieu/dfdc-multiface-training)
# 1. *Inference* <- **you're here**
# [Back to Table of Contents](#toc)
# # Install libraries and packages
# [Back to Table of Contents](#toc)
# Install facenet-pytorch
from facenet_pytorch.models.inception_resnet_v1 import get_torch_home
torch_home = get_torch_home()
# Copy model checkpoints to torch cache so they are loaded automatically by the package
#
# # Import libraries
# [Back to Table of Contents](#toc)
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision.models import resnet18
from facenet_pytorch import MTCNN
from albumentations import Normalize, Compose
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from tqdm.notebook import tqdm
import os
import glob
import multiprocessing as mp
if torch.cuda.is_available():
device = "cuda:0"
torch.set_default_tensor_type("torch.cuda.FloatTensor")
else:
device = "cpu"
print(f"Running on device: {device}")
#
# # Configure hyper-parameters
# [Back to Table of Contents](#toc)
TEST_DIR = "/kaggle/input/deepfake-detection-challenge/test_videos/"
MODEL_PATH = "/kaggle/input/dfdcmultifacef5-resnet18/f5_resnet18.pth"
N_FACES = 5
BATCH_SIZE = 64
NUM_WORKERS = mp.cpu_count()
FRAME_SCALE = 0.25
FACE_BATCH_SHAPE = (N_FACES * 3, 160, 160)
DEFAULT_PROB = 0.5
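# The N_FACES face crops are stacked along the channel axis, so the classifier sees a
# single (N_FACES * 3)-channel input per video (hence in_channels=3 * N_FACES below).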
#
# # Define useful classes
# [Back to Table of Contents](#toc)
class DeepfakeClassifier(nn.Module):
def __init__(self, encoder, in_channels=3, num_classes=1):
super(DeepfakeClassifier, self).__init__()
self.encoder = encoder
# Modify input layer.
self.encoder.conv1 = nn.Conv2d(
in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False
)
# Modify output layer.
self.encoder.fc = nn.Linear(512 * 1, num_classes)
def forward(self, x):
return torch.sigmoid(self.encoder(x))
def freeze_all_layers(self):
for param in self.encoder.parameters():
param.requires_grad = False
def freeze_middle_layers(self):
self.freeze_all_layers()
for param in self.encoder.conv1.parameters():
param.requires_grad = True
for param in self.encoder.fc.parameters():
param.requires_grad = True
def unfreeze_all_layers(self):
for param in self.encoder.parameters():
param.requires_grad = True
class TestVideoDataset(Dataset):
def __init__(
self,
test_dir,
frame_resize=None,
face_detector=None,
n_faces=1,
preprocess=None,
):
self.test_dir = test_dir
self.test_video_paths = glob.glob(os.path.join(self.test_dir, "*.mp4"))
self.face_detector = face_detector
self.n_faces = n_faces
self.frame_resize = frame_resize
self.preprocess = preprocess
def __len__(self):
return len(self.test_video_paths)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
test_video_path = self.test_video_paths[idx]
test_video = test_video_path.split("/")[-1]
# Get faces until enough (try limit: n_faces)
faces = []
for i in range(self.n_faces):
# Create video reader and find length
v_cap = cv2.VideoCapture(test_video_path)
v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
stride = int(v_len / (self.n_faces**2))
sample = np.linspace(
i * stride, (v_len - 1) + i * stride, self.n_faces
).astype(int)
frames = []
# Get frames
for j in range(v_len):
success = v_cap.grab()
if j in sample:
success, frame = v_cap.retrieve()
if not success:
continue
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = Image.fromarray(frame)
# Resize frame to desired size
if self.frame_resize is not None:
frame = frame.resize(
[int(d * self.frame_resize) for d in frame.size]
)
frames.append(frame)
if len(frames) > 0:
all_faces_in_frames = [
detected_face
for detected_faces in self.face_detector(frames)
if detected_faces is not None
for detected_face in detected_faces
]
faces.extend(all_faces_in_frames)
if len(faces) >= self.n_faces: # Get enough faces
break
v_cap.release()
if len(faces) >= self.n_faces: # Get enough faces
faces = faces[: self.n_faces] # Get top
if self.preprocess is not None:
for j in range(len(faces)):
augmented = self.preprocess(
image=faces[j].cpu().detach().numpy().transpose(1, 2, 0)
)
faces[j] = augmented["image"]
faces = np.concatenate(faces, axis=-1).transpose(2, 0, 1)
return {"video_name": test_video, "faces": faces, "is_valid": True}
else:
return {
"video_name": test_video,
"faces": np.zeros(FACE_BATCH_SHAPE, dtype=np.float32),
"is_valid": False, # Those invalid videos will get DEFAULT_PROB
}
#
# # Start the inference process
# [Back to Table of Contents](#toc)
# Load face detector.
face_detector = MTCNN(
margin=14, keep_all=True, factor=0.5, post_process=False, device=device
).eval()
encoder = resnet18(pretrained=False)
classifier = DeepfakeClassifier(encoder=encoder, in_channels=3 * N_FACES, num_classes=1)
classifier.to(device)
state = torch.load(MODEL_PATH, map_location=lambda storage, loc: storage)
classifier.load_state_dict(state["state_dict"])
classifier.eval()
preprocess = Compose(
[Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], p=1)]
)
test_dataset = TestVideoDataset(
TEST_DIR,
frame_resize=FRAME_SCALE,
face_detector=face_detector,
n_faces=N_FACES,
preprocess=preprocess,
)
test_dataloader = DataLoader(
test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS
)
submission = []
with torch.no_grad():
try:
for videos in tqdm(test_dataloader):
y_pred = classifier(videos["faces"]).squeeze(dim=-1).cpu().detach().numpy()
submission.extend(
list(
zip(
videos["video_name"],
y_pred,
videos["is_valid"].cpu().detach().numpy(),
)
)
)
except Exception as e:
print(e)
submission = pd.DataFrame(submission, columns=["filename", "label", "is_valid"])
submission.sort_values("filename", inplace=True)
submission.loc[submission.is_valid == False, "label"] = DEFAULT_PROB
#
# # Save the submission
# [Back to Table of Contents](#toc)
submission[["filename", "label"]].to_csv("submission.csv", index=False)
plt.hist(submission.label, 20)
plt.show()
|
# ## Loading Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Plotly (px, go, py) and NLTK (tokenization, stopwords) are used further below
import plotly.express as px
import plotly.graph_objs as go
import plotly.offline as py
from plotly.offline import init_notebook_mode
import nltk
from nltk.corpus import stopwords
init_notebook_mode(connected=True)
plt.style.use("ggplot")
from collections import Counter
from wordcloud import WordCloud
from PIL import Image
import urllib.request
import random
from sklearn.preprocessing import StandardScaler
# # Data Preprocessing
# Loading the dataset and gathering a glimpse:
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("../input/new-york-city-current-job-postings/nyc-jobs.csv")
df.head()
df.info()
# #### Columns Description:
# - **Job ID**: The Unique Job ID for each opening
# - **Posting Type**: The opening type, whether internal or external, for the job.
# - **# of Positions**: The number of positions available for a certain opening
# - **Business Title**: The position the candidate would hold.
# - **Civil Service Title**: The Broad Title the position would be classified under
# - **Title Code No**: The Code for a particular title
# - **Level**: The authority the certain opening would bring with it
# - **Job Category**: Broad Classification of where all the jobs would fall in
# - **Full-time/Part-Time**: Time frame of a job.
# - **Salary Range From**: The beginning salary cap for that particular opening
# - **Salary Range To**: The highest cap for that particular job opening.
# - **Salary Frequency**: The payment factor for the job, hourly or annual
# - **Work Location**: The location of the workplace
# - **Division/Work Unit**: Broad working units for all the jobs
# - **Job Description**: A brief idea of what the job will contain
# - **Minimum Qual Requirements**: The minimum qualifications a candidate must possess for the job
# - **Preferred Skills**: Optimal skills which the posting is looking for
# - **Additional Information**: Any additional information provided with the job opening
# - **Hours/Shift**: The timings for the job
# - **Work Location 1**: Additional information for the work location
# - **Recruitment Contact**: Empty field, supposed to contain numbers
# - **Residency Requirement**: Whether the employee must be a resident of NYC.
# - **Posting date**: When the opening was announced.
# - **Post Until**: The closing date.
# - **Posting Updated**: The time when the posting was updated for the opening.
# - **Process Date**: When the posting process was completed
# Phew! That was a lot of columns, well then, let's get to exploring them!
# # Data Preprocessing
def missing_values_table(df):
# Total missing values
mis_val = df.isnull().sum()
# Percentage of missing values
mis_val_percent = 100 * df.isnull().sum() / len(df)
# Make a table with the results
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
# Rename the columns
mis_val_table_columns = mis_val_table.rename(
columns={0: "Missing Values", 1: "% of Total Values"}
)
# Sort the table by percentage of missing descending
# .iloc[:, 1]!= 0: filter on missing missing values not equal to zero
mis_val_table_columns = (
mis_val_table_columns[mis_val_table_columns.iloc[:, 1] != 0]
.sort_values("% of Total Values", ascending=False)
.round(2)
) # round(2), keep 2 digits
# Print some summary information
print(
"Dataset has {} columns.".format(df.shape[1])
+ "\n"
+ "There are {} columns that have missing values.".format(
mis_val_table_columns.shape[0]
)
)
# Return the dataframe with missing information
return mis_val_table_columns
missing_values_table(df)
df = df.drop(
["Recruitment Contact", "Hours/Shift", "Post Until", "Work Location 1"], axis=1
)
# As we saw in the step above, Recruitment Contact, Hours/Shift, Post Until and Work Location 1 each have more than 50% null values, so it is reasonable to drop these columns
df = df.drop(["Additional Information"], axis=1)
# 'Additional Information' is also not relevant to our requirements, so it is removed as well
missing_values_table(df)
for column in [
"Job Category",
"Residency Requirement",
"Posting Date",
"Posting Updated",
"Process Date",
"To Apply",
]:
df[column] = df[column].fillna(df[column].mode()[0])
# Replace the null values of the few variables that have less than 0.1% missing values with the mode of the respective feature
# # Exploratory Data Analysis
# ### Highest High Salary Range
high_sal_range = (
df.groupby("Civil Service Title")["Salary Range To"].mean().nlargest(10)
).reset_index()
fig = px.bar(
high_sal_range,
y="Civil Service Title",
x="Salary Range To",
orientation="h",
title="Highest High Salary Range",
color="Salary Range To",
color_continuous_scale=px.colors.qualitative.G10,
).update_yaxes(categoryorder="total ascending")
fig.show()
# It seems that **Senior General Deputy Manager** has the highest average salary range in general, going up to $230,000 per year!
# Now that's an impressive amount.
# Most of the openings in the top ten highest salary ranges come from executive fields or other senior posts. These are the fields that, on average, rake in the most money, paving the way for the high salaries people hear about!
popular_categories = df["Job Category"].value_counts()[:5]
popular_categories
# ### Top 10 Job Openings via Category
job_categorydf = (
df["Job Category"]
.value_counts(sort=True, ascending=False)[:10]
.rename_axis("Job Category")
.reset_index(name="Counts")
)
job_categorydf = job_categorydf.sort_values("Counts")
trace = go.Scatter(
y=job_categorydf["Job Category"],
x=job_categorydf["Counts"],
mode="markers",
marker=dict(
size=job_categorydf["Counts"].values / 2,
color=job_categorydf["Counts"].values,
colorscale="Viridis",
showscale=True,
colorbar=dict(title="Opening Counts"),
),
text=job_categorydf["Counts"].values,
)
data = [(trace)]
layout = go.Layout(
autosize=False,
width=1000,
height=750,
title="Top 10 Job Openings Count",
hovermode="closest",
xaxis=dict(showgrid=False, zeroline=False, showline=False),
yaxis=dict(
title="Job Openings Count",
ticklen=2,
gridwidth=5,
showgrid=False,
zeroline=True,
showline=False,
),
showlegend=False,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# # Feature Engineering
num_cols = df._get_numeric_data().columns
num_cols
cat_cols = list(set(df.columns) - set(num_cols))
today = pd.Timestamp.today()  # pd.datetime is deprecated; Timestamp.today() is the supported equivalent
redundant_cols = [
"Job ID",
"# Of Positions",
"Posting Updated",
"Minimum Qual Requirements",
"To Apply",
"Business Title",
"Level",
]
df[cat_cols]
# Based on the business problem given in the problem statement, personal information (posting date, process date, residency details) will be of no use for our segmentation
df = df.drop(redundant_cols, axis=1)
df
# ### Data Cleaning and Transformation
def parse_categories(x):
l = x.replace("&", ",").split(",")
l = [x.strip().rstrip(",") for x in l]
key_categories.extend(l)
def parse_keywords(x, l):
x = x.lower()
tokens = nltk.word_tokenize(x)
stop_words = set(stopwords.words("english"))
token_l = [w for w in tokens if not w in stop_words and w.isalpha()]
l.extend(token_l)
def preferred_skills(x):
kwl = []
df[df["Job Category"] == x]["Preferred Skills"].dropna().apply(
parse_keywords, l=kwl
)
kwl = pd.Series(kwl)
return kwl.value_counts()[:20]
key_categories = []
df["Job Category"].dropna().apply(parse_categories)
key_categories = pd.Series(key_categories)
key_categories = key_categories[key_categories != ""]
popular_categories = key_categories.value_counts().iloc[:25]
key_categories
df["cat"] = key_categories
plt.figure(figsize=(10, 10))
sns.countplot(y=key_categories, order=popular_categories.index, palette="YlGn")
salary_table = df[["Civil Service Title", "Salary Range From", "Salary Range To"]]
jobs_highest_high_range = pd.DataFrame(
salary_table.groupby(["Civil Service Title"])["Salary Range To"].mean().nlargest(10)
).reset_index()
plt.figure(figsize=(8, 6))
sns.barplot(
y="Civil Service Title",
x="Salary Range To",
data=jobs_highest_high_range,
palette="Greys",
)
def plot_wordcloud(text):
wordcloud = WordCloud(background_color="white", width=1024, height=720).generate(
text
)
plt.clf()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
job_description_keywords = []
df["Job Description"].apply(parse_keywords, l=job_description_keywords)
plt.figure(figsize=(10, 8))
counter = Counter(job_description_keywords)
common = [x[0] for x in counter.most_common(40)]
plot_wordcloud(" ".join(common))
# From the above wordcloud, it can be seen that work, city, project, water and new are the most frequently used words in the job descriptions, whereas staff, system, management, planning, design, support, etc. are the skills most demanded by employers
words = []
counts = []
for letter, count in counter.most_common(10):
words.append(letter)
counts.append(count)
import matplotlib.cm as cm
from matplotlib import rcParams
colors = cm.rainbow(np.linspace(0, 1, 10))
rcParams["figure.figsize"] = 20, 10
plt.title("Top words in the Job description vs their count")
plt.xlabel("Count")
plt.ylabel("Words")
plt.barh(words, counts, color=colors)
# So, here we can remove the words that do not convey any information related to skills (see the sketch below)
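# A minimal sketch of that filtering step (not part of the original notebook): `generic_words`
# is a hypothetical stop-list; adjust it after inspecting the wordcloud above.
generic_words = {"work", "city", "new", "must", "may", "including", "years"}
filtered_counter = Counter(
    {word: count for word, count in counter.items() if word not in generic_words}
)
print(filtered_counter.most_common(10))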
df["Posting Date"] = pd.to_datetime(df["Posting Date"])
df["Process Date"] = pd.to_datetime(df["Process Date"])
# As there is no column for years of experience, we assume that the process date is the date when the latest (or a new) posting was published by the employer
df["years of exprience"] = df["Process Date"] - df["Posting Date"]
df["years of exprience"] = df["years of exprience"].dt.days
df_cluster = df[["cat", "Salary Range To", "years of exprience"]]
df_cluster.isna().sum()
df_cluster["cat"].value_counts()
df_cluster["cat"].fillna("Others", inplace=True)
df_cluster = df_cluster.replace("\*", "", regex=True)
df_cluster
# We create a new dataframe with the job category, the maximum salary for the respective role and the years of experience. The reason for taking the max salary instead of the mean salary is to identify the set of jobs that demand niche skills and command higher salaries.
# Calculating the Hopkins statistic
from sklearn.neighbors import NearestNeighbors
from random import sample
from numpy.random import uniform
import numpy as np
from math import isnan
def hopkins(X):
d = X.shape[1]
# d = len(vars) # columns
n = len(X) # rows
m = int(0.1 * n)
nbrs = NearestNeighbors(n_neighbors=1).fit(X.values)
rand_X = sample(range(0, n, 1), m)
ujd = []
wjd = []
for j in range(0, m):
u_dist, _ = nbrs.kneighbors(
uniform(np.amin(X, axis=0), np.amax(X, axis=0), d).reshape(1, -1),
2,
return_distance=True,
)
ujd.append(u_dist[0][1])
w_dist, _ = nbrs.kneighbors(
X.iloc[rand_X[j]].values.reshape(1, -1), 2, return_distance=True
)
wjd.append(w_dist[0][1])
H = sum(ujd) / (sum(ujd) + sum(wjd))
if isnan(H):
print(ujd, wjd)
H = 0
return H
# Let's check the Hopkins measure
hopkin_df = df_cluster
hopkins(hopkin_df.drop(["cat"], axis=1))
# 0.99 is a good Hopkins score. Hence the data is very much suitable for clustering. Preliminary check is now done.
# We can do standardisation again or else we can skip this step as well.
df_cluster_std = df_cluster
X_C = df_cluster_std.drop(["cat"], axis=1)
df_cluster_std = StandardScaler().fit_transform(X_C)
df_cluster
# # K-means Clustering
# Let's check the silhouette score first to identify the ideal number of clusters
# To perform KMeans clustering
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
sse_ = []
for k in range(2, 10):
kmeans = KMeans(n_clusters=k).fit(df_cluster_std)
sse_.append([k, silhouette_score(df_cluster_std, kmeans.labels_)])
plt.plot(pd.DataFrame(sse_)[0], pd.DataFrame(sse_)[1])
# The silhouette score reaches a peak at around 4 clusters, indicating that this might be the ideal number of clusters.
# Let's use the elbow curve method to identify the ideal number of clusters.
ssd = []
for num_clusters in list(range(1, 10)):
model_clus = KMeans(n_clusters=num_clusters, max_iter=50)
model_clus.fit(df_cluster_std)
ssd.append(model_clus.inertia_)
plt.plot(ssd)
# A distinct elbow is formed at around 2-5 clusters. Let's finally create the clusters and see for ourselves which ones fare better
# K-means with k=4 clusters
model_clus4 = KMeans(n_clusters=4, max_iter=50)
model_clus4.fit(df_cluster_std)
dat4 = df_cluster
dat4.index = pd.RangeIndex(len(dat4.index))
dat_km = pd.concat([dat4, pd.Series(model_clus4.labels_)], axis=1)
dat_km.columns = ["cat", "salary_max", "exp", "ClusterID"]
dat_km
dat_km["ClusterID"].value_counts()
dat_km
# One thing we notice is that fairly distinct clusters are formed, except for cluster 1, which contains many more data points than the others
# Now let's compute the cluster means with respect to the variables of interest, plot them and see how they are related
df_final = pd.merge(df, dat_km, on="cat")
df_final
df_final.info()
# Along job category and years of experience
sns.scatterplot(x="cat", y="exp", hue="ClusterID", data=df_final)
# Along salary range and years of experience, coloured by job category
sns_plot = sns.scatterplot(x="Salary Range To", y="exp", hue="cat", data=df_final)
# From the above plot, we can see that different salary ranges emerge depending on the job category (cat) and years of experience (exp).
#
fig = sns_plot.get_figure()
fig.savefig("output.png")
# As there are many job categories, the x-axis labels in the graph above are hard to read, but we can get a clearer picture below
# let's take a look at those Job category clusters and try to make sense if the clustering process worked well.
df_final_on_jobcat = df_final[df_final["ClusterID"] == 1]
df_final_on_jobcat["cat"].value_counts()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
data = pd.read_csv(
"/kaggle/input/gtd/globalterrorismdb_0718dist.csv", encoding="ISO-8859-1"
)
# drop all NaN columns
# df = data.dropna(axis=1)
# for look at all raws and cols
# pd.set_option("display.max.columns", None)
# pd.set_option("display.max.rows", None)
# for 4 number after ,
# pd.set_option("display.precision", 4)
data.rename(
columns={
"iyear": "Year",
"imonth": "Month",
"iday": "Day",
"country_txt": "Country",
"region_txt": "Region",
"attacktype1_txt": "AttackType",
"target1": "Target",
"nkill": "Killed",
"nwound": "Wounded",
"summary": "Summary",
"gname": "Group",
"targtype1_txt": "Target_type",
"weaptype1_txt": "Weapon_type",
"motive": "Motive",
},
inplace=True,
)
df = data[
[
"Year",
"Month",
"Day",
"Country",
"Region",
"city",
"latitude",
"longitude",
"AttackType",
"Killed",
"Wounded",
"Target",
"Summary",
"Group",
"Target_type",
"Weapon_type",
"Motive",
]
]
df.info()
df.columns
df.tail()
df.corr()
# correlation map
sns.set(font_scale=1.5)
f, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df.corr(), annot=True, linewidths=0.5, fmt=".1f", ax=ax)
plt.show()
# Line Plot
df.Year.plot(
kind="line",
color="g",
label="Year",
linewidth=1,
alpha=0.5,
grid=True,
linestyle="-.",
figsize=(12, 12),
)
plt.legend(loc="upper right")
plt.xlabel("Terror")
plt.ylabel("Year")
plt.title("Terror-Year Line Plot")
plt.show()
# Scatter Plot
plt.subplots(figsize=(12, 12))
sns.scatterplot(data=df, x="Year", y="Killed")
plt.show()
# Histogram Plot
df.Year.plot(kind="hist", bins=100, figsize=(20, 12))
plt.title("Country Histogram Plot")
plt.show()
# Histogram
plt.subplots(figsize=(12, 12))
sns.countplot(
"Region",
data=df,
palette="RdYlGn_r",
edgecolor=sns.color_palette("dark", 7),
order=df["Region"].value_counts().index,
)
plt.xticks(rotation=90)
plt.title("Number Of Terrorist Activities Each Region")
plt.show()
terrorInTurkey = df[(df["Country"] == "Turkey")]
terrorInTurkey
# Histogram
plt.subplots(figsize=(35, 10))
sns.countplot(
"Group",
data=terrorInTurkey,
palette="RdYlGn_r",
edgecolor=sns.color_palette("dark", 7),
)
plt.xticks(rotation=90)
plt.title("Number Of Terror Group Activities in Turkey")
plt.show()
terrorInIstanbul = df[(df["city"] == "Istanbul") & (df["Year"] > 2000)]
terrorInIstanbul
plt.subplots(figsize=(20, 10))
sns.countplot(
terrorInIstanbul["AttackType"],
palette="inferno",
order=terrorInIstanbul["AttackType"].value_counts().index,
)
plt.xticks(rotation=90)
plt.title("Number of Attack Type Activities After 2000 in Istanbul")
plt.show()
# DICTIONARY
dic = {"Riot Games": "LoL", "Rockstar Games": "GTA", "Valve": "CSGO"}
print(dic.keys())
print(dic.values())
dic["Riot Games"] = "LoR" # update entry
dic["CD Projekt"] = "Witcher 3" # add new entyr
print(dic)
print("LoR" in dic) # check inlude or not
print("Riot Games" in dic)
dic.clear() # clear dictionary
print(dic)
# WHILE AND FOR LOOPS
i = 0
while i < 10:
print(i * i)
i += 1
lis = [1, 2, 3, 4, 5]
for index, value in enumerate(lis):
print("index:", index, "value:", value)
dic = {"Riot Games": "LoL", "Rockstar Games": "GTA", "Valve": "CSGO"}
for key, value in dic.items():
print(key + ":", value)
for index, value in terrorInIstanbul[["Killed"]][:].iterrows():
print(index, value)
|
# # GOAL
# ### This notebook gives a summary of a study and explains how regression techniques can be used to predict the price of a house.
# ### The features in the dataset represent different attributes of each house in the dataset.
# ### It is the objective of this project to apply regression models in order to predict the values for the SalePrice feature.
# ## ML Methods used:
# ### **The models that I used in this task:**
# **`Linear Regression`,**
# **`Lasso Regression`,**
# **`Ridge Regression`,**
# **`Support Vector Regressor`,**
# **`KNeighbours Regressor`,**
# **`Random Forest Regressor`**
# # Imports packages
# No Data without you guys!
import numpy as np
import pandas as pd
# Visualizations
from IPython.display import display, HTML
import plotly.express as px
import matplotlib.pyplot as plt
# Statistics
from scipy.stats import skew
# Metrics
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import mean_squared_error, r2_score
# Models
from xgboost import XGBRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
# An effective way to set the display option for all the columns in the dataframe
pd.set_option("display.max_columns", None)
# Ignore in case of warnings
import warnings
warnings.filterwarnings(action="ignore")
# # Load Data
# Load train data and make a copy of it
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
df_train = train.copy()
# Load df_test data
df_test = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/test.csv"
)
# Load sample_submission
sample_data = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv"
)
# # Checking train Dataset
print("**Dataset:**")
print("-------------Train-------------------\n")
display(df_train.head())
print("--------------Train shape ------------------\n")
print("**Dataset shape:**", df_train.shape)
print("--------------Train columns ------------------\n")
display(df_train.columns)
print("--------------------Train description----------------------------")
display(df_train.describe().T.head(10))
print("--------------------Train Types----------------------------------")
display(df_train.dtypes.sort_values(ascending=False))
print("--------------------Train NA Check-------------------------------")
display(df_train.isna().sum().sort_values(ascending=False).head(20))
# ### Summary of the train data check:
# * **Data contains 81 columns with 1460 samples.**
# * **Data contains object, int and float type values**
# * **Data contains null values, most of them in "PoolQC" (the ratio of null values needs to be checked to decide how to handle them)**
# ### Checking the ratio of Null values
print("--------------------Null values ratio-----------------------\n")
display(df_train.isnull().mean() * 100)
print("--------------------Plotting the ratio of null values-----------------\n")
(df_train.isnull().mean() * 100).plot.bar(figsize=(12, 6))
plt.ylabel("Percentage of missing values %")
plt.xlabel("Variables")
plt.title("Quantifying missing data")
df_train.columns[df_train.isnull().mean() * 100 > 20]
df_train = df_train.drop(
columns=["Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"], axis=1
)
display(df_train)
for column in df_train.columns:
    if df_train[column].dtype == object:  # np.object was removed in newer NumPy; plain `object` behaves the same
df_train[column].fillna(df_train[column].mode()[0], inplace=True)
elif df_train[column].dtype == np.float64:
df_train[column].fillna(df_train[column].median(), inplace=True)
elif df_train[column].dtype == np.int64:
df_train[column].fillna(df_train[column].median(), inplace=True)
print("---------------Checking for the null value again-----------------\n")
display(df_train.isnull().sum())
print("The totall amount of null values:", df_train.isnull().sum().sum())
print("---------------------The final train data set-------------------\n")
display(df_train)
# ### Summary of the null-value ratio check and how the missing values are handled:
# **It's best to aim for as few null values as possible, as they can affect the accuracy and reliability of analysis.
# However, there is no hard and fast rule for what ratio of null values is acceptable.**
# **As a general guideline, if the percentage of null values in the dataset is less than 5%, we can consider imputing the missing values using methods such as mean or median imputation.**
# **If the percentage is greater than 5%, we may want to consider other methods such as dropping rows or columns with null values**
# **Since the dataset has only 1460 rows, and based on the ratio of null values, I decided to drop 'Alley', 'FireplaceQu', 'PoolQC', 'Fence' and 'MiscFeature'.**
# **For the remaining columns that contain null values, I impute the median for int and float columns and the mode for object columns, as sketched below.**
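# A minimal sketch of that rule as a reusable helper (an assumption, not applied to the data here):
# drop columns whose null ratio exceeds `drop_threshold`, impute the rest with median/mode.
def handle_missing(frame, drop_threshold=0.20):
    frame = frame.copy()
    null_ratio = frame.isnull().mean()
    frame = frame.drop(columns=null_ratio[null_ratio > drop_threshold].index)
    for col in frame.columns:
        if frame[col].dtype == object:
            frame[col] = frame[col].fillna(frame[col].mode()[0])
        else:
            frame[col] = frame[col].fillna(frame[col].median())
    return frame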
# # Exploratory data analysis (EDA) and Feature Engineering
# ### Checking the Skew and Normal distribution
# **Skewness is a measure of the asymmetry of a distribution. If the distribution of a dataset is not symmetric, it is said to be skewed.**
# **A dataset can be skewed in either direction, that is, it can have a positive or negative skew. The most common way to measure skewness is to calculate the skewness coefficient.**
# **The skewness coefficient is a numerical measure of the degree of skewness in a dataset. A simple version, Pearson's second (median) skewness coefficient, is calculated as:**
# skewness = 3 * (mean - median) / standard_deviation
# **(Note that pandas' `.skew()`, used below, computes the moment-based Fisher-Pearson coefficient, which usually gives similar but not identical values.)**
# **If the skewness coefficient is zero, the distribution is perfectly symmetrical. If the skewness coefficient is negative, the distribution is skewed to the left (negative skew), and if it is positive, the distribution is skewed to the right (positive skew).**
# **In general, a skewness coefficient between -0.5 and 0.5 indicates a relatively symmetrical distribution. A skewness coefficient between -1 and -0.5 or between 0.5 and 1 indicates a moderately skewed distribution, while a skewness coefficient less than -1 or greater than 1 indicates a highly skewed distribution.**
#
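# A quick check (not in the original notebook) comparing the two measures on SalePrice;
# both should come out positive for this right-skewed target.
sale = df_train["SalePrice"]
pearson_median_skew = 3 * (sale.mean() - sale.median()) / sale.std()
print(f"Pearson median skewness of SalePrice: {pearson_median_skew:.2f}")
print(f"Moment-based skewness (pandas .skew()): {sale.skew():.2f}")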
numeric = ["int64", "float64"]
for column in df_train.columns:
if df_train[column].dtypes in numeric:
skew = df_train[column].skew()
if skew < -1 or skew > 2:
print(f"The skewness of {column} is : {skew}.\n")
df_train[column] = np.log1p(df_train[column])
new_skew = df_train[column].skew()
print(
f"After apply log transform, The skewness of {column} is: {new_skew}."
)
print("-----------------------------------------\n")
else:
continue
# ### Summary of the skewness check and how skewed columns are handled:
# **There are several methods for handling skewness in a dataset:**
# **Square root transformation**
# **Box-Cox transformation**
# **Winsorization**
# **Robust statistics**
# **Log transformation: One common method for reducing skewness is to apply a logarithmic transformation to the data. This can help to spread out the values at the high end of the distribution and compress the values at the low end, resulting in a more symmetrical distribution.**
# **After applying the log transformation the skewness gets much more better. In general, it's a good idea to explore the distribution of the data visually using histograms or other plots to determine the level of skewness and the most appropriate method for handling it.**
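# A minimal sketch (an assumption, not part of the original pipeline) of two of the alternatives
# listed above, applied to the untouched `train` copy of the strictly positive LotArea column:
from scipy.stats import boxcox
lot_area = train["LotArea"]
boxcox_values, fitted_lambda = boxcox(lot_area)  # Box-Cox requires strictly positive values
print(f"Original skew: {lot_area.skew():.2f}")
print(f"Square-root transform skew: {np.sqrt(lot_area).skew():.2f}")
print(f"Box-Cox transform skew (lambda={fitted_lambda:.2f}): {pd.Series(boxcox_values).skew():.2f}")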
# **For plotting the histograms of the columns, I used a Plotly snippet from another contributor, which I found interesting. Thanks, OLEKSII ZHUKOV**
def histo_plot(data, column_name_str):
fig = px.histogram(
data,
x=column_name_str,
template="simple_white",
color_discrete_sequence=["red"],
)
fig.update_layout(
xaxis_title=column_name_str,
yaxis_title="Frequency",
showlegend=True,
font=dict(size=14),
title={"text": "Distribution", "y": 0.95, "x": 0.5},
)
# Display
fig.show()
print("----------------Skewness value-----------------\n")
print(data[column_name_str].skew())
# **Check the distribution of PoolArea and LotArea in the train data set, and of the target SalePrice**
histo_plot(df_train, "SalePrice")
histo_plot(df_train, "PoolArea")
histo_plot(df_train, "LotArea")
# **Most values in PoolArea are zero, and even after a log transform it is still far from normally distributed, so I drop it.**
df_train.drop(["PoolArea"], axis=1, inplace=True)
# **So far I have handled the numerical values, but what about object and category data? Let's check the object values in the train data**
object_features = []
for column_name in df_train.columns:
if df_train[column_name].dtype == "object":
object_features.append(column_name)
print(
f"----------------Object type features name ----------> number of object features:{len(object_features)} <----------\n "
)
display(object_features)
# **it's important to convert object values (i.e. strings or categorical variables) into a numerical format that can be used as input to a machine learning algorithm. There are several methods for doing this:**
# **One-hot encoding**
# **Binary encoding**
# **Hashing encoding**
# **Label encoding: This method involves assigning a unique numerical value to each category in a categorical variable.**
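# A small illustrative contrast (not part of the original notebook) between one-hot and label
# encoding on a three-row toy frame, before label encoding is applied to the real data below:
toy = pd.DataFrame({"Street": ["Pave", "Grvl", "Pave"]})
print(pd.get_dummies(toy["Street"], prefix="Street"))  # one-hot: one binary column per category
print(LabelEncoder().fit_transform(toy["Street"]))  # label encoding: one integer code per category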
# ## Label Encoding
label_encode = LabelEncoder()
for column in df_train.columns:
if df_train[column].dtype == "object":
df_train[column] = label_encode.fit_transform(df_train[column])
df_train.head()
# ## Let's see the relation between SalePrice our target and some of the features in the dataset:
# **it's important to check for linear relationships between the target variable and the other variables in the dataset. This is because many machine learning algorithms, such as linear regression, assume that there is a linear relationship between the target variable and the input variables. If this assumption is violated, the algorithm may not perform well and may produce inaccurate predictions.**
# **If there is a linear relationship between the target variable and the input variables, it may be possible to improve the performance of the machine learning algorithm by including additional features that capture non-linear relationships or by transforming the input variables to create a more linear relationship.**
# **If there is no linear relationship between the target variable and the input variables, it may be necessary to use a non-linear machine learning algorithm or to engineer new features that better capture the underlying relationship between the variables.**
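# A quick first check (not in the original notebook): Pearson correlation of every encoded
# feature with SalePrice, as a rough proxy for the strength of a linear relationship.
correlations = df_train.corr()["SalePrice"].drop("SalePrice")
print(correlations.abs().sort_values(ascending=False).head(10))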
selected_columns = [
"LotFrontage",
"LotArea",
"YearBuilt",
"MasVnrArea",
"GrLivArea",
"GarageCars",
"GarageArea",
"OverallQual",
"TotalBsmtSF",
]
for column_name in selected_columns:
fig = px.scatter(
df_train,
x="SalePrice",
y=column_name,
opacity=0.65,
trendline="ols",
trendline_color_override="darkblue",
)
fig.show()
# ### Let's do some Feature Engineering! I think we need it :)
# * **I remove unnecessary columns based on domain knowledge :)**
df_train.drop(["Id", "Street", "YearRemodAdd", "MiscVal"], axis=1, inplace=True)
# * **Change the type of 'MSSubClass', 'OverallCond', 'YrSold' and 'MoSold': they contain numbers, but those numbers are category codes rather than quantities (check data_description.txt)**
df_train["MSSubClass"] = df_train["MSSubClass"].apply(str)
df_train["OverallCond"] = df_train["OverallCond"].astype(str)
df_train["YrSold"] = df_train["YrSold"].astype(str)
df_train["MoSold"] = df_train["MoSold"].astype(str)
# **Lets go to the df_test dataset**
# # Same Steps for the df_test Dataset
print("**Dataset:**")
print("-------------df_test-------------------\n")
display(df_test.head())
print("--------------df_test shape ------------------\n")
print("**Dataset shape:**", df_test.shape)
print("--------------df_test columns ------------------\n")
display(df_test.columns)
print("--------------------df_test description----------------------------")
display(df_test.describe().T.head(10))
print("--------------------df_test Types----------------------------------")
display(df_test.dtypes.sort_values(ascending=False))
print("--------------------df_test NA Check-------------------------------")
display(df_test.isna().sum().sort_values(ascending=False).head(20))
print("--------------------Null values ratio-----------------------\n")
display(df_test.isnull().mean() * 100)
print("--------------------Plotting the ratio of null values-----------------\n")
(df_test.isnull().mean() * 100).plot.bar(figsize=(12, 6))
plt.ylabel("Percentage of missing values %")
plt.xlabel("Variables")
plt.title("Quantifying missing data")
df_test.columns[df_test.isnull().mean() * 100 > 20]
df_test = df_test.drop(
columns=["Alley", "FireplaceQu", "PoolQC", "Fence", "MiscFeature"], axis=1
)
display(df_test)
for column in df_test.columns:
    if df_test[column].dtype == object:  # np.object was removed in newer NumPy; plain `object` behaves the same
df_test[column].fillna(df_test[column].mode()[0], inplace=True)
elif df_test[column].dtype == np.float64:
df_test[column].fillna(df_test[column].median(), inplace=True)
elif df_test[column].dtype == np.int64:
df_test[column].fillna(df_test[column].median(), inplace=True)
print("---------------Checking for the null value again-----------------\n")
display(df_test.isnull().sum())
print("The totall amount of null values:", df_test.isnull().sum().sum())
print("---------------------The final train data set-------------------\n")
display(df_test)
numeric = ["int64", "float64"]
for column in df_test.columns:
if df_test[column].dtypes in numeric:
skew = df_test[column].skew()
if skew < -1 or skew > 1:
print(f"The skewness of {column} is : {skew}.\n")
df_test[column] = np.log1p(df_test[column])
new_skew = df_test[column].skew()
print(
f"After apply log transform, The skewness of {column} is: {new_skew}."
)
print("-----------------------------------------\n")
else:
continue
def histo_plot(data, column_name_str):
fig = px.histogram(
data,
x=column_name_str,
template="simple_white",
color_discrete_sequence=["red"],
)
fig.update_layout(
xaxis_title=column_name_str,
yaxis_title="Frequency",
showlegend=True,
font=dict(size=14),
title={"text": "Distribution", "y": 0.95, "x": 0.5},
)
# Display
fig.show()
print("----------------Skewness value-----------------\n")
print(data[column_name_str].skew())
histo_plot(df_test, "PoolArea")
histo_plot(df_test, "LotArea")
df_test.drop(["PoolArea"], axis=1, inplace=True)
object_features = []
for column_name in df_test.columns:
if df_test[column_name].dtype == "object":
object_features.append(column_name)
print(
f"----------------Object type features name ----------> number of object features:{len(object_features)} <----------\n "
)
display(object_features)
label_encode = LabelEncoder()
for column in df_test.columns:
if df_test[column].dtype == "object":
df_test[column] = label_encode.fit_transform(df_test[column])
df_test.head()
df_test.drop(["Street", "YearRemodAdd", "MiscVal"], axis=1, inplace=True)
df_test["MSSubClass"] = df_test["MSSubClass"].apply(str)
df_test["OverallCond"] = df_test["OverallCond"].astype(str)
df_test["YrSold"] = df_test["YrSold"].astype(str)
df_test["MoSold"] = df_test["MoSold"].astype(str)
# # Regression Models
# ### **The models that I used in this task `Linear Regression`,`Lasso Regression`,`Ridge Regression`,`Support Vector Regressor`,`KNeighbours Regressor` and `Random Forest Regressor`**
# **Let's make our evaluation metrics function that we want to know from our model**
def evaluation(y_actual, y_predicted):
mse = mean_squared_error(y_actual, y_predicted)
rmse = np.sqrt(mse)
accuracy = r2_score(y_actual, y_predicted)
return mse, rmse, accuracy
# **Define X and Y from the train set, and X_df_test from the test dataset (Id was already dropped from the train set; the test Id is dropped later, at prediction time)**
X = df_train.drop(columns=["SalePrice"])
Y = df_train["SalePrice"]
X_df_test = df_test
# **Split data into train and test with the test size of 30%**
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, random_state=42, test_size=0.3
)
# ## Linear Regression
lr = LinearRegression()
lr.fit(X_train, Y_train)
y_pred_lr = lr.predict(X_test)
mse_lr, rmse_lr, accuracy_lr = evaluation(Y_test, y_pred_lr)
# ## Lasso Regression
#
lasso = Lasso()
lasso.fit(X_train, Y_train)
y_pred_lasso = lasso.predict(X_test)
mse_lasso, rmse_lasso, accuracy_lasso = evaluation(Y_test, y_pred_lasso)
# ## Ridge Regression
ridge = Ridge()
ridge.fit(X_train, Y_train)
y_pred_ridge = ridge.predict(X_test)
mse_ridge, rmse_ridge, accuracy_ridge = evaluation(Y_test, y_pred_ridge)
# ## Support Vector Regressor
#
svr = SVR()
svr.fit(X_train, Y_train)
y_pred_svr = svr.predict(X_test)
mse_svr, rmse_svr, accuracy_svr = evaluation(Y_test, y_pred_svr)
# ## KNeighbours Regressor
knr = KNeighborsRegressor()
knr.fit(X_train, Y_train)
y_pred_knr = knr.predict(X_test)
mse_knr, rmse_knr, accuracy_knr = evaluation(Y_test, y_pred_knr)
# ## Random Forest Regressor
rfr = RandomForestRegressor(
n_estimators=7000,
max_depth=15,
min_samples_split=5,
min_samples_leaf=5,
max_features=None,
oob_score=True,
random_state=42,
)
rfr.fit(X_train, Y_train)
y_pred_rfr = rfr.predict(X_test)
mse_rfr, rmse_rfr, accuracy_rfr = evaluation(Y_test, y_pred_rfr)
models = [
"Linear Regression",
"Lasso Regression",
"Ridge Regression",
"Support Vector Regressor",
"KNeighbours Regressor",
"Random Forest Regressor",
]
rmse_scores = [rmse_lr, rmse_lasso, rmse_ridge, rmse_svr, rmse_knr, rmse_rfr]
mse_scores = [mse_lr, mse_lasso, mse_ridge, mse_svr, mse_knr, mse_rfr]
accuracy_scores = [
accuracy_lr,
accuracy_lasso,
accuracy_ridge,
accuracy_svr,
accuracy_knr,
accuracy_rfr,
]
model_scores = pd.DataFrame(
{
"Regression Models": models,
"RMSE Scores": rmse_scores,
"MSE_Scores": mse_scores,
"Accuracy_Scores": accuracy_scores,
}
)
results = model_scores.sort_values("RMSE Scores", ascending=True, ignore_index=True)
results
# # Submission
prediction = rfr.predict(df_test.drop(columns=["Id"], axis=1))
ids = df_test["Id"]
Final_sub = pd.DataFrame({"Id": ids, "SalePrice": prediction})
Final_sub.to_csv("final_submission.csv", index=False)
Final_sub
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas_profiling # Profiling is a process that helps us on understanding the data
import plotly.offline # importing plotly in offline mode
import cufflinks as cf # importing cufflinks in offline mode
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
import pprint #
from IPython.core.interactiveshell import (
InteractiveShell,
) # Printing all the outputs of a cell
InteractiveShell.ast_node_interactivity = "all"
# import pdb #python debugger
# pdb.pm()
gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test_df = pd.read_csv("../input/titanic/test.csv")
train_df = pd.read_csv("../input/titanic/train.csv")
train_df.profile_report()
train_df.iplot()
pprint.pprint(test_df.head(5), width=50)
#
# Tip: Use blue boxes (alert-info) for tips and notes.
# If it’s a note, you don’t have to include the word “Note”.
# Example: Yellow Boxes are generally used to include additional examples or mathematical formulas.
# Use green box only when necessary like to display links to related content.
# It is good to avoid red boxes but can be used to alert users to not delete some important part of code etc.
#
test_df.head(5)
train_df.head(5)
# # drop unnecessary columns, these columns won't be useful in analysis and prediction
titanic_df = train_df.drop(["PassengerId", "Name", "Ticket"], axis=1)
test_df = test_df.drop(["Name", "Ticket"], axis=1)
# # Embarked
# only in titanic_df, fill the two missing values with the most occurred value, which is "S".
titanic_df["Embarked"] = titanic_df["Embarked"].fillna("S")
# create dummy variables for Embarked feature
dummies_embark_titanic = pd.get_dummies(titanic_df["Embarked"])
dummies_embark_test = pd.get_dummies(test_df["Embarked"])
# merge the dummies
titanic_df = titanic_df.join(dummies_embark_titanic)
test_df = test_df.join(dummies_embark_test)
# # Feature: Fare
# there is a missing "Fare" values in test_df
test_df["Fare"].fillna(test_df("Fare").median(), inplace=True)
# convert from float to int
titanic_df["Fare"] = titanic_df["Fare"].astype(int)
test_df["Fare"] = test_df["Fare"].astype(int)
# # Feature: Age
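# The original notebook leaves this section empty; as one possible (assumed, not the author's)
# approach, missing ages could be imputed with the median of each frame:
titanic_df["Age"].fillna(titanic_df["Age"].median(), inplace=True)
test_df["Age"].fillna(test_df["Age"].median(), inplace=True)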
# # Feature: Cabin
# It has a lot of NaN values, hence it won't have a remarkable impact on the prediction, so we drop it
titanic_df.drop("Cabin", axis=1, inplace=True)
test_df.drop("Cabin", axis=1, inplace=True)
|
import pandas as pd
import numpy as np
# # Reading the Movie Rating dataset
# Reading the file:
movie_dataset = pd.read_csv("../input/movie_ratings.csv")
# Dropping user name column:
movie_ratings = movie_dataset.drop("users", axis=1)
print(movie_ratings.head(5))
print(" Shape of movie matrix is :", movie_ratings.shape)
# # Content based Matrix factorization using regularization:
# Function for matrix factorization:
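# Descriptive note (added): the function below performs regularized SGD updates for P and Q,
#   e_ij   = r_ij - p_i . q_j
#   p_ik  <- p_ik + alpha * (2 * e_ij * q_kj - beta * p_ik)
#   q_kj  <- q_kj + alpha * (2 * e_ij * p_ik - beta * q_kj)
# and stops early once the regularized squared error drops below 100.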
def matrix_factorization(R, P, Q, K, steps=10000, alpha=0.0002, beta=0.02):
Q = Q.T
for step in range(steps):
for i in range(len(R)):
for j in range(len(R[i])):
if R[i][j] > 0:
eij = R[i][j] - np.dot(P[i, :], Q[:, j])
for k in range(K):
P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])
Q[k][j] = Q[k][j] + alpha * (2 * eij * P[i][k] - beta * Q[k][j])
eR = np.dot(P, Q)
e = 0
for i in range(len(R)):
for j in range(len(R[i])):
if R[i][j] > 0:
e = e + pow(R[i][j] - np.dot(P[i, :], Q[:, j]), 2)
for k in range(K):
e = e + (beta / 2) * (pow(P[i][k], 2) + pow(Q[k][j], 2))
# print(e)
if e < 100:
break
return P, Q.T, e
# # Defining all the parameters and calling the function:
# Defining all the parameters:
R = np.array(movie_ratings)
N = len(R)
M = len(R[0])
K = 3
P = np.random.rand(N, K)
# print(P.shape)
Q = np.random.rand(M, K)
# print(Q.shape)
# Calling the function:
nP, nQ, e = matrix_factorization(R, P, Q, K)
print("Breakout Error form the function is", e)
# Combining it again as a full matrix
nR = np.dot(nP, nQ.T)
print(nR)
# # Comparing matrices and inserting predicted values in the original matrix :
# Replacing all the movie rating by -1 that are already watched by users:
def eliminating_watched_movies(R):
for i in range(len(R)):
for j in range(len(R[i])):
if not np.isnan(R[i][j]):
R[i][j] = -1
return R
# Adding predictions to this matrix:
def Adding_predictions(R, nR):
# First calling the function eliminating_watched_movies:
eliminating_watched_movies(R)
for i in range(len(R)):
for j in range(len(R[i])):
if np.isnan(R[i][j]):
R[i][j] = nR[i][j]
return R
# # Preparing the final matrix:
# Final Matrix of ratings to use for recommendation:
Adding_predictions(R, nR)
# # Adding column names and indexes:
# Defining a list ranging from 1 to 50 for the 50 users:
mylist = list(range(1, 51))
print(mylist)
# Add indexes and column name:
Ratings = pd.DataFrame(
R,
columns=[
"movie1",
"movie2",
"movie3",
"movie4",
"movie5",
"movie6",
"movie7",
"movie8",
"movie9",
"movie10",
],
index=mylist,
)
print(Ratings)
# # Sorting the ratings:
# For each user recommendation ratings are sorted:
def sorted_ratings():
for i in range(len(mylist)):
a = Ratings.iloc[i]
print(a.sort_values(ascending=False))
sorted_ratings()
# We can pick top 2 or top 3 movies to send recommendation to users.
# # Top 2 recommended movie for a specific user:
def top_2_recommendations_for_user(user_id):
# Using loc to get index based values:
a = Ratings.loc[user_id].sort_values(ascending=False)
if a[0] > 0 and a[1] > 0:
print(a[0:2])
if a[0] > 0 and a[1] < 0:
print(a[0:1])
if a[0] < 0:
print("No Recommendation")
top_2_recommendations_for_user(3)
# There is no user who has watched all the movies
# Try only one recommendation with movie 48
# Try two recommendations with movie 1
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_csv("/kaggle/input/data-science-london-scikit-learn/train.csv")
test = pd.read_csv("/kaggle/input/data-science-london-scikit-learn/test.csv")
train_y = pd.read_csv("/kaggle/input/data-science-london-scikit-learn/trainLabels.csv")
train.head()
test.head()
sorted(train.columns)[:5]
sorted(test.columns)[:5]
train_y.head()
train_y_cols = np.array(train_y.columns)
train_y_cols = train_y_cols.reshape(1, train_y_cols.shape[0])
pd.DataFrame(train_y_cols, columns=["1"])
train_y_with_column = pd.concat(
[train_y, pd.DataFrame(train_y_cols, columns=["1"])], axis=0, ignore_index=True
)
train_y_with_column.head()
train_y_with_column.rename(columns={"1": "y"}, inplace=True)  # the column name read from the CSV is the string "1"
train_y_with_column.info()
train_y_with_column = train_y_with_column.astype(float)
y = train_y_with_column.values
y.shape
train.shape
test.shape
train_cols = np.array(train.columns)
test_cols = np.array(test.columns)
train_cols = train_cols.reshape(1, train_cols.shape[0])
test_cols = test_cols.reshape(1, test_cols.shape[0])
pd.DataFrame(train_cols)
pd.DataFrame(test_cols)
new_columns_train = dict()
new_columns_test = dict()
cols_train = list(train.columns)
cols_test = list(test.columns)
for i in range(train.shape[1]):
new_columns_train[cols_train[i]] = i
new_columns_test[cols_test[i]] = i
train.rename(columns=new_columns_train, inplace=True)
test.rename(columns=new_columns_test, inplace=True)
train_with_column = pd.concat(
[train, pd.DataFrame(train_cols)], axis=0, ignore_index=True
)
test_with_column = pd.concat([test, pd.DataFrame(test_cols)], axis=0, ignore_index=True)
train_with_column.tail()
test_with_column.tail()
train_test = pd.concat([train_with_column, test_with_column], axis=0, ignore_index=True)
train_with_column.shape
test_with_column.shape
train_test.shape # 1000 train, 9000 test
train_test.head()
train_test.isna().sum()
train_test.info()
train_test = train_test.astype(float)
train_test.corr()
X = train_with_column.values
y = train_y_with_column.values
X_submission = test_with_column.values
print("X.shape: {}".format(X.shape))
print("y.shape: {}".format(y.shape))
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
import xgboost as xgb
xgb_model = xgb.XGBClassifier()
xgb_model.fit(X_train, y_train)
xgb_model.score(X_test, y_test)
xgb_pred = xgb_model.predict(X_submission)
xgb_pred = pd.DataFrame(xgb_pred, columns=["Solution"])
xgb_pred = xgb_pred.reset_index()
xgb_pred.rename(columns={"index": "Id"}, inplace=True)
xgb_pred["Id"] = xgb_pred["Id"] + 1
xgb_pred["Solution"] = xgb_pred["Solution"].astype(int)
xgb_pred.head()
xgb_pred.tail()
xgb_pred.to_csv("Submission.csv", index=False)
|
# ### Importing the usual suspects
try:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use("ggplot")
from nltk.corpus import stopwords
from nltk.util import ngrams
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from sklearn.feature_extraction.text import CountVectorizer
stop = set(stopwords.words("english"))
stop = stop.union(set(["http", "https"]))
import gensim
import re
import string
import time
print("Success: Packages Loaded!")
except:
print("Error: One or more packages failed to load.")
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
pwd
# ### Reading Data and other checks.
data = pd.read_csv("train.csv")
data.head()
test = pd.read_csv("test.csv")
print("Rows = {}, Colums = {}".format(data.shape[0], data.shape[1]))
shape1 = (data.shape[0], data.shape[1])
data = data[data["text"].notnull()]
if data.shape == shape1:
print("Data Consistent")
else:
print("Data Inconsistent")
sns.set_style("whitegrid")
x = data.target.value_counts()
sns.barplot(x.index, x)
plt.gca().set_ylabel("samples")
print("0: Not Disaster Tweets, 1: Disaster Tweets")
def createCorpus(df, target):
corpus = []
for i in df[df["target"] == target]["text"]:
text = i.split()
corpus.extend([w.lower() for w in text if w.lower() not in stop])
return corpus
# ### Cleaning
df = pd.concat([data, test], sort=False)
df.shape
def remove_URL(text):
url = re.compile(r"https?://\S+|www\.\S+")
return url.sub(r"", text)
def remove_html(text):
html = re.compile(r"<.*?>")
return html.sub(r"", text)
def remove_emoji(text):
emoji_pattern = re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"]+",
flags=re.UNICODE,
)
return emoji_pattern.sub(r"", text)
def remove_punct(text):
table = str.maketrans("", "", string.punctuation)
return text.translate(table)
df["text"] = df["text"].apply(lambda x: remove_URL(x))
print("[CLEAN] Removing URLs")
time.sleep(0.5)
df["text"] = df["text"].apply(lambda x: remove_html(x))
print("[CLEAN] Removing HTML Tags")
time.sleep(0.5)
df["text"] = df["text"].apply(lambda x: remove_emoji(x))
print("[CLEAN] Removing Emoticons")
time.sleep(0.5)
df["text"] = df["text"].apply(lambda x: remove_punct(x))
print("[CLEAN] Removing Punctuations")
time.sleep(0.5)
print("Success: Text Cleaned!")
def createCorpusNGrams(tweetCorpus, n=2):
output = []
s = [i.lower() for i in tweetCorpus]
s = [re.sub(r"[^a-zA-Z0-9\s]", " ", i) for i in s]
for i in range(1, n + 1):
output = list(ngrams(s, i))
return output
def ngramCreation(notes, num_ngram=2):
ngramList = []
if len(notes) == 0:
return ""
for nrange in range(1, num_ngram + 1):
ngramss = ngrams(notes, nrange)
for grams in ngramss:
ngramList = ngramList + ["_".join(list(grams))]
return ngramList
def lemmatize_stemming(text):
# return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
return WordNetLemmatizer().lemmatize(text, pos="v")
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if (token not in stop) and (len(token) >= 3):
result.append(lemmatize_stemming(token))
if len(result) == 0:
continue
return result
try:
df["text"] = df["text"].apply(preprocess)
df["text"] = df["text"].apply(ngramCreation)
print("Success: ngrams Created")
except:
print(
"Error: Either Operation has already been performed or it cannot be completed at the moment."
)
try:
tweetCorpus = createCorpus(df, 1)
print("Success: Corpus Created\nTotal Words = {}".format(len(tweetCorpus)))
except:
print("Error: Corpus Creation Failed!")
text = list(df["text"])
dictionary = gensim.corpora.Dictionary(textData)
# dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)
print("Success: Dictionary Generated from df['text']")
dataset = ["driving car ", "drive car carefully", "student and university"]
# be sure to split sentence before feed into Dictionary
dataset = [d.split() for d in dataset]
vocab = gensim.corpora.Dictionary(dataset)
print(vocab)
print(dictionary)
bow_corpus = [dictionary.doc2bow(doc) for doc in textData]
bow_doc_2 = bow_corpus[1]
for i in range(len(bow_doc_2)):
print(
'Word {} ("{}") appears {} time.'.format(
bow_doc_2[i][0], dictionary[bow_doc_2[i][0]], bow_doc_2[i][1]
)
)
for i in range(10):
print(dictionary[i])
tfidf = gensim.models.TfidfModel(bow_corpus, smartirs="ntc")  # `models`/`corpus` were undefined; use gensim.models and bow_corpus
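# A minimal usage sketch (not in the original notebook): apply the fitted TF-IDF model to the
# bag-of-words corpus and inspect the weights of the second document.
corpus_tfidf = tfidf[bow_corpus]
print(corpus_tfidf[1])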
|
# ## Goal
# In this notebook, we apply intelligent search methods such as the Differential Evolution algorithm to find good ML hyper-parameters.
# The usual alternatives use either predetermined or randomly generated parameters for the ML algorithms.
# Some of these search methods actually simulate intelligent agents in nature, such as a flock of birds or a school of fish.
# 
# * GIF Ref: https://giphy.com/gifs/c4d-human-ai-8hYQgBIIHkCPjRTmai
import pandas as pd
import numpy as np
import csv
from matplotlib import pyplot as plt
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn.metrics import make_scorer, accuracy_score
import xgboost as xgb
from sklearn.model_selection import cross_val_score
# ## Searching Algorithm of your choice
# Let's use the DE algorithm.
# * Reference for algorithm codes: https://pablormier.github.io/2017/09/05/a-tutorial-on-differential-evolution-with-python/#
#
def De_Algorithm(fobj, bounds, mut=0.8, crossp=0.7, popsize=100, its=1000):
dimensions = len(bounds)
pop = np.random.rand(popsize, dimensions)
min_b, max_b = np.asarray(bounds).T
diff = np.fabs(min_b - max_b)
pop_denorm = min_b + pop * diff
fitness = np.asarray([fobj(ind) for ind in pop_denorm])
best_idx = np.argmin(fitness)
best = pop_denorm[best_idx]
for i in range(its):
for j in range(popsize):
idxs = [idx for idx in range(popsize) if idx != j]
a, b, c = pop[np.random.choice(idxs, 3, replace=False)]
mutant = np.clip(a + mut * (b - c), 0, 1)
cross_points = np.random.rand(dimensions) < crossp
if not np.any(cross_points):
cross_points[np.random.randint(0, dimensions)] = True
trial = np.where(cross_points, mutant, pop[j])
trial_denorm = min_b + trial * diff
f = fobj(trial_denorm)
if f < fitness[j]:
fitness[j] = f
pop[j] = trial
if f < fitness[best_idx]:
best_idx = j
best = trial_denorm
# print("Iteration number= %s" % (i))
# print("Best Fitness= %s" % (fitness[best_idx]))
# print("Best values= %s" % (best))
yield best, fitness[best_idx]
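# A small sanity check (not part of the original notebook): minimise a simple sphere function
# to confirm the generator converges before plugging in the expensive XGBoost objective below.
toy_result = list(De_Algorithm(lambda w: np.sum(w**2), bounds=[(-5, 5)] * 3, popsize=20, its=50))
toy_best, toy_fitness = toy_result[-1]
print("Toy best solution:", toy_best, "Toy best fitness:", toy_fitness)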
# ## ML algorithm of your choice
# Let's use one of the most common ML competition algorithms, XGBoost.
def xgb2(X_training, y_training, X_valid, y_valid, w):
w[1] = round(w[1])
w[2] = round(w[2])
w[6] = round(w[6])
w[7] = round(w[7])
w[8] = round(w[8])
w[9] = round(w[9])
w[10] = round(w[10])
params = {
"eta": w[0], # 0.3,
"tree_method": "hist",
"grow_policy": "lossguide",
"max_leaves": w[1], # 1400,
"max_depth": w[2], # 0,
"subsample": w[3], # 0.9,
"colsample_bytree": w[4], # 0.7,
"colsample_bylevel": w[5], # 0.7,
"min_child_weight": w[6], # 0,
"alpha": w[7], # 4,
"objective": "binary:logistic",
"scale_pos_weight": w[8], # 9,
"eval_metric": "auc",
"nthread": w[9], # 8,
"random_state": w[10], # 99,
"silent": True,
}
dtrain = xgb.DMatrix(X_training, y_training)
dvalid = xgb.DMatrix(X_valid, y_valid)
watchlist = [(dtrain, "train"), (dvalid, "valid")]
model = xgb.train(
params,
dtrain,
100,
watchlist,
maximize=True,
early_stopping_rounds=25,
verbose_eval=0,
)
# make predictions for test data
X_valid = xgb.DMatrix(X_valid)
y_pred = model.predict(X_valid, ntree_limit=model.best_ntree_limit)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_valid, predictions)
# print("Accuracy: %.2f%%" % (accuracy * 100.0))
return model, accuracy
# ## Data-set of your choice
# Let's use the TalkingData set which is available on Kaggle.
# address_train = '../input/talkingdata-adtracking-fraud-detection/train_sample.csv'
address_train = "../input/talkingdata-adtracking-fraud-detection/train.csv"
# ### Some handy functions
def Drop_cols(df, x):
df.drop(labels=x, axis=1, inplace=True)
return df
def Plot_Hist_column(df, x):
pyplot.hist(df[x], log=True)
pyplot.title(x)
pyplot.show()
def Plot_Hist_columns(df, xlist):
[Plot_Hist_column(df, x) for x in xlist]
pyplot.show()
def Make_X_Y(df):
Y = pd.DataFrame()
Y["is_attributed"] = df["is_attributed"]
X = df.copy()
X.drop(labels=["is_attributed"], axis=1, inplace=True)
return X, Y
def Train_Test_training_valid(X, Y, ratio):
Num_of_line = 100
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=ratio)
X_training, X_valid, y_training, y_valid = train_test_split(
X_train, y_train, test_size=ratio, random_state=0
)
return X_training, y_training, X_valid, y_valid
def read_train_test_data_balanced(address_train):
# Read Training data, all class 1 and add same amount 0
iter_csv = pd.read_csv(
address_train, iterator=True, chunksize=10000000, parse_dates=["click_time"]
)
df_train_1 = pd.concat([chunk[chunk["is_attributed"] > 0] for chunk in iter_csv])
iter_csv = pd.read_csv(
address_train,
iterator=True,
chunksize=10000000,
parse_dates=["click_time"],
skiprows=range(1, 120000000),
nrows=2000000,
)
df_train_0 = pd.concat([chunk[chunk["is_attributed"] == 0] for chunk in iter_csv])
# seperate same number values as train data with class 1
df_train_0 = df_train_0.head(len(df_train_1))
# Merge 0 and 1 data
df_train = Merge_data(df_train_1, df_train_0)
return df_train
def Merge_data(df1, df2):
frames = [df1, df2]
df = pd.concat(frames)
return df
# ## Preparing a dataset
# Read the data in a balanced way: read all rows with class 1 from the train data set, then add the same number of class-0 rows. We now have a balanced data set with an equal number of 0s and 1s.
df_train = read_train_test_data_balanced(address_train)
df_train.head(3)
# See the distribution of the output parameter
xlist = ["is_attributed"]
Plot_Hist_columns(df_train, xlist)
# Let's drop click_time and attributed_time for the sake of simplicity
df_train.drop(labels=["click_time", "attributed_time"], axis=1, inplace=True)
# Divide the data set into input (X) and output (Y) parameters.
X, Y = Make_X_Y(df_train)
X_training, y_training, X_valid, y_valid = Train_Test_training_valid(X, Y, 0.1)
# check out the ML algorithm and make sure it works.
# Here you can run the XGBoost algorithm on data with your favorite hyperparameters.
# w = [learning_rate,
# n_estimators,
# max_depth,
# min_child_weight,
# gamma,
# subsample,
# colsample_bytree,
# nthread,
# scale_pos_weight]
"""w = [0.1, 3, 3, 1, 0, 0.8, 0.8, 4, 1]
Trained_XGBoost_Model, XGBoost_accuracy = Train_XGBoost(X_training, y_training, X_valid, y_valid, w)"""
# check out the ML algorithm and make sure it works.
# Here you can run the XGBoost algorithm on data with your favorite hyperparameters.
# w = [learning_rate,
# n_estimators,
# max_depth,
# min_child_weight,
# gamma,
# subsample,
# colsample_bytree,
# nthread,
# scale_pos_weight]
w = [0.1, 1400, 0, 0.9, 0.7, 0.7, 0, 4, 9, 8, 99]
model2, accuracy = xgb2(X_training, y_training, X_valid, y_valid, w)
"""#Define an objective funtion.
def Objective_Function(w):
w = w.tolist()
Trained_XGBoost_Model, XGBoost_accuracy = Train_XGBoost(X_training, y_training, X_valid, y_valid, w)
return (1-XGBoost_accuracy)"""
# Define an objective function.
def Objective_Function2(w):
w = w.tolist()
model2, accuracy = xgb2(X_training, y_training, X_valid, y_valid, w)
return 1 - accuracy
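# Hedged sanity check: the DE search below minimises this objective, i.e. 1 - validation
# accuracy, so at the hand-picked parameter vector already evaluated above the objective is simply:
print("objective value at the hand-picked parameters:", 1 - accuracy)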
"""#Run the DE algorithm on objective function in your favorite range of hyperparameters.
result = list(De_Algorithm(Objective_Function,
[(0.001, 1), # learning_rate
(3, 1000), # n_estimators
(2, 20), # max_depth
(1, 20), # min_child_weight
(0.001, 1), # gamma
(0.001, 1), # subsample
(0.001, 1), # colsample_bytree
(2, 8), # nthread
(1, 8)], # scale_pos_weight]
mut=0.4, crossp=0.8, popsize=10, its=30))"""
# Run the DE algorithm on objective function in your favorite range of hyperparameters.
result = list(
De_Algorithm(
Objective_Function2,
[
(0.001, 1), # eta
(3, 1500), # max_leaves
(0, 20), # max_depth
(0, 1), # subsample
(0.001, 1), # colsample_bytree
(0.001, 1), # colsample_bylevel
(0.001, 1), # min_child_weight
(2, 8), # alpha
(1, 10), # scale_pos_weight
(1, 10), # nthread
(1, 10),
], # random_state
mut=0.4,
crossp=0.8,
popsize=10,
its=40,
)
)
df = pd.DataFrame(result)
# separate the best hyperparameters.
def Best_coffs(df):
# df['w1'], df['w2'], df['w3'], df['w4'], df['w5'], df['w6'], df['w7'], df['w8'], df['w9'] = zip(*df[0]) # Unzip
(
df["w1"],
df["w2"],
df["w3"],
df["w4"],
df["w5"],
df["w6"],
df["w7"],
df["w8"],
df["w9"],
df["w10"],
df["w11"],
) = zip(
*df[0]
) # Unzip
cols = [0] # Drop the first column
df.drop(df.columns[cols], axis=1, inplace=True) # Drop the first column
df.columns.values[0] = "Fitness" # name the first column as Fitness
best_coff = df.iloc[
len(df) - 1, 1:
] # insert the best coefficients into the best_coff
return best_coff
Best_coffs(df)
def Plot_DEA_Evolution(df):
    data_ncol = len(df.columns)  # number of parameters
fig = plt.figure(
figsize=(20, 15)
) # you may change these to change the distance between plots.
for i in range(1, (data_ncol + 1)):
if i < (data_ncol):
plt.subplot(3, 4, i)
plt.plot(df["w{}".format(i)], "bo", markersize=4)
plt.xlabel("Iteration")
plt.ylabel("w{}".format(i))
plt.grid(True)
else:
plt.subplot(3, 4, data_ncol)
plt.plot(df["Fitness"], "red", markersize=4)
plt.xlabel("Iteration")
plt.ylabel("Fitness")
plt.grid(True)
plt.show()
# ## Visualization of searching progress
Plot_DEA_Evolution(df)
df = pd.DataFrame(result)
def Best_coffs(df):
# df['learning_rate'],df['n_estimators'], df['max_depth'],df['min_child_weight'], df['gamma'],df['subsample'], df['colsample_bytree'],df['nthread'], df['scale_pos_weight'] = zip(*df[0]) # Unzip
(
df["eta"],
df["max_leaves"],
df["max_depth"],
df["subsample"],
df["colsample_bytree"],
df["colsample_bylevel"],
df["min_child_weight"],
df["alpha"],
df["scale_pos_weight"],
df["nthread"],
df["random_state"],
) = zip(
*df[0]
) # Unzip
cols = [0] # Drop the first column
df.drop(df.columns[cols], axis=1, inplace=True) # Drop the first column
df.columns.values[0] = "Fitness" # name the first column as Fitness
best_Parameters = df.iloc[
len(df) - 1, 1:
] # insert the best coefficients into the best_coff
return best_Parameters
def print_hyper_parameters(df):
"""best_Parameters = Best_coffs(df)
best_Parameters[1] = round(best_Parameters[1])
best_Parameters[2] = round(best_Parameters[2])
best_Parameters[3] = round(best_Parameters[3])
best_Parameters[7] = round(best_Parameters[7])
best_Parameters[8] = round(best_Parameters[8])"""
best_Parameters = Best_coffs(df)
best_Parameters[1] = round(best_Parameters[1])
best_Parameters[2] = round(best_Parameters[2])
best_Parameters[6] = round(best_Parameters[6])
best_Parameters[7] = round(best_Parameters[7])
best_Parameters[8] = round(best_Parameters[8])
best_Parameters[9] = round(best_Parameters[9])
best_Parameters[10] = round(best_Parameters[10])
print(best_Parameters)
print_hyper_parameters(df)
# ## Train XGBoost using the best hyperparameters
def xgb2(X_training, y_training, X_valid, y_valid, w):
w[1] = round(w[1])
w[2] = round(w[2])
w[6] = round(w[6])
w[7] = round(w[7])
w[8] = round(w[8])
w[9] = round(w[9])
w[10] = round(w[10])
params = {
"eta": w[0], # 0.3,
"tree_method": "hist",
"grow_policy": "lossguide",
"max_leaves": w[1], # 1400,
"max_depth": w[2], # 0,
"subsample": w[3], # 0.9,
"colsample_bytree": w[4], # 0.7,
"colsample_bylevel": w[5], # 0.7,
"min_child_weight": w[6], # 0,
"alpha": w[7], # 4,
"objective": "binary:logistic",
"scale_pos_weight": w[8], # 9,
"eval_metric": "auc",
"nthread": w[9], # 8,
"random_state": w[10], # 99,
"silent": True,
}
dtrain = xgb.DMatrix(X_training, y_training)
dvalid = xgb.DMatrix(X_valid, y_valid)
watchlist = [(dtrain, "train"), (dvalid, "valid")]
model = xgb.train(
params,
dtrain,
1000,
watchlist,
maximize=True,
early_stopping_rounds=25,
verbose_eval=5,
)
# make predictions for test data
X_valid = xgb.DMatrix(X_valid)
y_pred = model.predict(X_valid, ntree_limit=model.best_ntree_limit)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_valid, predictions)
# print("Accuracy: %.2f%%" % (accuracy * 100.0))
return model, accuracy
df = pd.DataFrame(result)
w = list(Best_coffs(df))
Trained_Model, accuracy = xgb2(X_training, y_training, X_valid, y_valid, w)
# ## Read Test data, edit it, Fit, and Predict using hyperparameters and XGBoost
address_test = "../input/talkingdata-adtracking-fraud-detection/test.csv"
df_test = pd.read_csv(address_test, parse_dates=["click_time"])
df_test.head()
# Let's drop click_time and click_id for the sake of simplicity
df_test.drop(labels=["click_time", "click_id"], axis=1, inplace=True)
df_test.head()
def predict_And_Submit_using_xgb(df, Trained_Model):
Num_of_line = 100
print(Num_of_line * "=")
# sub = pd.DataFrame()
# sub['click_id'] = df['click_id'].astype('int')
# df['clicks_by_ip'] = df['clicks_by_ip'].astype('uint16')
data_to_submit = pd.DataFrame()
data_to_submit["click_id"] = range(0, len(df))
dtest = xgb.DMatrix(df)
del df
predict = Trained_Model.predict(dtest, ntree_limit=Trained_Model.best_ntree_limit)
data_to_submit["is_attributed"] = predict
print(Num_of_line * "=")
print("data_to_submit = \n", data_to_submit.head(5))
pyplot.hist(data_to_submit["is_attributed"], log=True)
# data_to_submit.to_csv('Amin_csv_to_submit.csv', index = False)
return data_to_submit
data_to_submit = predict_And_Submit_using_xgb(df_test, Trained_Model)
data_to_submit2 = pd.DataFrame()
data_to_submit2["is_attributed"] = [
0 if i < 0.1 else 1 for i in data_to_submit["is_attributed"]
]
pyplot.hist(data_to_submit2["is_attributed"], log=True)
data_to_submit2 = pd.DataFrame()
data_to_submit2["is_attributed"] = [
0 if i < 0.2 else 1 for i in data_to_submit["is_attributed"]
]
pyplot.hist(data_to_submit2["is_attributed"], log=True)
data_to_submit2 = pd.DataFrame()
data_to_submit2["is_attributed"] = [
0 if i < 0.3 else 1 for i in data_to_submit["is_attributed"]
]
pyplot.hist(data_to_submit2["is_attributed"], log=True)
data_to_submit2 = pd.DataFrame()
data_to_submit2["is_attributed"] = [
0 if i < 0.4 else 1 for i in data_to_submit["is_attributed"]
]
pyplot.hist(data_to_submit2["is_attributed"], log=True)
data_to_submit.to_csv("Amin_csv_to_submit.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# defining path
os.path.isfile("/kaggle/input/glass/glass.csv")
# importing data
Shan = pd.read_csv("/kaggle/input/glass/glass.csv")
Shan.tail(10)
Shan.describe()
missing_values = Shan.isnull()
missing_values.head(10)
import numpy as np
import pandas as pd
# to save model
import pickle
# Import visualization modules
import matplotlib.pyplot as plt
import seaborn as sns
Shan.dtypes
sns.pairplot(Shan)
Shan.shape
mask = np.zeros_like(Shan.corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(16, 12))
plt.title("Pearson Correlation Matrix", fontsize=25)
sns.heatmap(
Shan.corr(),
linewidths=0.25,
vmax=0.7,
square=True,
cmap="BuGn",
# "BuGn_r" to reverse
linecolor="w",
annot=True,
annot_kws={"size": 8},
mask=mask,
cbar_kws={"shrink": 0.9},
)
x = Shan[["Al"]]
y = Shan["Type"]
# Split data into Train and test
# Import module to split dataset
from sklearn.model_selection import train_test_split
# Split data set into training and test sets
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=100
)
# Checking file types created
print(x.shape)
# print(X_test.head())
# print(y_train.head())
print(y.shape)
# Run the model
# Import model for fitting
from sklearn.linear_model import LogisticRegression
# Create instance (i.e. object) of LogisticRegression
# model = LogisticRegression()
# You can try the following variations on the above model; the above is just the default one
model = LogisticRegression()
# Fit the model using the training data
# X_train -> parameter supplies the data features
# y_train -> parameter supplies the target labels
output_model = model.fit(x_train, y_train)
# output =x_test
# output['vehicleTypeId'] = y_test
output_model
from sklearn import linear_model
# Save the model in pickle
# Save to file in the current working directory
pkl_filename = "pickle_model.pkl"
with open(pkl_filename, "wb") as file:
pickle.dump(model, file)
# Load from file
with open(pkl_filename, "rb") as file:
pickle_model = pickle.load(file)
# Calculate the accuracy score and predict target values
score = pickle_model.score(x_test, y_test)
# print(score)
print("Test score: {0:.2f} %".format(100 * score))
Ypredict = pickle_model.predict(x_test)
model.predict(x_train)
print(x_test.shape)
print(y_test.shape)
print(x_test)
print(y_test)
df = pd.DataFrame({"Actual": y_test, "Predicted": Ypredict.flatten()})
df
from sklearn import metrics
print("Mean Absolute Error:", metrics.mean_absolute_error(y_test, Ypredict))
print("Mean Squared Error:", metrics.mean_squared_error(y_test, Ypredict))
print("Root Mean Squared Error:", np.sqrt(metrics.mean_squared_error(y_test, Ypredict)))
ax = plt.axes()
ax.scatter(x, y)
plt.title("Input Data and regression line ")
ax.plot(x_test, Ypredict, color="Red")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.axis("tight")
plt.show()
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
predictions = model.predict(x_test)
# print("",classification_report(y_test, predictions))
# print("confusion_matrix",confusion_matrix(y_test, predictions))
# print("accuracy_score",accuracy_score(y_test, predictions))
##**Accuracy is a classification metric. You can't use it with a regression. See the documentation for info on the various metrics.
# For regression problems you can use: R2 Score, MSE (Mean Squared Error), RMSE (Root Mean Squared Error).
# print("Score",score(y_test, X_test))
# score(self, X, y, sample_weight=None)
## setting plot style
plt.style.use("fivethirtyeight")
## plotting residual errors in training data
plt.scatter(
model.predict(x_train),
model.predict(x_train) - y_train,
color="green",
s=1,
label="Train data",
linewidth=5,
)
## plotting residual errors in test data
plt.scatter(
model.predict(x_test),
model.predict(x_test) - y_test,
color="blue",
s=1,
label="Test data",
linewidth=4,
)
## plotting line for zero residual error
plt.hlines(y=0, xmin=0, xmax=4, linewidth=2)
## plotting legend
plt.legend(loc="upper right")
## plot title
plt.title("Residual errors")
## function to show plot
plt.show()
from sklearn.ensemble import RandomForestRegressor
rf_regressor = RandomForestRegressor(n_estimators=28, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
rf_regressor.fit(x_train, y_train)
rf_regressor.score(x_test, y_test)
rf_pred = rf_regressor.predict(x_test)
rf_score = rf_regressor.score(x_test, y_test)
expl_rf = explained_variance_score(rf_pred, y_test)
print(
"Random Forest regression Model Score is",
round(rf_regressor.score(x_test, y_test) * 100),
)
# Split data into 'X' features and 'y' target label sets
X1 = Shan[["RI", "Na", "Mg", "Al", "Si", "K", "Ca"]]
y1 = Shan["Type"]
from sklearn.model_selection import train_test_split
# Split data set into training and test sets
X1_train, X1_test, y1_train, y1_test = train_test_split(
X1, y1, test_size=0.25, random_state=100
)
# Run the model
# Import model for fitting
from sklearn.linear_model import LogisticRegression
# Create instance (i.e. object) of LogisticRegression
# model = LogisticRegression()
# You can try the following variations on the above model; the above is just the default one
model = LogisticRegression()
# Fit the model using the training data
# X_train -> parameter supplies the data features
# y_train -> parameter supplies the target labels
output_model = model.fit(X1_train, y1_train)
# output =x_test
# output['vehicleTypeId'] = y_test
output_model
model = LogisticRegression()
output_model = model.fit(X1_train, y1_train)
output_model
pkl_filename = "pickle_model.pkl"
with open(pkl_filename, "wb") as file:
pickle.dump(model, file)
# Load from file
with open(pkl_filename, "rb") as file:
pickle_model = pickle.load(file)
# Calculate the accuracy score and predict target values
score = pickle_model.score(X1_test, y1_test)
print("Test score: {0:.2f} %".format(100 * score))
Ypredict = pickle_model.predict(X1_test)
df = pd.DataFrame({"Actual": y1_test, "Predicted": Ypredict.flatten()})
df
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df = pd.read_csv("../input/housesalesprediction/kc_house_data.csv")
df.head()
df.keys()
# Check null values
df.isnull().sum()
# Overview of dataset
df.describe().transpose()
# Since it is a continuous label, I would like to see a histogram/ distribution of the label
plt.figure(figsize=(10, 6))
sns.distplot(df["price"])
# Looks like our houses fall between 0 and ~1.5 million dollars. We might want to drop the outliers (expensive houses) if they are just a few points. We can then build a model that realistically predicts house prices in the 0-2 million dollar range. Since there are not many houses on the market that are that expensive, it may not be useful for our model to train on these outliers.
# **Analyses of different features**
# Categorical - Bedrooms
plt.figure(figsize=(10, 6))
sns.countplot(df["bedrooms"])
df.corr().price.sort_values()
# Square feet living space has a high correlation to the house price
# Exploring highly correlated features with the label through SCATTERPLOT
plt.figure(figsize=(10, 6))
sns.scatterplot(x="price", y="sqft_living", data=df)
# Boxplot of no. of bedrooms and the price
plt.figure(figsize=(10, 6))
sns.boxplot(x="bedrooms", y="price", data=df)
# There is quite a bit of variation for 3~7 bedrooms, which makes sense because, in the countplot from before, the majority of the houses have 3~7 bedrooms, meaning there is a large variety of prices.
# **Geographical Properties**
# Longitude vs. Price
plt.figure(figsize=(12, 8))
sns.scatterplot(x="price", y="long", data=df)
# Latitude vs. Price
plt.figure(figsize=(12, 8))
sns.scatterplot(x="price", y="lat", data=df)
# Looking at both Lat and Long with a hue of Price
plt.figure(figsize=(12, 8))
sns.scatterplot(x="long", y="lat", hue="price", data=df)
# Image of King County in U.S.
# I'm not getting quite the color gradient I would like, because of the really expensive outlier house prices.
# So now let's clean up by dropping these outliers
df.sort_values("price", ascending=False).head(20)
# Sample out top 1% of all houses
len(df) * (0.01)
bottom_99_perc = df.sort_values("price", ascending=False).iloc[216:]
plt.figure(figsize=(12, 8))
sns.scatterplot(
x="long",
y="lat",
data=bottom_99_perc,
hue="price",
palette="RdYlGn",
edgecolor=None,
alpha=0.2,
)
# **Other features**
# Whether or not house is in front of waterfront
sns.boxplot(x="waterfront", y="price", data=df)
# **Working with feature data**
df = df.drop("id", axis=1)
# TO datetime object
df["date"] = pd.to_datetime(df["date"])
# New column Year
df["year"] = df["date"].apply(lambda date: date.year)
# New column Month
df["month"] = df["date"].apply(lambda date: date.month)
# See if whether house prices fluctuate seasonally
# Monthly
plt.figure(figsize=(10, 6))
sns.boxplot(x="month", y="price", data=df)
# Mean Price varying throughout the months
df.groupby("month").mean()["price"].plot()
# Mean Price varying throughout the years
df.groupby("year").mean()["price"].plot()
# Dropping the date
df = df.drop("date", axis=1)
df.columns
# Zipcode is numerical but not a continuous feature!!
# Zipcode mapping is hard and there is no clear continuous distribution of these actual zipcodes, so need to treat them as a categorical feature.
df["zipcode"].value_counts()
# There are 70 different zipcodes in the data. It is not feasible to get_dummies 70 different categories. So, I will drop the zipcodes.
df = df.drop("zipcode", axis=1)
# could make sense due to scaling, higher should correlate to more value
df["yr_renovated"].value_counts()
# The higher the value of yr_renovated, the more likely the house is to have a higher sale price.
# Since 0 also follows this correlation (it behaves almost like the lowest possible year),
# we should expect houses with 0 to have little extra value, so the raw column can be kept as-is.
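# Hedged aside: if the raw renovation year proved noisy, a simple alternative feature would be
# a binary "was renovated" flag; this is only an illustration, the model below keeps the raw column.
print((df["yr_renovated"] > 0).astype(int).value_counts())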
df["sqft_basement"].value_counts()
# The same thing goes for 'sqft_basement', where 0 means there is no basement
# **Scaling and Train Test Split**
X = df.drop("price", axis=1)
y = df["price"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=101
)
# **Scaling**
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train.shape
X_test.shape
# **Creating a model**
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
model = Sequential()
model.add(Dense(19, activation="relu"))
model.add(Dense(19, activation="relu"))
model.add(Dense(19, activation="relu"))
model.add(Dense(19, activation="relu"))
model.add(Dense(1))
model.compile(optimizer="adam", loss="mse")
# model.fit(x=X_train, y=y_train, )
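# The fit call above was left commented out; a minimal hedged sketch of how training could
# proceed (the batch size and the small epoch count here are illustrative, not tuned values):
history = model.fit(
    x=X_train,
    y=y_train.values,
    validation_data=(X_test, y_test.values),
    batch_size=128,
    epochs=5,
    verbose=0,
)
pd.DataFrame(history.history).plot(title="Training vs validation loss")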
|
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import (
f1_score,
recall_score,
precision_score,
accuracy_score,
confusion_matrix,
log_loss,
)
from sklearn.model_selection import KFold
import itertools
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train.head(3)
train_x = train.drop(["Survived"], axis=1)
train_x2 = train.drop(["Survived"], axis=1)
train_y = train["Survived"]
# # Understanding the data (EDA)
# Understanding the data is important while building models and features.
# Rather than assuming a model or hypothesis up front, we explore the data from various angles (exploratory data analysis, EDA).
train_x = train_x.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
train_x2 = train_x2.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
test_x = test.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
test_x2 = test.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
# For xgboost
from sklearn.preprocessing import LabelEncoder
# Label encoding for the categorical variables
for columns in ["Sex", "Embarked"]:
le = LabelEncoder()
le.fit(train_x[columns].fillna("NA"))
    # Transform
train_x[columns] = le.transform(train_x[columns].fillna("NA"))
test_x[columns] = le.transform(test_x[columns].fillna("NA"))
print(train_x.shape)
train_x.head()
# For logistic regression
# one-hot encoding
from sklearn.preprocessing import OneHotEncoder
cat_cols = ["Sex", "Embarked", "Pclass"]
ohe = OneHotEncoder(categories="auto", sparse=False)
ohe.fit(train_x2[cat_cols].fillna("NA"))
# Create the column names for the one-hot encoded dummy variables
ohe_columns = []
for i, c in enumerate(cat_cols):
ohe_columns += [f"{c}_{v}" for v in ohe.categories_[i]]
# Transform with one-hot encoding
ohe_train_x2 = pd.DataFrame(
ohe.transform(train_x2[cat_cols].fillna("NA")), columns=ohe_columns
)
ohe_test_x2 = pd.DataFrame(
ohe.transform(test_x2[cat_cols].fillna("NA")), columns=ohe_columns
)
# Drop the original columns that have been one-hot encoded
train_x2 = train_x2.drop(cat_cols, axis=1)
test_x2 = test_x2.drop(cat_cols, axis=1)
# Concatenate the one-hot encoded columns
train_x2 = pd.concat([train_x2, ohe_train_x2], axis=1)
test_x2 = pd.concat([test_x2, ohe_test_x2], axis=1)
# Fill missing values of the numeric variables with the training-data mean
num_cols = ["Age", "SibSp", "Parch", "Fare"]
for col in num_cols:
train_x2[col].fillna(train_x2[col].mean(), inplace=True)
test_x2[col].fillna(test_x2[col].mean(), inplace=True)
# Log-transform the Fare variable
train_x2["Fare"] = np.log1p(train_x2["Fare"])
test_x2["Fare"] = np.log1p(test_x2["Fare"])
model_xgb = XGBClassifier(eta=1, n_estimators=100, random_state=0, max_depth=7)
model_xgb.fit(train_x, train_y)
pred_xgb = model_xgb.predict_proba(test_x)[:, 1]  # predicted survival probability
model_lr = LogisticRegression(solver="lbfgs", max_iter=300)
model_lr.fit(train_x2, train_y)
pred_lr = model_lr.predict_proba(test_x2)[:, 1]  # predicted survival probability
# Weighted average of the two probability estimates, then threshold at 0.5 to get 0/1 labels
pred = pred_xgb * 0.8 + pred_lr * 0.2
pred_label = (pred > 0.5).astype(int)
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pred_label})
submission.to_csv("submission.csv", index=False)
pred.shape
|
import pandas as pd
from json import load
import urllib.request, json
from pandas.io.json import json_normalize
import seaborn as sns
import pylab as plt
cur_url = "https://zenodo.org/api/records/?sort=mostrecent&type=dataset&access_right=open&size=1000"
import os
counter = 0
# Make sure the output directory exists before dumping each page of results below
os.makedirs("outputs", exist_ok=True)
while True:
print(cur_url)
with urllib.request.urlopen(cur_url) as url:
data = json.loads(url.read().decode())
with open("outputs/%02d.json" % (counter), "w") as outfile:
json.dump(data, outfile, sort_keys=True, indent=4, ensure_ascii=False)
counter += 1
if "next" in data["links"]:
next_url = data["links"]["next"]
next_page = int(next_url.split("page=")[1].split("&")[0])
if next_page == 10:
last_date = data["hits"]["hits"][-1]["created"].split("+")[0]
next_url = (
"https://zenodo.org/api/records/?sort=mostrecent&q=created%3A%5B%2A+TO+"
+ last_date
+ "%5D&page=1&type=dataset&access_right=open&size=1000"
)
cur_url = next_url
else:
break
|
import numpy as np
import pandas as pd
import tensorflow as tf
import efficientnet.tfkeras as efn
import glob, os
import pickle
keras = tf.keras
layers = keras.layers
TRAIN_PATH = glob.glob(r"/kaggle/input/cifar10-python/cifar-10-batches-py/data*")
TEST_PATH = [r"/kaggle/input/cifar10-python/cifar-10-batches-py/test_batch"]
BATCH_META = r"/kaggle/input/cifar10-python/cifar-10-batches-py/batches.meta"
BATCH_SIZE = 100
TRAIN_SIZE = 50000
TEST_SIZE = 10000
EPOCH = 40
AUTOTUNE = tf.data.experimental.AUTOTUNE
def load_data(path_lib, dataset_size):
image = []
label = []
for path in path_lib:
with open(path, "rb") as file:
dataset = pickle.load(file, encoding="latin1")
x = dataset["data"]
y = dataset["labels"]
image.append(x)
label.append(y)
image = np.concatenate(image, axis=0)
label = np.concatenate(label, axis=0)
image = np.reshape(image, [dataset_size, 3, 32, 32])
image = np.moveaxis(image, 1, 2)
image = np.moveaxis(image, 2, 3)
label = np.array(label)
return image, label
def load_meta(path):
with open(path, "rb") as file:
dictionary = pickle.load(file, encoding="latin1")
label_to_name = dict(
(index, name) for index, name in enumerate(dictionary["label_names"])
)
return label_to_name
dictionary = load_meta(BATCH_META)
test_image, test_label = load_data(TEST_PATH, TEST_SIZE)
train_image, train_label = load_data(TRAIN_PATH, TRAIN_SIZE)
train_dataset = tf.data.Dataset.from_tensor_slices((train_image, train_label))
test_dataset = tf.data.Dataset.from_tensor_slices((test_image, test_label))
train_dataset = (
train_dataset.shuffle(TRAIN_SIZE).repeat().batch(BATCH_SIZE).prefetch(AUTOTUNE)
)
test_dataset = test_dataset.batch(BATCH_SIZE).prefetch(AUTOTUNE)
optimizer = keras.optimizers.Adam(1e-3)
loss = keras.losses.SparseCategoricalCrossentropy()
metrics = keras.metrics.SparseCategoricalAccuracy()
learning_rate_callback = keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=5, min_lr=1e-5
)
base_network = keras.applications.DenseNet201(
weights="imagenet", include_top=False, input_shape=[128, 128, 3]
)
# base_network = efn.EfficientNetB7(include_top=False,input_shape=(128,128,3),weights='imagenet')
network = keras.Sequential(
[
layers.UpSampling2D(size=[2, 2], input_shape=[32, 32, 3]),
layers.UpSampling2D(size=[2, 2]),
base_network,
layers.GlobalAveragePooling2D(),
layers.Dense(2048),
layers.BatchNormalization(),
layers.ReLU(),
layers.Dense(512),
layers.BatchNormalization(),
layers.ReLU(),
layers.Dense(10, activation="softmax"),
]
)
network.summary()
network.compile(optimizer=optimizer, loss=loss, metrics=[metrics])
network.fit(
train_dataset,
epochs=EPOCH,
steps_per_epoch=TRAIN_SIZE // BATCH_SIZE,
validation_data=test_dataset,
validation_steps=TEST_SIZE // BATCH_SIZE,
callbacks=[learning_rate_callback],
)
network.save(r"./Network.out")
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Load all the train, test and submission data
tweet_train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
tweet_test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
target = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
tweet_train.head()
# Missing values in the train data
tweet_train.isnull().sum()
# The output shows location has many null values and keyword with few null values.
# Lets see how many real and fake tweets are present in the train dataset.
tweet_train["target"].value_counts()
# EDA for Real and fake tweets in the dataset
real = len(tweet_train[tweet_train["target"] == 1])
fake = len(tweet_train[tweet_train["target"] == 0])
df_count_pie = pd.DataFrame({"Class": ["Real", "Not Real"], "Counts": [real, fake]})
df_count_pie.Counts.groupby(df_count_pie.Class).sum().plot(
kind="pie", autopct="%1.1f%%"
)
plt.axis("equal")
plt.title("Tweeta which are Real or Not")
plt.show()
# **NLP**
# Preprocessing the text data in the train and test dataset.
#
tweet_train["text"][:3]
# The above output shows the text column, which needs pre-processing: converting the words to lowercase, removing punctuation and stopwords, and tokenizing the text into words.
stopword = stopwords.words("english")
def text_processing(text):
    text = re.sub(r"[^\w\d\s]+", "", text)
text = text.lower()
tok = nltk.word_tokenize(text)
words = [word for word in tok if word not in stopword]
return words
def join_words(words):
words = " ".join(words)
return words
# preprocess the train text data
tweet_train["text_pre"] = tweet_train["text"].apply(lambda x: text_processing(x))
tweet_train["text"] = tweet_train["text_pre"].apply(lambda x: join_words(x))
# preprocess the test text data
tweet_test["text_pre"] = tweet_test["text"].apply(lambda x: text_processing(x))
tweet_test["text"] = tweet_test["text_pre"].apply(lambda x: join_words(x))
# Look at the train dataframe after the text is preprocessed.
tweet_train.head(3)
# Let us convert the words into vectors using the NLP CountVectorizer method. This vector is used for the training the model.
wrd_vec = CountVectorizer()
word_vector = wrd_vec.fit_transform(tweet_train["text"])
test_vector = wrd_vec.transform(tweet_test["text"])
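# Hedged peek at the vectorised output: rows are tweets, columns are vocabulary terms,
# so the train and test matrices must share the same number of columns.
print("vocabulary size:", len(wrd_vec.vocabulary_))
print("train matrix shape:", word_vector.shape, "| test matrix shape:", test_vector.shape)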
# Let us see the sample submission file
target.head(3)
tar = target["target"]
# **Prediction model**
# Logistic Regression
clf = LogisticRegression(C=1.0)
clf.fit(word_vector, tweet_train["target"])
# Predict the tweets for the test data
pred = clf.predict(test_vector)
log_score = cross_val_score(clf, word_vector, tweet_train["target"], cv=3)
print(log_score)
# Random Forest Classifier
ran_model = RandomForestClassifier(
n_estimators=150, bootstrap=True, max_features="sqrt"
)
ran_model.fit(word_vector, tweet_train["target"])
ran_pred = ran_model.predict(test_vector)
ran_score = cross_val_score(ran_model, word_vector, tweet_train["target"], cv=3)
print(ran_score)
target["target"] = ran_pred
target.head()
# submission file
sub_file = pd.DataFrame({"id": target["id"], "target": target["target"]})
sub_file.to_csv("tweet_submission_file.csv", index=False)
|
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn import preprocessing
import matplotlib.pyplot as plt
dc = pd.read_csv("/kaggle/input/santa-2022/image.csv")
ds = pd.read_csv("/kaggle/input/santa-2022/sample_submission.csv")
print(dc)
print(ds)
dc.head()
ds.head()
dc.shape
ds.shape
dc.dtypes
ds.dtypes
dc.isnull().sum()
ds.isnull().sum()
dc.info()
ds.info()
dc.describe()
ds.describe()
cc = dc.corr()
print(cc)
cs = ds.corr()
print(cs)
sns.heatmap(cc)
sns.heatmap(cc, annot=True)
sns.pairplot(dc)
dc.to_csv("image.csv")
ds.to_csv("sample_submission.csv")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
InputLayer,
Flatten,
BatchNormalization,
Dropout,
)
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
print(tf.__version__)
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
print("Training Data shape: ", train_data.shape)
print("Test Data shape: ", test_data.shape)
train_data
X_train = train_data.iloc[:, 1:].values.reshape(-1, 28, 28, 1)
y_train = train_data.iloc[:, :1].values
X_test = test_data.values.reshape(-1, 28, 28, 1)
plt.imshow(X_train[0])
# Normalise data
X_train = X_train / 255
X_test = X_test / 255
# One hot encode
y_train_cat = to_categorical(y_train, 10)
X_train.shape[1:]
# build a simple model, adding additional Conv2D/MaxPooling2D layers may improve accuracy
model = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation="relu"),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile("Adam", "categorical_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train_cat, epochs=3, batch_size=128)
# Make the predictions using the Test, unseen data after training
predictions = model.predict(X_test).argmax(axis=1)
submission = pd.DataFrame(predictions, columns=["Label"])
submission.index.name = "ImageId"
submission = submission.rename(columns={0: "Label"}).reset_index()
submission["ImageId"] = submission["ImageId"] + 1
submission.head()
submission.to_csv("submission.csv", index=False)
print("Submission complete")
|
# # Setup
import datetime
from typing import Dict, List
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
np.set_printoptions(precision=3)
pd.set_option("display.precision", 3)
company_df = pd.read_csv("app/resources/Client_Master.csv", dtype=str)
company_df.head()
detail_df = pd.read_excel("app/resources/company_data_document.xlsx", dtype=str)
detail_df.columns = detail_df.iloc[0]
detail_df = detail_df[1:]
detail_df = detail_df[detail_df["Is Use - Manual"] == "True"]
detail_df.reset_index(drop=True, inplace=True)
detail_df.head()
# # Preprocessing
# - Fill null with default value: corresponding type
# - Convert type: int, date, string
# - Fill null by algorithm: min, max, frequency, encoder
# - Handle wrong date
# - Drop null
# - Drop column have many unique value
# ## Drop null
company_na_count = company_df.isna().sum()
company_na_count = company_na_count * 100 / len(company_df)
company_df = company_df.loc[:, company_na_count < 95]
company_df.head()
# ## Drop many unique value
# number_unique_value = {}
#
# for column in company_df.columns:
# if len(company_df[column].unique()) < len(company_df) * 90 / 100:
# number_unique_value[column] = len(company_df[column].unique())
#
# number_unique_value = dict(
# sorted(number_unique_value.items(), key=lambda x: x[1], reverse=True)
# )
#
# company_df = company_df.loc[:, number_unique_value.keys()]
# company_df.head()
company_df = company_df[detail_df["Japanese Column Name"].tolist()]
company_df.head()
# ## Fill null by default and convert to original type
mapping_original_column_to_type = {}
mapping_target_column_to_type = {}
for _, x in detail_df.iterrows():
mapping_original_column_to_type[x["Japanese Column Name"]] = x["Column Type"]
mapping_target_column_to_type[x["Japanese Column Name"]] = x["Target Type"]
set(mapping_original_column_to_type.values())
default_string_value = "<empty>"
string_columns = []
for column in company_df.columns:
if mapping_original_column_to_type[column] == "STRING":
company_df[column] = company_df[column].fillna(default_string_value)
company_df[column] = company_df[column].astype(str)
string_columns.append(column)
if mapping_original_column_to_type[column] == "INT":
company_df[column] = company_df[column].astype(float)
# ### Simple Label Encoder
label_encoders: Dict[str, LabelEncoder] = dict.fromkeys(string_columns)
for column in label_encoders.keys():
label_encoders[column] = LabelEncoder()
company_df[column] = label_encoders[column].fit_transform(company_df[column])
# ### Fill null by top k frequency
def get_median_top_frequency_from_series(series_data: pd.Series, k: int = 10):
"""Get mean bottom from series."""
return sorted(series_data.value_counts().head(k).index)[k // 2]
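# Hedged usage check on synthetic data: for k=3 the three most frequent values are
# 5 (x4), 2 (x3) and 7 (x2); sorted they are [2, 5, 7], and the middle one is 5.
_example = pd.Series([5, 5, 5, 5, 2, 2, 2, 7, 7, 1])
print(get_median_top_frequency_from_series(_example, k=3))  # expected: 5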
median_frequency_columns: Dict[int, List[str]] = {
1: [
"資本金(千円)",
"前々期業績税引後利益(千円)",
"前々期業績自己資本比率(%)",
"前期業績税引後利益(千円)",
"前期業績自己資本比率(%)",
"最新期業績税引後利益(千円)",
"最新期業績自己資本比率(%)",
"事業所数",
],
10: [
"前期業績売上高(百万円)",
"評点",
"全国社数",
"全国ランキング",
"評点",
"全国社数",
"全国ランキング",
"都道府県別社数",
"都道府県別ランキング",
"前々期業績売上高(百万円)",
"最新期業績売上高(百万円)",
"代表者生年",
],
8: [
"従業員数",
],
4: [
"株主数",
],
}
for k, columns in median_frequency_columns.items():
for column in columns:
fill_value = get_median_top_frequency_from_series(company_df[column], k)
company_df[column] = company_df[column].fillna(fill_value)
company_df[column] = company_df[column].astype(int)
date_columns = [
"最終コンタクト日",
"【DM】最新の発送日",
"代表者生年月日",
"前々期業績決算期",
"前期業績決算期",
"COSMOS2更新年月日",
]
for column in date_columns:
fill_value = get_median_top_frequency_from_series(company_df[column], 1)
company_df[column] = company_df[column].fillna(fill_value)
company_df.isna().sum().sort_values(ascending=False)
# ### Convert string to pandas date
YEAR_FLAG_MAPPING = {
"明治": 1868,
"大正": 1912,
"昭和": 1927,
"平成": 1990,
"令和": 2019,
}
def preprocessing_japanese_special_date(jp_date_string: str):
"""Preprocessing Japanese Special Date."""
    def corresponding_year(year_flag_str: str):
        """Replace an era-based year (e.g. 令和3) with its Gregorian year."""
        if year_flag_str in jp_date_string:
            if "元年" in jp_date_string:
                # 元年 is the first year of an era, i.e. the era's base Gregorian year
                return jp_date_string.replace(
                    f"{year_flag_str}元年", f"{YEAR_FLAG_MAPPING[year_flag_str]}年"
                )
            si = jp_date_string.find(year_flag_str)
            ei = jp_date_string.find("年")
            year_jp_number = jp_date_string[si:ei] if ei > si else jp_date_string[si:]
            year_jp_number = int(year_jp_number.replace(year_flag_str, ""))
            return jp_date_string.replace(
                f"{year_flag_str}{year_jp_number}",
                str(year_jp_number - 1 + YEAR_FLAG_MAPPING[year_flag_str]),
            )
for flag in YEAR_FLAG_MAPPING.keys():
if flag in jp_date_string:
return corresponding_year(flag)
return jp_date_string
def japanese_date_to_standard_date(japanese_date_string: str):
"""Japanese Date to standard date.
    Convert 2018年3月 -> 3/01/2018.
"""
japanese_date_string = preprocessing_japanese_special_date(japanese_date_string)
if "年" in japanese_date_string:
japanese_date_string = japanese_date_string.split("年")
year = japanese_date_string[0]
month = japanese_date_string[1][:-1]
return f"{month}/01/{year}"
if "-" in japanese_date_string:
japanese_date = japanese_date_string.split("-")
return f"{japanese_date[1]}/{japanese_date[2]}/{japanese_date[0]}"
return japanese_date_string
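# Hedged sanity checks for the converters above (era-year arithmetic follows YEAR_FLAG_MAPPING):
print(japanese_date_to_standard_date("2018年3月"))  # expected: 3/01/2018
print(japanese_date_to_standard_date("令和3年5月"))  # expected: 5/01/2021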
date_columns = [
"前々期業績決算期",
"前期業績決算期",
]
datetime_columns = [
"登録日",
"最終更新日",
]
for column in datetime_columns:
company_df[column] = company_df[column].apply(lambda x: x.split(" ")[0])
for column in date_columns + datetime_columns:
company_df[column] = company_df[column].apply(
lambda x: japanese_date_to_standard_date(x)
)
company_df[column] = pd.to_datetime(company_df[column])
# ### Convert int to date
def convert_int_date(int_value: int) -> str:
    """Convert an Excel-style integer serial date to an M/D/YYYY string."""
    # Values may arrive as strings after the fillna step above, so coerce to int first
    date_obj = datetime.datetime(1899, 12, 30) + datetime.timedelta(days=int(float(int_value)))
    new_date_format = date_obj.strftime("%-m/%-d/%Y")
    return new_date_format
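# Hedged check of the epoch arithmetic above: Excel-style serial 43831 corresponds to 2020-01-01.
print("serial 43831 ->", convert_int_date(43831))  # expected: 1/1/2020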
int2date_columns = [
"最終コンタクト日",
"【DM】最新の発送日",
"代表者生年月日",
]
for column in int2date_columns:
company_df[column] = company_df[column].apply(lambda x: convert_int_date(x))
company_df[column] = pd.to_datetime(company_df[column])
# ## Drop not use
not_use_columns = [
"COSMOS2更新年月日",
]
company_df.drop(columns=not_use_columns, inplace=True)
company_df
# # Add new features
# ## Handle date column and add new
def encode(data, col, max_val):
data[col + "_sin"] = np.sin(2 * np.pi * data[col] / max_val)
data[col + "_cos"] = np.cos(2 * np.pi * data[col] / max_val)
return data
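# Hedged illustration of the cyclical encoding above: month 12 and month 1 end up close
# together on the sin/cos circle, which a plain integer month column would not capture.
_demo = pd.DataFrame({"month": [1, 6, 12]})
print(encode(_demo, "month", 12))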
all_date_columns = []
for column in company_df.columns:
if str(company_df[column].dtype) == "datetime64[ns]":
all_date_columns.append(column)
for column in all_date_columns:
company_df[f"day_{column}"] = company_df[column].dt.day
company_df[f"month_{column}"] = company_df[column].dt.month
company_df[f"year_{column}"] = company_df[column].dt.year
company_df = encode(company_df, f"day_{column}", 31)
company_df = encode(company_df, f"month_{column}", 12)
company_df.drop(columns=all_date_columns, inplace=True)
company_df.head()
# ## Scaler
# # Export
company_df.to_csv("app/resources/company_data_preprocessed.csv", index=False)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
sns.set()
from matplotlib.pylab import rcParams
rcParams["figure.figsize"] = 11, 5
data = pd.read_csv(
"/kaggle/input/ps.csv", index_col="timestamp", parse_dates=True, dayfirst=True
)
data.head()
data["Day"] = data.index.weekday_name
data["Day"] = data["Day"].replace(
{
"Saturday": 1,
"Sunday": 1,
"Monday": 0,
"Tuesday": 0,
"Wednesday": 0,
"Thursday": 0,
"Friday": 0,
}
)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import math
import joblib  # kept for optional model persistence
Xfeatures = ["sub_meter_1", "sub_meter_2", "Day"]
X = data[Xfeatures]
y = data["main_meter"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
reg = RandomForestRegressor(n_estimators=50, max_depth=30, n_jobs=-1, warm_start=True)
a = reg.fit(X_train, y_train)
training_accuracy = reg.score(X_train, y_train)
test_accuracy = reg.score(X_test, y_test)
rmse_train = np.sqrt(mean_squared_error(reg.predict(X_train), y_train))
rmse_test = np.sqrt(mean_squared_error(reg.predict(X_test), y_test))
print(
"Training Accuracy = %0.3f, Test Accuracy = %0.3f, RMSE (train) = %0.3f, RMSE (test) = %0.3f"
% (training_accuracy, test_accuracy, rmse_train, rmse_test)
)
# ## DURBIN_WATSON_FOR_AUTOCORR_ON_statsmodel_MLR
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
import statsmodels.api as sm
import statsmodels.tsa.api as smt
import warnings
X_with_constant = sm.add_constant(X_train)
model = sm.OLS(y_train, X_with_constant)
results = model.fit()
results.params
print(results.summary())
# ### Autocorrelation check (Durbin-Watson)
# Durbin-Watson: 1.970, n = 26400
#
# A Durbin-Watson statistic close to 2 indicates little or no first-order autocorrelation in
# the residuals (values near 0 suggest positive, near 4 negative autocorrelation), so at 1.970
# only a very weak positive autocorrelation remains.
# Next, fit an autoregressive (AR) model to the series as a further check.
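# Hedged worked check: Durbin-Watson is approximately 2 * (1 - rho), so DW = 1.970
# implies a lag-1 residual autocorrelation of roughly 1 - 1.970 / 2 = 0.015, i.e. very weak.
dw_stat = 1.970
print("approximate lag-1 residual autocorrelation:", 1 - dw_stat / 2)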
from pandas import Series
from matplotlib import pyplot
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error
J = data["main_meter"]  # the dataframe has no 'value' column; model the main meter series
train, test = J[1 : len(J) - 7], J[len(J) - 7 :]
model = AR(train)
model_fit = model.fit()
print("Lag: %s" % model_fit.k_ar)
print("Coefficients: %s" % model_fit.params)
predictions = model_fit.predict(
start=len(train), end=len(train) + len(test) - 1, dynamic=False
)
for t in range(len(predictions)):
print("predicted=%f, expected=%f" % (predictions[t], test[t]))
error = mean_squared_error(test, predictions)
print("Test MSE: %.3f" % error)
pyplot.plot(test)
pyplot.plot(predictions, color="red")
pyplot.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Step 1: Import data analysis modules
# for basic mathematics operation
import numpy as np
import pandas as pd
from pandas import plotting
# for visualizations
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("fivethirtyeight")
# for interactive visualizations
import plotly.offline as py
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
from plotly import tools
init_notebook_mode(connected=True)
import plotly.figure_factory as ff
# for path
import os
# importing the dataset
# Step 2 : Data import
input_file = pd.read_csv(
"/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv"
)
input_file.head(5)
# checking if there is any NULL data
input_file.isnull().any().any()
input_file.dtypes
import warnings
warnings.filterwarnings("ignore")
plt.rcParams["figure.figsize"] = (18, 8)
plt.subplot(1, 2, 1)
sns.set(style="whitegrid")
sns.distplot(input_file["MonthlyCharges"])
plt.title("Distribution of Monthly Charges", fontsize=20)
plt.xlabel("Range of Monthly Charges")
plt.ylabel("Count")
labels = ["Female", "Male"]
size = input_file["gender"].value_counts()
colors = ["green", "orange"]
explode = [0, 0.1]
plt.rcParams["figure.figsize"] = (9, 9)
plt.pie(
size, colors=colors, explode=explode, labels=labels, shadow=True, autopct="%.2f%%"
)
plt.title("Gender", fontsize=20)
plt.axis("off")
plt.legend()
plt.show()
# Columns 3 and 4 are categorical (Partner / Dependents), so use the numeric
# MonthlyCharges and tenure columns instead, matching the axis labels used below
x = input_file.iloc[:, [18, 5]].astype(float).values
# let's check the shape of x
print(x.shape)
# Hierarchical Clustering
import scipy.cluster.hierarchy as sch
# Define the clustering features here (tenure and MonthlyCharges, both numeric) so that
# 'Data' exists before it is used for the dendrogram and the clustering below
Data = input_file.iloc[:, [5, 18]].astype(float).values
dendrogram = sch.dendrogram(sch.linkage(Data, method="ward"))
plt.title("Dendrogam", fontsize=20)
plt.xlabel("MonthlyCharges")
plt.ylabel("tenure")
plt.show()
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=5, affinity="euclidean", linkage="ward")
y_hc = hc.fit_predict(Data)
Data = np.array(Data)  # already a numeric array; kept as a harmless safeguard
plt.scatter(Data[y_hc == 0, 0], Data[y_hc == 0, 1], s=100, c="pink", label="miser")
plt.scatter(Data[y_hc == 1, 0], Data[y_hc == 1, 1], s=100, c="yellow", label="general")
plt.scatter(Data[y_hc == 2, 0], Data[y_hc == 2, 1], s=100, c="cyan", label="target")
plt.scatter(
Data[y_hc == 3, 0], Data[y_hc == 3, 1], s=100, c="magenta", label="spendthrift"
)
plt.scatter(Data[y_hc == 4, 0], Data[y_hc == 4, 1], s=100, c="orange", label="careful")
# AgglomerativeClustering has no cluster_centers_, so plot each cluster's mean point instead
hc_centers = np.array([Data[y_hc == k].mean(axis=0) for k in range(5)])
plt.scatter(hc_centers[:, 0], hc_centers[:, 1], s=50, c="blue", label="centroid")
plt.style.use("fivethirtyeight")
plt.title("Hierarchial Clustering", fontsize=20)
plt.ylabel("tenure")
plt.legend()
plt.grid()
plt.show()
# Elbow Method****
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
km = KMeans(n_clusters=i, max_iter=300, n_init=10, random_state=0)
km.fit(Data)
wcss.append(km.inertia_)
plt.plot(range(1, 11), wcss)
plt.title("The Elbow Method", fontsize=20)
plt.xlabel("No. of Clusters")
plt.ylabel("wcss")
plt.show()
Data = input_file.iloc[:, [5, 18]]  # tenure and MonthlyCharges as a dataframe (not used below)
kmeans = KMeans(n_clusters=4, init="k-means++", max_iter=300, n_init=10, random_state=0)
ymeans = kmeans.fit_predict(x)
plt.rcParams["figure.figsize"] = (10, 10)
plt.title("Cluster of Ages", fontsize=30)
plt.scatter(
x[ymeans == 0, 0], x[ymeans == 0, 1], s=100, c="pink", label="Usual Customers"
)
plt.scatter(
x[ymeans == 1, 0], x[ymeans == 1, 1], s=100, c="orange", label="Priority Customers"
)
plt.scatter(
x[ymeans == 2, 0],
x[ymeans == 2, 1],
s=100,
c="lightgreen",
label="Target Customers(Young)",
)
plt.scatter(
x[ymeans == 3, 0], x[ymeans == 3, 1], s=100, c="red", label="Target Customers(Old)"
)
plt.scatter(
kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=50, c="black"
)
plt.style.use("fivethirtyeight")
plt.xlabel("MonthlyCharges")
plt.ylabel("tenure")
plt.legend()
plt.grid()
plt.show()
|
import numpy as np
import PIL.Image
import glob
import matplotlib.pyplot as plt
import os
from cv2 import resize
import tqdm.auto as tqdm
import cv2
import matplotlib
def format(shape):
x_position = range(0, shape[1], 125)
x_label = [str(x // 5) for x in x_position]
y_position = np.arange(0, shape[0], 125)
y_label = [str(y // 5) for y in y_position]
y_position = shape[0] - y_position
plt.xticks(x_position, x_label)
plt.yticks(y_position, y_label)
plt.grid(linestyle="--", color="gray")
plt.ylabel("Range distance to origin [km]")
plt.xlabel("Azimuth distance to origin [km]")
def colormap(key, color="gray"):
cmap = matplotlib.cm.get_cmap(key).copy()
cmap.set_bad(color=color)
cmap.set_under(color=color)
return cmap
folders = sorted(glob.glob("/kaggle/input/medisar*/*/*/*"), reverse=True)
i = 0
pbar = tqdm.tqdm(folders, smoothing=0.001)
for folder in pbar:
content = os.listdir(folder)
if "era5_wind_speed_256.png" not in content:
continue
wind = np.array(PIL.Image.open(folder + "/era5_wind_speed_256.png")) / 10
mean_wind = np.nanmean(wind[wind > 0])
# if mean_wind > 6: continue
mask = np.array(PIL.Image.open(folder + "/mask.png")).astype("float")
if mask.mean() > 0.5:
continue # Removing example with too much land
rain = np.array(PIL.Image.open(folder + "/rain.png"))
if rain[:, :, 2][rain[:, :, 3] != 0].mean() < 0.002 * 255:
continue # Remove example with too low rain
sar = np.array(PIL.Image.open(glob.glob(folder + "/*vv*")[0]))
sar_vh = np.array(PIL.Image.open(glob.glob(folder + "/*vh*")[0]))
rain = resize(rain, sar.shape[::-1], interpolation=cv2.INTER_NEAREST)
rain[rain[:, :, 3] < 128] = (128, 128, 128, 255)
plt.figure(figsize=(20, 6))
plt.suptitle(os.path.split(folder)[1])
plt.subplot(131)
plt.imshow(sar, cmap="gray", vmin=0, vmax=2**15)
format(sar.shape)
plt.subplot(132)
plt.imshow(sar_vh, cmap="gray", vmin=0, vmax=2**12)
format(sar.shape)
plt.subplot(133)
plt.imshow(rain)
format(sar.shape)
plt.tight_layout()
plt.show()
plt.close()
i += 1
if i == 50:
break
i = 0
max_slicks = 0
pbar = tqdm.tqdm(folders, smoothing=0.001)
for folder in pbar:
content = os.listdir(folder)
if "era5_wind_speed_256.png" not in content:
continue
wind = np.array(PIL.Image.open(folder + "/era5_wind_speed_256.png")) / 10
if wind[wind > 0].mean() > 5:
        continue  # Remove example with too much wind
mask = np.array(PIL.Image.open(folder + "/mask.png")).astype("float")
if mask.mean() > 0.5:
continue # Removing example with too much land
slicks = np.array(PIL.Image.open(folder + "/biological_slicks.png")) / 255
slicks[slicks < 0.75] = 0
max_slicks = max(max_slicks, slicks.mean())
pbar.set_description(f"{max_slicks}")
if slicks.mean() < 0.03:
continue # Remove example with too low slicks
sar = np.array(PIL.Image.open(glob.glob(folder + "/*vv*")[0]))
slicks = resize(slicks, sar.shape[::-1], interpolation=cv2.INTER_NEAREST)
slicks[resize(mask, slicks.shape[::-1]) > 0.5] = np.nan
plt.figure(figsize=(20, 8))
plt.suptitle(os.path.split(folder)[1])
plt.subplot(121)
plt.imshow(sar, cmap="gray", vmin=0, vmax=2**12)
plt.colorbar(orientation="horizontal", fraction=0.046)
format(sar.shape)
plt.subplot(122)
plt.imshow(slicks, cmap=colormap("gray"), vmin=-(10**-5), vmax=1)
plt.colorbar(orientation="horizontal", fraction=0.046)
format(sar.shape)
plt.tight_layout()
plt.show()
plt.close()
i += 1
if i == 50:
break
i = 0
pbar = tqdm.tqdm(folders, smoothing=0.001)
for folder in pbar:
content = os.listdir(folder)
if "era5_wind_speed_256.png" not in content:
continue
wind = np.array(PIL.Image.open(folder + "/era5_wind_speed_256.png")) / 10
mean_wind = np.nanmean(wind[wind > 0])
if mean_wind < 9:
continue
if mean_wind > 12:
continue
mask = np.array(PIL.Image.open(folder + "/mask.png")).astype("float")
convection = np.array(PIL.Image.open(folder + "/convection.png")) / 255
if convection.mean() < 0.05:
continue # Remove example with too low convection
if convection.mean() > 0.1:
continue # Remove example with too much convection (probably false alarm)
sar = np.array(PIL.Image.open(glob.glob(folder + "/*vv*")[0]))
convection = resize(convection, sar.shape[::-1], interpolation=cv2.INTER_NEAREST)
convection[resize(mask, convection.shape[::-1]) > 0.5] = np.nan
plt.figure(figsize=(20, 8))
plt.suptitle(os.path.split(folder)[1])
plt.subplot(121)
plt.imshow(sar, cmap="gray", vmin=0, vmax=2**15)
plt.colorbar(orientation="horizontal", fraction=0.046)
format(sar.shape)
plt.subplot(122)
plt.imshow(convection, cmap=colormap("gray"), vmin=-(10**-5), vmax=1)
plt.colorbar(orientation="horizontal", fraction=0.046)
format(sar.shape)
plt.tight_layout()
plt.show()
plt.close()
i += 1
if i == 50:
break
|
# Analysis of Pokémon Database
# Creator: alopez247
# Notebook Author: João Paulo Ribeiro dos Santos (joaopauloribsantos)
# This notebook is intended to apply some concepts and methods that I am learning. For this reason, it will be updated frequently until I can answer two questions that have always puzzled me about Pokémon:
#
# What are the ten most powerful Pokémon? And how are their stats distributed?
# Which attributes stand out most in my favorite Pokémon?
#
#
# Libraries Import
# The following code imports the main libraries that we will use in this notebook
import numpy as np
import pandas as pd
import sklearn as sk
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rc
from matplotlib.colors import ListedColormap
from sklearn import preprocessing
# Let's Go
# Notebook Configurations
pd.set_option("display.max_columns", None)
# Database Import
# In the code below we are creating a dataframe from the pokemon database
df_pokemon = pd.read_csv("../input/pokemon/pokemon_alopez247.csv")
# Dataframe Operations/ Analysis
# Dataframe dimension
df_pokemon.shape
# 721 Rows and 23 Columns
# Columns of dataframe
df_pokemon.columns
# Columns Types
df_pokemon.dtypes
# Viewing the 3 first rows
df_pokemon.head(3)
# Viewing the 3 last rows
df_pokemon.tail(3)
# The column 'Number' on the dataframe is like the pokemon ID, so we can consider it the index of the dataframe
df_pokemon.set_index(["Number"], inplace=True)
# Handling Null/ Nan Values
# Verifying if exists NAN values on dataset
null_columns = df_pokemon.columns[df_pokemon.isnull().any()]
df_pokemon[null_columns].isnull().sum()
# Percentage of Null/ Nan Values per column in dataframe
df_pokemon[null_columns].isnull().sum() * 100 / len(df_pokemon)
# Creating a HeatMap plot to show the null of all values in the entire dataframe
plt.figure(figsize=(20, 10))
pl = sns.heatmap(df_pokemon.isnull(), cmap="Greens", cbar=False)
pl.set_xticklabels(pl.get_xticklabels(), rotation=30)
plt.show()
# Verifying the types of data in column Type_2
df_pokemon["Type_2"].value_counts(dropna=False)
# Verifying the types of data in column Pr_Male
df_pokemon["Pr_Male"].value_counts(dropna=False)
# Verifying the types of data in column Egg_Group_2
df_pokemon["Egg_Group_2"].value_counts(dropna=False)
# According to website bulbapedia (https://bulbapedia.bulbagarden.net/wiki/Egg_Group):
## Egg Groups are categories which determine which Pokémon are able to interbreed.
## The concept was introduced in Generation II, along with breeding. Similar to types,
## a Pokémon may belong to either one or two Egg Groups
# Replacing null values
df_pokemon["Egg_Group_2"].fillna("Undiscovered", inplace=True)
# According to website bulbapedia (https://bulbapedia.bulbagarden.net/wiki/%3F%3F%3F_(type):
## The ??? type is a type that exists only in Generations II, III, and IV.
## It was removed in the Generation V games and has not returned.
# Replacing null values
df_pokemon["Type_2"].fillna("???", inplace=True)
# Checking more about the column 'Pr_Male'
# Pr_Male = Probability of a pokemon being male
df_pokemon[df_pokemon["Pr_Male"].isnull()].loc[
:, ["Name", "Type_1", "Type_2", "isLegendary", "hasGender"]
]
# The Pokémon that have no gender are the same ones that have no probability of being male
df_pokemon[df_pokemon["hasGender"] == False & df_pokemon["Pr_Male"].isnull()].loc[
:, ["Name", "Type_1", "Type_2", "isLegendary", "hasGender"]
]
df_pokemon.columns
# As the variable 'Pr_Male' is entirely dependent on the variable 'hasGender',
# and so far there is no reason to consider it in the analysis,
# this variable will be dropped in a new dataframe.
# Creating the new Dataframe
df_pokemon_an_01 = df_pokemon.drop(["Pr_Male"], axis=1)
df_pokemon_an_01.head(3)
# Exploratory Data Analysis (EDA)
df_pokemon_eda = df_pokemon_an_01.drop(["Name"], axis=1)
# Show the main dataframe statistics
df_pokemon_eda.describe()
# The variable 'Name' describes only a categorical pokemon characteristic,
# much like the number on a football player's shirt.
# The following code shows the boxplot of all numeric variables / columns.
plt.figure(figsize=(20, 15))
sns.boxplot(data=df_pokemon_eda)
plt.show()
# Generating a table with the correlation of all variables
df_pokemon_eda[
[
"Total",
"HP",
"Attack",
"Defense",
"Sp_Atk",
"Sp_Def",
"Speed",
"Generation",
"Height_m",
"Weight_kg",
"Catch_Rate",
]
].corr()
# As we saw earlier, the correlation between some variables is significantly weak, however,
# there are some columns with a relatively high correlation, such as 'Total' and 'Attack.'
sns.lmplot(x="Attack", y="Total", data=df_pokemon_eda)
plt.show()
# Detecting the outliers
# In this section, we will create a function that returns the main data related to outliers
def fn_validate_catching_outliers_values(p_df_dataframe, p_column):
"""
Description:
Validates information related to the
dataframe and its column, before proceeding
with the function 'fn_catching_outliers'.
Keyword arguments:
p_df_dataframe -- the dataframe
p_column -- the dataframe column
Return:
None
Exception:
Validates that the dataframe is empty;
Validates whether the column exists on the dataframe;
Validates whether the column is a numeric type
"""
if p_df_dataframe.empty:
raise Exception("The dataframe is empty")
if p_column not in p_df_dataframe.columns:
raise Exception("The column does not exist in the dataframe")
if not np.issubdtype(p_df_dataframe[p_column].dtype, np.number):
raise Exception("The informed column doesn't have the numeric type.")
def fn_catching_outliers(p_df_dataframe, p_column):
"""
Description:
Function that locates outliers in an informed dataframe.
Keyword arguments:
p_df_dataframe -- the dataframe
p_column -- the dataframe column
Return:
df_with_outliers -- Dataframe with the outliers located
    df_without_outliers -- Dataframe without the outliers
Exception:
None
"""
# Check if the information passed is valid.
fn_validate_catching_outliers_values(p_df_dataframe, p_column)
    # Calculate the first and the third quartile of the column
    quartile_1, quartile_3 = np.percentile(p_df_dataframe[p_column], [25, 75])
    # Calculate the interquartile range
    iqr = quartile_3 - quartile_1
    # Generating the fence high and low values
    fence_high = quartile_3 + (1.5 * iqr)
    fence_low = quartile_1 - (1.5 * iqr)
    # Finally we generate two dataframes, one with the outlier values and the other with the values within the fences
    df_without_outliers = p_df_dataframe[
        (p_df_dataframe[p_column] >= fence_low)
        & (p_df_dataframe[p_column] <= fence_high)
    ]
    df_with_outliers = p_df_dataframe[
        (p_df_dataframe[p_column] < fence_low)
        | (p_df_dataframe[p_column] > fence_high)
    ]
if df_with_outliers.empty:
print("No outliers were detected.")
return df_with_outliers, df_without_outliers
df_pokemon_out, _ = fn_catching_outliers(df_pokemon_eda, "Attack")
df_pokemon_out.head(3)
# To provide greater accuracy to the model, it will be necessary to apply some statistical methods to
# the categorical variables, such as 'dummies', 'label enconding', etc ...
# Identify the amount of unique data per non-numeric column.
df_pokemon_eda[df_pokemon_eda.select_dtypes(exclude=np.number).columns].nunique()
# Given that categorical variables / columns have more than 10 different types of values,
# it will be necessary to apply the scikit-learn label encoding method / function.
encoder = preprocessing.LabelEncoder()
categorical_columns = [
"Type_1",
"Type_2",
"Color",
"Egg_Group_1",
"Egg_Group_2",
"Body_Style",
]
for col in categorical_columns:
    # Assign the encoded array directly; building a new DataFrame with a default
    # RangeIndex would misalign with df_pokemon_eda's 'Number' index.
    df_pokemon_eda["encoder_" + col] = encoder.fit_transform(df_pokemon_eda[col])
df_pokemon_eda.head(3)
# An interesting point to highlight is Catch_Rate, which corresponds to the chances of capturing a pokemon,
# which varies from 3 to 245 and it is often not clear whether the pokemon is really
# difficult or not to be captured. For this reason, it will be necessary to convert this data into a percentage
df_pokemon_eda["Catch_Rate"] = (df_pokemon_eda["Catch_Rate"] * 100) / 245
# Generating a table with the correlation of all variables
df_pokemon_correlation = df_pokemon_eda[
df_pokemon_eda.select_dtypes(exclude=["object"]).columns
].corr()
df_pokemon_correlation.columns
df_pokemon_correlation
# The table above contains information on the variables that most correlate.
# Next, a heatmap will be created so that we can see this correlation in a more interesting way.
mask_pk = np.zeros_like(df_pokemon_correlation, dtype=bool)
mask_pk[np.triu_indices_from(mask_pk)] = True
plt.figure(figsize=(22, 18))
heat_map = sns.heatmap(
df_pokemon_correlation, vmin=-1, cmap="coolwarm", annot=True, mask=mask_pk
)
heat_map.set_xticklabels(heat_map.get_xticklabels(), rotation=35)
plt.show()
# Regarding the correlation, we can highlight:
# - Given that the Total is the result of the sum of the variables, Attack, Defense, Sp_Atk and Sp_Def, the relatively high correlation between both is normal;
# - There is a somewhat strong and negative correlation between Total and Catch_Rate, after all, most of the time, more powerful pokemons are more difficult to be captured;
# - Strangely, the correlation between Height_m and Weight_kg is not very strong, but just as there are tall people who weigh little, there are also pokemons with these characteristics, like Rayquaza
# Creating a regression plot to analyze the Height_m and Weight_km variables
sns.lmplot(x="Height_m", y="Weight_kg", data=df_pokemon_eda)
plt.show()
# What are the ten most powerful pokemons? And how is the distribution of their stats?
df_pokemon_top_10_total = df_pokemon.sort_values(by="Total", ascending=False).head(10)
df_pokemon_top_10_total.set_index(["Name"], inplace=True)
df_pokemon_top_10_total.drop(
columns=[
"Type_1",
"Type_2",
"Generation",
"isLegendary",
"Color",
"hasGender",
"Pr_Male",
"Egg_Group_1",
"Egg_Group_2",
"hasMegaEvolution",
"Height_m",
"Weight_kg",
"Catch_Rate",
"Body_Style",
"HP",
"Speed",
],
inplace=True,
)
df_pokemon_top_10_total.transpose()
# With the table above, it is already possible to answer which are the
# 10 most powerful pokemons according to the sum of their stats.
# Below is a code that gives us the same view of the table, but using bar graphs.
# List with the pokemons' names
lst_pokemons_names = df_pokemon_top_10_total.index.values.tolist()
# Array for each studied stats
np_top10_pokemons_attack = np.array(df_pokemon_top_10_total["Attack"].values.tolist())
np_top10_pokemons_defense = np.array(df_pokemon_top_10_total["Defense"].values.tolist())
np_top10_pokemons_sp_atk = np.array(df_pokemon_top_10_total["Sp_Atk"].values.tolist())
np_top10_pokemons_sp_def = np.array(df_pokemon_top_10_total["Sp_Def"].values.tolist())
# Array Sums
snum = (
np_top10_pokemons_attack
+ np_top10_pokemons_defense
+ np_top10_pokemons_sp_atk
+ np_top10_pokemons_sp_def
)
# Normalizing the data of arrays
np_top10_pokemons_attack = np_top10_pokemons_attack / snum * 100.0
np_top10_pokemons_defense = np_top10_pokemons_defense / snum * 100.0
np_top10_pokemons_sp_atk = np_top10_pokemons_sp_atk / snum * 100.0
np_top10_pokemons_sp_def = np_top10_pokemons_sp_def / snum * 100.0
# Figure / graph size
plt.figure(figsize=(20, 20))
plt.title(
"Distribution of the stats of the 10 most powerful pokemons",
fontdict={"fontsize": 36},
)
# Setting fonts and sizes
font = {"weight": "bold", "size": 20}
plt.rc("font", **font)
# Generating the bar graph of each stats
plt.bar(lst_pokemons_names, np_top10_pokemons_attack, label="Attack")
plt.bar(
lst_pokemons_names,
np_top10_pokemons_defense,
bottom=np_top10_pokemons_attack,
label="Defense",
)
plt.bar(
lst_pokemons_names,
np_top10_pokemons_sp_atk,
bottom=np_top10_pokemons_attack + np_top10_pokemons_defense,
label="Special Attack",
)
plt.bar(
lst_pokemons_names,
np_top10_pokemons_sp_def,
bottom=np_top10_pokemons_attack
+ np_top10_pokemons_defense
+ np_top10_pokemons_sp_atk,
label="Special Defense",
)
# Adding text with the percentage of each stat in relation to the total amount
for xpos, ypos, yval in zip(
lst_pokemons_names, np_top10_pokemons_attack / 2, np_top10_pokemons_attack
):
plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
for xpos, ypos, yval in zip(
lst_pokemons_names,
np_top10_pokemons_attack + np_top10_pokemons_defense / 2,
np_top10_pokemons_defense,
):
plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
for xpos, ypos, yval in zip(
lst_pokemons_names,
    np_top10_pokemons_attack + np_top10_pokemons_defense + np_top10_pokemons_sp_atk / 2,
np_top10_pokemons_sp_atk,
):
plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
for xpos, ypos, yval in zip(
lst_pokemons_names,
    np_top10_pokemons_attack
    + np_top10_pokemons_defense
    + np_top10_pokemons_sp_atk
    + np_top10_pokemons_sp_def / 2,
np_top10_pokemons_sp_def,
):
plt.text(xpos, ypos, "%.1f" % yval + "%", ha="center", va="center")
plt.ylim(0, 110)
plt.legend(bbox_to_anchor=(1.01, 0.5), loc="center left")
plt.show()
# Which attributes stand out most in my favorite pokemon?
# First of all, I must emphasize that I like dragons a lot and since I was a child I've always loved pokemons with this style, so my favorite pokemon has always been Charizard
# Finding my favorite Pokémon
df_pokemon_favorite = df_pokemon.set_index(["Name"]).loc[
["Charizard"],
["Total", "HP", "Attack", "Defense", "Sp_Atk", "Sp_Def", "Speed", "Catch_Rate"],
]
df_pokemon_favorite["Catch_Rate"] = (df_pokemon_favorite["Catch_Rate"] * 100) / 245
df_pokemon_favorite
# Next we will compare the attributes of my favorite pokemon with the average of all other pokemon.
bar_width = 0.25
bars_pokemon = df_pokemon_favorite.values.tolist()[0]
bars_mean_pokemon = np.array(
df_pokemon_eda.loc[
:,
["Total", "HP", "Attack", "Defense", "Sp_Atk", "Sp_Def", "Speed", "Catch_Rate"],
]
.mean()
.values.tolist()
)
str_pokemon_favorite_name = "".join(df_pokemon_favorite.index.format())
r1 = np.arange(len(bars_pokemon))
r2 = [x + bar_width for x in r1]
plt.figure(figsize=(20, 20))
plt.title(
"Comparison of the stats of the favorite pokemon with the average of all pokemon.",
fontdict={"fontsize": 36},
)
plt.bar(
r1,
bars_pokemon,
color="red",
width=bar_width,
edgecolor="white",
label=str_pokemon_favorite_name,
)
plt.bar(
r2,
bars_mean_pokemon,
color="#557f2d",
width=bar_width,
edgecolor="white",
label="Mean of All Pokemons",
)
plt.xticks(
[r + bar_width for r in range(len(bars_pokemon))],
df_pokemon_favorite.columns.values.tolist(),
)
plt.legend(bbox_to_anchor=(1.01, 0.5), loc="center left")
plt.show()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.float_format", lambda x: "%.3f" % x)
pd.set_option("display.max_rows", 1000)
pd.set_option("display.max_columns", 1000)
plt.rcParams["figure.figsize"] = (11, 5)
train_df = pd.read_csv("/kaggle/input/titanic/train.csv", index_col=False)
train_df.head(100)
train_df.describe()
train_df.isna().sum()
train_df["GenderCode"] = train_df.Sex.astype("category").cat.codes
train_df["FamilySize"] = train_df.SibSp + train_df.Parch + 1
train_df["hasCabin"] = ~train_df.Cabin.isna()
train_df.Age.fillna(
train_df.groupby(["Pclass", "Sex", "FamilySize"]).Age.transform("median"),
inplace=True,
)
train_df.Embarked.fillna(train_df.Embarked.mode()[0], inplace=True)
train_df.isna().sum()
def plot_feature(train_df, feature):
survived_df = train_df[train_df.Survived == True][feature].value_counts()
dead_df = train_df[train_df.Survived == False][feature].value_counts()
counts_df = pd.DataFrame([survived_df, dead_df], index=["Survived", "Dead"])
counts_df.plot(
kind="barh", stacked=True, legend=True, title="Frequency by " + feature
)
(counts_df.div(counts_df.sum(axis=1), axis=0) * 100).plot(
kind="barh", stacked=True, legend=True, title="Proportion by " + feature
)
def plot_kde(train_df, split_feature, kde_feature):
legend = sorted(train_df.dropna(subset=[split_feature])[split_feature].unique())
for key in legend:
sns.kdeplot(
train_df[train_df[split_feature] == key][kde_feature], shade=True
) # .plot(kind='kde',legend=True)
plt.legend(tuple(legend))
plot_feature(train_df, "Sex")
plot_feature(train_df, "Pclass")
plot_feature(train_df, "Embarked")
plot_feature(train_df, "FamilySize")
plt.clf()
plot_kde(train_df, "Pclass", "Fare")
plt.show()
plt.clf()
plot_kde(train_df, "Pclass", "Age")
plt.show()
plt.rcParams["figure.figsize"] = (11, 5)
plt.clf()
plot_kde(train_df, "Embarked", "Fare")
plt.show()
# plt.clf()
# plot_kde(train_df,'Embarked','Age')
# plt.show()
plt.rcParams["figure.figsize"] = (11, 5)
plt.clf()
plot_kde(train_df, "Sex", "Fare")
plt.show()
plt.clf()
plot_kde(train_df, "Sex", "Age")
plt.show()
plt.rcParams["figure.figsize"] = (12, 8)
survived_df = train_df[train_df.Survived == True]
dead_df = train_df[train_df.Survived == False]
sns.scatterplot(
train_df.Age,
train_df.Fare,
hue=train_df.Survived,
size=train_df.Fare,
palette={0: "red", 1: "green"},
)
plt.rcParams["figure.figsize"] = (13, 10)
sns.heatmap(train_df.corr(), annot=True)
train_df["GenderAndClass"] = list(zip(train_df.Sex, train_df.Pclass))
train_df.sort_values(by="GenderAndClass", inplace=True)
sns.violinplot(
train_df.Sex,
train_df.Age,
hue=train_df.Survived,
split=True,
palette={0: "r", 1: "g"},
)
sns.violinplot(
train_df.GenderAndClass,
train_df.Age,
hue=train_df.Survived,
split=True,
palette={0: "r", 1: "g"},
)
sns.violinplot(
train_df.Embarked,
train_df.Fare,
hue=train_df.Survived,
split=True,
palette={0: "r", 1: "g"},
)
plt.rcParams["figure.figsize"] = (9, 6)
train_df.groupby("Pclass").mean()[["Fare", "Age"]].plot(
kind="bar", title="Mean Age and Fare by Class"
)
plt.hist(
[
train_df[(train_df.Survived == False)]["Fare"],
train_df[(train_df.Survived == True)]["Fare"],
],
color=["r", "g"],
label=["Dead", "Survived"],
stacked=True,
bins=30,
)
plt.title("Ticket Fare Histogram")
plt.xlabel("Fare")
plt.ylabel("Frequency")
plt.legend()
plt.hist(
[train_df[train_df.Survived == False].Age, train_df[train_df.Survived == True].Age],
color=["r", "g"],
stacked=True,
label=["Dead", "Survived"],
bins=20,
)
plt.title("Age Histogram")
plt.xlabel("Age")
plt.ylabel("Frequency")
plt.legend()
plt.hist(
[
train_df[(train_df.Survived == False) & (train_df.Sex == "male")].Age,
train_df[(train_df.Survived == True) & (train_df.Sex == "male")].Age,
train_df[(train_df.Survived == False) & (train_df.Sex == "female")].Age,
train_df[(train_df.Survived == True) & (train_df.Sex == "female")].Age,
],
color=["red", "green", "indianred", "lime"],
stacked=True,
label=["Dead Male", "Rescued Male", "Dead Female", "Rescued Female"],
bins=20,
)
plt.title("Age Histogram")
plt.xlabel("Age")
plt.ylabel("Frequency")
plt.legend()
# Extract titles and do EDA
# Tease info from
# Figure out how to judge importance of features
# Apply basic models
# Learn how to clean NaNs by binning according to specific categories and then taking median
train_df["Title"] = train_df.Name.apply(lambda x: x.split(",")[1].split(".")[0].strip())
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import scipy
from scipy import stats
from scipy.stats import norm, skew
import warnings
def ignore_warn(*args, **kwargs):
pass
warnings.warn = ignore_warn # ignore warnings from imported libraries
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Raditya Pratama
# #
# # Forecasting video game sales prediction using regression
# The following model aims to **predict the sales outcome of a video game title in a given period of time**. It tries to forecast the copies sold in the observed regions across the world, predominantly North America, Europe, Japan and others.
# **The scope of the problem addressed is how video game publishers can predict the value and profitability of video game sales**, based on the features that affect audience interest: for example, how attractive the *name* is, which *platforms* are popular with players at the time, and the *genre*, which captures users' liking for a certain style of game. Global sales over time are also observed to show in which region a specific video game rates highest - regions vary due to differences in income, status and access to technology.
# In this project, regression methods are applied to the imported dataset. The main issue is that the categorical independent variables such as *platform, genre, publisher* are not quantitative, therefore they must be converted to numeric values before proceeding.
# The dataset description involves the registered list of video game titles with over 100,000 copies sold in multiple regions.
# Variable fields include:
# Rank - Ranking of overall sales
# * Name - The games name
# * Platform - Platform of the games release (Wii, Xbox, PS, PC)
#
# * Year - Year of the game's release
#
# * Genre - Genre of the game
#
# * Publisher - Publisher of the game
#
# * NA_Sales - Sales in North America (in millions)
#
# * EU_Sales - Sales in Europe (in millions)
#
# * JP_Sales - Sales in Japan (in millions)
#
# * Other_Sales - Sales in the rest of the world (in millions)
#
# * Global_Sales - Total worldwide sales.
# * Critic_Score - given score from reviewers
# * User_Score - given score from audiences who bought the game
# * Developer - Developer of the game
# * Rating - Age rating of the video game mentioned
# # Initialization of preprocessing the data:
# * Collect the data: gather the data on various features that affect the sale of the video game, such as the platform used (Switch, Xbox, PS), its rating, its publisher and marketing budget.
# * Prepare the data: clean the data and remove missing values or outliers, then encode the categorical variables into numerical ones (preprocessing with scikit-learn) using one-hot encoding/label encoding
# * Split the data: the data will be split into training and testing sets; the training set is used to train the regression model and the testing set is used to evaluate performance
# * Regression model selection: **random forest regression and support vector regression** will be used in this project to *predict global sales*
# * Train the model: Fit the regression model into training data and tune the hyperparameters like cross validation
# * Evaluate the model: Mean squared error or R squared can be used to evaluate the performance
# * Make predictions: use the trained model to predict new video game title sales forecast and what factors driven to make the sales increase
# # Data preprocessing steps:
# Continuing the discussion of platform, genre and publisher, the categorical variables will be handled with the following method:
# The model will use label encoding for the aforementioned categorical variables; each label is assigned a unique integer based on alphabetical ordering so it can be read by the machine learning model during preprocessing for supervised learning.
# Therefore, the first step of the preprocessing is to use the label encoder from the sklearn library to replace each categorical value with an integer between 0 and the number of classes - 1 (see the short sketch below).
# the proposed step:
# * from sklearn.preprocessing import LabelEncoder
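# Below is a minimal, self-contained sketch of the proposed label-encoding step
# (illustrative only; the toy column values are assumptions and the notebook later
# uses pd.get_dummies for the actual dataset):
from sklearn.preprocessing import LabelEncoder
import pandas as pd
toy = pd.DataFrame({"Platform": ["Wii", "PS4", "PC", "Wii"]})
toy_encoder = LabelEncoder()
toy["Platform_encoded"] = toy_encoder.fit_transform(toy["Platform"])
print(toy)
# Each label is mapped to an integer in [0, n_classes - 1], ordered alphabetically:
# PC -> 0, PS4 -> 1, Wii -> 2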
# Methodology used in data preprocessing:
# 
# Data preparation and analysis:
# Statistical aspects:
# Check data dimension
# Rows, columns and column names
# Data types
# Cleaning the data
# look for missing data and outliers
# identify and convert the categorical values to numerical representation
# Statistical calculation:
# find relationship of columns and how it affects
# check correlation and chi square
# correlation relates to numerical columns and chi square relates to categorical columns
# Graphical representation of data:
# Perform visualization on dataset
data = pd.read_csv(
"/kaggle/input/video-games-sales-as-at-22-dec-2016csv/Video_Games_Sales_as_at_22_Dec_2016.csv"
)
data1 = data.copy()
display(data1.head())
display(data1.tail())
# given 16 columns:
# * global sales will be taken as the dependent variable - the one we are trying to predict
# * independent variables - will be either the critic score or the user score: these reflect how much users like a game
data1.info()
print(data.shape)
# identifying outliers / missing data
fig, ax = plt.subplots()
ax.scatter(x=data["Critic_Score"], y=data["Global_Sales"])
plt.ylabel("Global_Sales", fontsize=13)
plt.xlabel("Critic_Score", fontsize=13)
plt.show()
# on the Global_Sales y axis, there is one outlier at a y value of around 80 - we need to remove it
data = data.drop(data[(data["Critic_Score"] > 60) & (data["Global_Sales"] > 60)].index)
print(data)
# identifying outliers / missing data
fig, ax = plt.subplots()
ax.scatter(x=data["Critic_Score"], y=data["Global_Sales"])
plt.ylabel("Global_Sales", fontsize=13)
plt.xlabel("Critic_Score", fontsize=13)
plt.show()
# outlier(s) removed
# Check the distribution of the dependent variable; the target we need to predict is the *Global_Sales* variable
sns.distplot(data["Global_Sales"], fit=norm)
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(data["Global_Sales"])
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
# Now plot the distribution
plt.legend(
["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best"
)
plt.ylabel("Frequency")
plt.title("Global_Sales distribution")
# Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(data["Global_Sales"], plot=plt)
plt.show()
# The results show that the distribution of Global_Sales is not normal at all; it is heavily concentrated on the left. This means further investigation is required before splitting the data into training and testing sets.
# To avoid this type of problem, inspect the feature correlations so that the chosen variables are not strongly correlated with each other. This can be an issue in linear regression when estimating the coefficients.
# Now, we need to plot correlation heatmap with the aid of seaborn.
str_list = [] # empty list to contain columns with strings (words)
for colname, colvalue in data.items():
if type(colvalue[2]) == str:
str_list.append(colname)
# Get to the numeric columns by inversion
num_list = data.columns.difference(str_list)
# Create Dataframe containing only numerical features
data_num = data[num_list]
f, ax = plt.subplots(figsize=(14, 11))
plt.title("Pearson Correlation of Video Game Numerical Features")
# Draw the heatmap using seaborn
sns.heatmap(
data_num.astype(float).corr(),
linewidths=0.25,
vmax=1.0,
square=True,
cmap="cubehelix_r",
linecolor="k",
annot=True,
)
# Now, the purpose of creating this heatmap is to improve confidence in choosing which variables to select. The independent variables in this heatmap are not highly correlated, *except for the regional sales numbers, which relate to each other.* Sales are therefore the measure of success in video game publishing: if a game sells well in one region, the other regions tend to follow. This gives confidence in using the Global_Sales variable as the target of the analysis.
# # Steps to remove the variables that are not required while keeping the crucial variables that relate to Global_Sales
# **Most importantly, remove rows with N/A or missing data**
# Afterwards, derive features through feature engineering
data_na = (data.isnull().sum() / len(data)) * 100
data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({"Missing Ratio": data_na})
missing_data.head(16)
# There is a lot of missing data in Critic_Score, which accounts for a 51.33 % missing ratio. User_Score and Critic_Score behave more or less the same and would be good candidates for independent variables, but with so much missing data they cannot simply be filled with median values.
# We need to check other variables that can factor into game sales, preferably the video game platforms, and keep only the consoles that are popular (commonly occurring) in this data in order to reduce the missing ratio of the chosen independent variables.
print(pd.value_counts(data["Platform"]))
# It is best to keep the consoles that were relevant at the time; platforms with counts above 140 are considered, which ranges from the Wii U up to the PS4/Xbox One as the new-generation consoles.
# This prevents the minimal data issue by keeping relevant consoles.
# use | as OR function in Python
data = data[
(data["Platform"] == "PS3")
| (data["Platform"] == "PS4")
| (data["Platform"] == "X360")
| (data["Platform"] == "XOne")
| (data["Platform"] == "Wii")
| (data["Platform"] == "WiiU")
| (data["Platform"] == "PC")
]
# Let's double check the value counts to be sure
print(pd.value_counts(data["Platform"]))
# Let's see the shape of the data again
print(data.shape)
# Lets see the missing ratios again
data_na = (data.isnull().sum() / len(data)) * 100
data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({"Missing Ratio": data_na})
missing_data.head(16)
# 38% missing data points is still too large to implement median, therefore, drop all rows that have N/A content in Critic_Score column
data = data.dropna(subset=["Critic_Score"])
# Let's see the shape of the data again
print(data.shape)
# Lets see the missing ratios again
data_na = (data.isnull().sum() / len(data)) * 100
data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({"Missing Ratio": data_na})
missing_data.head(16)
# Dealing further with none values
# Publisher, fill N/A with mode:
data["Publisher"] = data["Publisher"].fillna(data["Publisher"].mode()[0])
# Developer fill N/A with mode
data["Developer"] = data["Developer"].fillna(data["Developer"].mode()[0])
# Rating fill N/A with mode
data["Rating"] = data["Rating"].fillna(data["Rating"].mode()[0])
# Release year with median
data["Year_of_Release"] = data["Year_of_Release"].fillna(
data["Year_of_Release"].median()
)
# User score and User Count with median
data["User_Score"] = data["User_Score"].replace("tbd", None)
data["User_Score"] = data["User_Score"].fillna(data["User_Score"].median())
data["User_Count"] = data["User_Count"].fillna(data["User_Count"].median())
# check for missing ratios
data_na = (data.isnull().sum() / len(data)) * 100
data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({"Missing Ratio": data_na})
missing_data.head(16)
# Create dummies for the variables aforementioned:
print(data.shape) # pre-dummies
data = pd.get_dummies(data=data, columns=["Platform", "Genre", "Rating"])
print(data.shape) # post-dummies
data.head()
# # Important step: at this point, with all of the data prepared and before splitting it into training and testing sets
# **Ensure that ONLY the required columns are used for training and testing: Year_of_Release, Critic_Score, Critic_Count, User_Score, User_Count and the Platform/Genre/Rating dummy variables are taken into account**
print(data.columns) # easy to copy-paste the values to rearrange from here
X = data[
[
"Year_of_Release",
"Critic_Score",
"Critic_Count",
"User_Score",
"User_Count",
"Platform_PC",
"Platform_PS3",
"Platform_PS4",
"Platform_Wii",
"Platform_WiiU",
"Platform_X360",
"Platform_XOne",
"Genre_Action",
"Genre_Adventure",
"Genre_Fighting",
"Genre_Misc",
"Genre_Platform",
"Genre_Puzzle",
"Genre_Racing",
"Genre_Role-Playing",
"Genre_Shooter",
"Genre_Simulation",
"Genre_Sports",
"Genre_Strategy",
"Rating_E",
"Rating_E10+",
"Rating_M",
"Rating_RP",
"Rating_T",
]
]
Y = data[["Global_Sales"]]
# Double checking the shape
print(X.shape)
print(Y.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=42)
# Let's check the shape of the split data as a precaution
print("X_train shape: {}".format(X_train.shape))
print("Y_train shape: {}".format(Y_train.shape))
print("X_test shape: {}".format(X_test.shape))
print("Y_test shape: {}".format(Y_test.shape))
# Now, before fitting the final models, transform the target first using the log transformation log(1+x); numpy provides this as the function log1p
Y_train = np.log1p(Y_train)
Y_test = np.log1p(Y_test)
# Draw the new distribution
Y_log_transformed = np.log1p(
data["Global_Sales"]
) # For comparison to earlier, here's the whole Y transformed
sns.distplot(Y_log_transformed, fit=norm)
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(Y_log_transformed)
print("\n mu = {:.2f} and sigma = {:.2f}\n".format(mu, sigma))
# plot the distribution
plt.legend(
["Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )".format(mu, sigma)], loc="best"
)
plt.ylabel("Frequency")
plt.title("Global_Sales distribution")
# Draw the QQ-plot
fig = plt.figure()
res = stats.probplot(Y_log_transformed, plot=plt)
plt.show()
# Now to fit the data into the models. We fit the MinMaxScaler from scikit-learn on X_train and apply it to both X_train and X_test so that all the independent variables lie in similar ranges.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# # Regression Model: Code
# 1. Support Vector Regressor
# 2. Random Forest
# begin making the model by making parameter grids
param_grid_lr = [{}]
# ----SVR----#
param_grid_svr = [
{"C": [0.01, 0.1, 1, 10], "gamma": [0.0001, 0.001, 0.01, 0.1, 1], "kernel": ["rbf"]}
]
# ----Random Forest----#
param_grid_rf = [
{
"n_estimators": [3, 10, 30, 50, 70],
"max_features": [2, 4, 6, 8, 10, 12],
"max_depth": [2, 3, 5, 7, 9],
}
]
# After creating the parameters, implement the models sequentially and assess which model is best to forecast video game sales
# To assess the models, use **RMSE (Root Mean Squared Error)**: *the standard deviation of the residuals (prediction errors), basically a way to measure the differences between predicted and actual values and the error of a given model when it is used to predict quantitative data.*
# RMSE indicates how accurately the model can predict the video game sales response.
# Expressed in original units, RMSE tells the error in actual sales units.
# Note: an exponential transformation is required on the RMSE scores if the RMSE was computed on logarithmic values - otherwise it remains on the log(Global_Sales) scale (a small example follows below)
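# A small illustrative example (hypothetical numbers, not computed from this dataset)
# of turning an RMSE obtained on log1p-transformed targets back into approximate
# sales units with numpy's expm1:
import numpy as np
rmse_log = 0.35  # assumed RMSE on the log1p(Global_Sales) scale
rmse_units = np.expm1(rmse_log)  # rough error expressed in millions of copies
print(round(rmse_units, 2))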
# Use cross validation:
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
grid_search_lr = GridSearchCV(
LinearRegression(), param_grid_lr, scoring="neg_mean_squared_error", cv=5
)
grid_search_lr.fit(X_train, Y_train)
print("Best parameters: {}".format(grid_search_lr.best_params_))
lr_best_cross_val_score = np.sqrt(-grid_search_lr.best_score_)
print("Best cross-validation score: {:.2f}".format(np.expm1(lr_best_cross_val_score)))
lr_score = np.sqrt(-grid_search_lr.score(X_test, Y_test))
print("Test set score: {:.2f}".format(np.expm1(lr_score)))
# # IMPLEMENTATION OF MODELS
# INTRODUCTION, DEFINITIONS, RESULT, SCORE AND EVALUATION
# SUPPORT VECTOR REGRESSION:
# What is Support Vector Regression?
# Support Vector Regression (SVR) is a type of regression algorithm that is based on the concept of *Support Vector Machines (SVM) to determine the relationships between dependent and independent variables*. It is one of the machine learning models used to predict continuous variables, such as stock prices or housing prices, based on input variables or features.
# The main aspect of SVR is to *identify a hyperplane that best fits the training data so that the error between predicted values and actual values is reduced*. So it tries its best to fit in the correct position of hyperplane to separate the data by the largest margin and uses it to predict the value of the dependent variable for the new data.
# This differs from the MSE criterion often adopted in ordinary linear regression models, which minimizes the distance between predicted and actual values.
# The SVR hyperplane is chosen to have a maximum margin; *maximum distance between hyperplane and nearest data points on each side - this is used to segregate the data into 2 classes*: one that lies above and other that lies below the hyperplane.
# Goal: **minimize prediction errors and maximize margins**
# **SVR Formula**:
# *The basic formula for Support Vector Regression (SVR) is similar to that of linear regression, but with additional parameters and constraints to find the optimal hyperplane:*
# y = w^T * x + b
# where:
# y is the predicted output value
# x is the input feature vector
# w is the weight vector that represents the coefficients of the hyperplane
# b is the bias term or intercept
# To find the optimal hyperplane, SVR introduces two additional parameters:
# C: a regularization parameter that controls the tradeoff between the model complexity and the error on the training data.
# ε: a margin parameter that defines the acceptable range of error from the true output value.
# The optimization problem for SVR is to find the weight vector w and the bias term b that minimize the following objective function subject to the constraints:
# minimize (1/2) * ||w||^2 + C * Σ(max(0, |y - y_hat| - ε))
# subject to:
# y_hat = w^T * x_i + b, for all i = 1,2,..,n
# |y - y_hat| <= ε, for all i = 1,2,..,n
# where:
# n is the number of training examples
# y is the true output value
# y_hat is the predicted output value
# ||w||^2 is the L2 norm of the weight vector w
# Σ(max(0, |y - y_hat| - ε)) is the sum of the errors above the margin ε
# The objective function is a tradeoff between the complexity of the model (controlled by ||w||^2) and the error on the training data (controlled by the sum of errors). The constraints ensure that the predicted output values are within the margin ε from the true output values.
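# A minimal sketch (illustrative only, with made-up numbers) of the
# epsilon-insensitive loss term max(0, |y - y_hat| - epsilon) used in the objective above:
import numpy as np
y_true = np.array([3.0, 5.0, 7.0])
y_hat = np.array([3.2, 4.0, 7.05])
epsilon = 0.5
loss = np.maximum(0.0, np.abs(y_true - y_hat) - epsilon)
print(loss)  # [0.  0.5 0. ] - only errors larger than epsilon are penalized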
# To model the relationship between the independent and dependent variables, SVR uses a kernel function to transform the input data into a higher-dimensional feature space. The kernel computes a scalar similarity for each pair of data points. SVR then fits a model in this new feature space and uses it to predict the value of the dependent variable for new data.
# **Given in this model, this Support Vector Regressor model uses** ***RBF - Radial Basis Function Kernel***
# The RBF kernel is a kernel function used to transform the input data into a higher-dimensional feature space so that a linear model can fit the data. It measures the similarity between two examples in the input space using a Gaussian function, which corresponds to an infinite-dimensional feature space.
# K(x, x') = exp(-gamma ||x - x'||^2)
# where x and x' are the input examples, ||x - x'||^2 is the squared Euclidean distance between the examples, and gamma is a hyperparameter that determines the width of the Gaussian function.
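# A minimal sketch (not part of the original analysis) checking the RBF formula above
# by hand against scikit-learn's rbf_kernel on two made-up points:
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
x1 = np.array([[1.0, 2.0]])
x2 = np.array([[2.0, 0.0]])
gamma = 0.5
manual = np.exp(-gamma * np.sum((x1 - x2) ** 2))
print(manual, rbf_kernel(x1, x2, gamma=gamma)[0, 0])  # both are approximately 0.0821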
# Advantages and Disadvantages of RBF:
# Adv:
# 1. RBF is able to capture complex non linear relationship in data which can be mapped into higher dimensional space then a linear hyperplane can be drawn to segregate the data
# 2. Computationally efficient; does not need to compute the full kernel matrix - only the pairwise distances between support vectors and new examples
# 3. Able to handle complex data and can be tuned with gamma parameter
# 4. Only has one hyperparameter: width of Gaussian kernel, so it is simple to adjust compared to other kernels
# Disadv:
# 1. The RBF kernel is sensitive to the choice of hyperparameter, and performance depends heavily on the width of the Gaussian kernel. Finding the best or optimal value for this hyperparameter involves complex calculation or manual selection and requires expertise
# 2. RBF kernel is prone to overfitting if the width of Gaussian kernel is tiny / underfitting if width is too large. Balancing can be difficult for differentiating complexity and generalization
# 3. Scaling sensitivity of input features since it requires Euclidean distance between examples. As shown in this model, it has been scaled - if it is not scaled in other circumstances, some features might dominate the distance measure which can affect the model's performance
# SVR CODE
from sklearn.svm import SVR
grid_search_svr = GridSearchCV(
SVR(), param_grid_svr, cv=5, scoring="neg_mean_squared_error"
)
grid_search_svr.fit(X_train, Y_train)
print("Best parameters: {}".format(grid_search_svr.best_params_))
svr_best_cross_val_score = np.sqrt(-grid_search_svr.best_score_)
print("Best cross-validation score: {:.2f}".format(np.expm1(svr_best_cross_val_score)))
svr_score = np.sqrt(-grid_search_svr.score(X_test, Y_test))
print("Test set score: {:.2f}".format(np.expm1(svr_score)))
# Random Forest Regression: What is Random Forest Regression?
# Random forest regression is a machine learning algorithm used for regression problems. It is a variant of the Random Forest model used for classification problems. Essentially, it is a regression approach based on a collection of decision trees, hence the name random forest.
# Random forest regressor generates decision trees on randomly selected subsets of input data and outputs the mean or median prediction of individual trees as final prediction.
# ===Keypoint: Builds several decision trees and combine prediction results of each of the decision trees to provide accurate predictions...
# Procedural steps of how the algorithm/model work:
# 1. Random subsets of input data are selected with replacement, this technique is known as bagging - each subset is used to train decision tree model
# 2. At each node of decision tree, random subset of input features are selected, best feature is chosen to split the node
# 3. Each decision tree is grown until a stopping criterion is reached, such as a maximum depth or a minimum number of samples required to split a node
# 4. Make a forest of such decision trees
# 5. To make a prediction for a new example, the Random Forest Regressor takes the average or median of the predictions of the decision trees in the forest
# RF Regressor Formula:
# The way how the model/algorithm work is that:
# 1. Initiate the set of decision trees are trained on different subsets of the input data using bagging and feature randomization
# 2. In new input X, RF regressor predicts target variable Y by aggregating the predictions of all individual decision trees = can be done taking average or median of predicted values
# Given a training set {(X1, Y1), (X2, Y2), ..., (Xn, Yn)} where Xi is the input example with m features and Yi is the target variable, the Random Forest Regressor algorithm generates T decision trees, each trained on a random subset of the input data and a random subset of the input features.
# Let the predicted value of the t-th decision tree be denoted as Y^t(X), where t is the index of the decision tree.
# Then, the predicted value of the Random Forest Regressor algorithm for a new input example X is given by:
# Y = (1/T) * ∑(t=1 to T) Y^t(X)
# Y = final predicted value
# the sum is taken over all T decision trees (see the short sketch below)
# RF regressor uses weighted average or median of predicted values; weights are proportional to accuracy of each decision tree.
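# A short sketch (illustrative only, on synthetic data) showing that a
# RandomForestRegressor prediction is the average of its individual trees' predictions:
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
X_demo, y_demo = make_regression(n_samples=100, n_features=4, random_state=0)
rf_demo = RandomForestRegressor(n_estimators=10, random_state=0).fit(X_demo, y_demo)
per_tree = np.array([tree.predict(X_demo[:1])[0] for tree in rf_demo.estimators_])
print(per_tree.mean(), rf_demo.predict(X_demo[:1])[0])  # the two values match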
# Advantage and Disadvantages of Random Forest Regressor:
# Adv:
# 1. Random Forest can handle both categorical and continuous input features
# 2. Can handle missing/noisy data and performs well with high-dimensional datasets; it also handles data that does not follow a normal distribution
# 3. Can identify the most significant independent variables that affect the variability of the dependent variable
# 4. Less prone to overfitting compared to normal individual decision trees, since it adopts averaging or using median based aggregation of predictions with the help of multiple trees
# 5. Provides importance scores for input features that allows identification of most significant input features in prediction
# Disadv:
# 1. Random Forest Regressors are slower to train and to predict with than simpler regression algorithms (higher computational time complexity)
# 2. More difficult to interpret than a linear regression model or individual decision trees; it does not provide a clear explanation of the relationship between the independent and dependent variables
# 3. Hyperparameters such as the number of trees, the depth of the trees and the number of features used at each node severely affect resource usage and performance, which requires the user to tune them carefully
# 4. Random Forest Regressors do not perform well on datasets with imbalanced class distributions, since the majority class dominates the tree-building process, which might lead to biased predictions
from sklearn.ensemble import RandomForestRegressor
grid_search_rf = GridSearchCV(
RandomForestRegressor(), param_grid_rf, cv=5, scoring="neg_mean_squared_error"
)
grid_search_rf.fit(X_train, Y_train)
print("Best parameters: {}".format(grid_search_rf.best_params_))
rf_best_cross_val_score = np.sqrt(-grid_search_rf.best_score_)
print("Best cross-validation score: {:.2f}".format(np.expm1(rf_best_cross_val_score)))
rf_score = np.sqrt(-grid_search_rf.score(X_test, Y_test))
print("Test set score: {:.2f}".format(np.expm1(rf_score)))
# # Conclusion of the 2 models used:
# Plotting feature importance of what determines a good video game sales forecast:
#
from sklearn.svm import SVR
# =======Depicting feature importance using Support Vector Regressor=======
# Note: SVR with an RBF kernel does not expose feature_importances_, so guard the
# attribute access to avoid an AttributeError.
if hasattr(grid_search_svr.best_estimator_, "feature_importances_"):
    feature_importance = grid_search_svr.best_estimator_.feature_importances_
    # make importances relative to max importance
    feature_importance = 100.0 * (feature_importance / feature_importance.max())
    sorted_idx = np.argsort(feature_importance)
    pos = np.arange(sorted_idx.shape[0]) + 0.5
    plt.figure(figsize=(20, 10))
    plt.subplot(1, 2, 2)
    plt.barh(pos, feature_importance[sorted_idx], align="center")
    plt.yticks(
        pos, X_train.columns.values[sorted_idx]
    )  # Not 100 % sure the feature names match the importances correctly...
    plt.xlabel("Relative Importance")
    plt.title("Variable Importance")
    plt.show()
else:
    print("SVR does not provide feature_importances_")
# Apparently, SVR does not support feature importances, unlike Random Forest, so we use the Random Forest model to visualize the factors behind a good video game sales prediction
# =======*** Depicting feature importance using Random Forest Regressor ***=======
feature_importance = grid_search_rf.best_estimator_.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + 0.5
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align="center")
plt.yticks(
pos, X_train.columns.values[sorted_idx]
) # Not 100 % sure the feature names match the importances correctly...
plt.xlabel("Relative Importance")
plt.title("Variable Importance")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
df = pd.read_csv("/kaggle/input/avocado-prices/avocado.csv", index_col=None)
del df["Unnamed: 0"]
df.head()
df.shape
df.info()
df["Date"] = pd.to_datetime(df["Date"])
df.head()
df["region"].value_counts()
df["region"].astype("category", inplace=True)
df["type"].value_counts()
df["type"].astype("category", inplace=True)
df.info()
df.head(3)
df.describe()
type(df["type"])
print(df.head())
print(df["AveragePrice"].mean())
print(df["AveragePrice"].median())
print(df["AveragePrice"].mode())
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_profiling
test1 = pd.read_csv("../input/test1.csv")
train1 = pd.read_csv("../input/train1.csv")
train1.info()
test1.info()
train1.describe()
test1.describe()
train1.drop(train1.columns[[0]], axis=1, inplace=True)
test1.drop(test1.columns[[0]], axis=1, inplace=True)
train1[train1.isnull().any(axis=1)].count()
test1[test1.isnull().any(axis=1)].count()
test1.isna().sum()
train1.isna().sum()
train1["PROD_CD"].unique()
train1.info()
train1["PROD_CD"].value_counts()
train1["SLSMAN_CD"].value_counts()
train1["PLAN_MONTH"].value_counts()
train1["PLAN_YEAR"].value_counts()
train1["TARGET_IN_EA"].value_counts()
train1["ACH_IN_EA"].value_counts()
import re
p = re.compile(r"\D")
train1["TARGET_IN_EA"] = [p.sub("", x) for x in train1["TARGET_IN_EA"]]
train1["ACH_IN_EA"] = [p.sub("", x) for x in train1["ACH_IN_EA"]]
test1["TARGET_IN_EA"] = [p.sub("", x) for x in test1["TARGET_IN_EA"]]
train1.head()
train1["TARGET_IN_EA"] = pd.to_numeric(train1["TARGET_IN_EA"])
train1["ACH_IN_EA"] = pd.to_numeric(train1["ACH_IN_EA"])
test1["TARGET_IN_EA"] = pd.to_numeric(test1["TARGET_IN_EA"])
train1.info()
test1.info()
pandas_profiling.ProfileReport(train1)
plt.scatter(x="TARGET_IN_EA", y="ACH_IN_EA", data=train1)
sns.pairplot(train1.iloc[:, :])
sns.pairplot(test1.iloc[:, :])
train1.boxplot()
test1.boxplot()
string_column = ["PROD_CD", "SLSMAN_CD"]
from sklearn import preprocessing
number = preprocessing.LabelEncoder()
for i in string_column:
train1[i] = number.fit_transform(train1[i])
test1[i] = number.fit_transform(test1[i])
colnames = list(train1.columns)
# colnames=list(test1.columns)
train1.info()
test1.info()
train1.head(10)
test1.head(10)
plt.hist("ACH_IN_EA", data=train1)
train1["ACH_IN_EA"].value_counts()[:20].plot(kind="barh")
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
train, test = train_test_split(train1, test_size=0.2, random_state=0)
x = colnames[:5]
y = colnames[5]
# model=RandomForestClassifier(n_jobs=3,oob_score=True,n_estimators=100,criterion='entropy')
# model.fit(train[x],train[y])
##pred=model.predict(test[x])
# pd.Series(pred).value_counts()
# np.mean(pd.Series(train1.ACH_IN_EA).reset_index(drop=True)==pd.Series(model.predict(train[x])))
##
##model=RandomForestClassifier(n_jobs=3,oob_score=True,n_estimators=100,criterion='entropy')
# model.fit(train1,test1)
# pred=model.predict(test1)
# pd.Series(pred).value_counts()
# np.mean(pd.Series(train1.ACH_IN_EA).reset_index(drop=True)==pd.Series(model.predict(train1)))
# np.mean(pred==test.ACH_IN_EA)
|
# # Introduction
# This notebook is a development of the final exercise from the ‘[Intro to Machine Learning](https://www.kaggle.com/learn/intro-to-machine-learning)’ micro-course.
#
import math
from operator import itemgetter
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import OneHotEncoder
# # Data
# Meanings of all features are described [here](https://www.kaggle.com/c/home-data-for-ml-course/data).
train_df = pd.read_csv("../input/train.csv", index_col="Id")
train_df
X_train = train_df.drop(columns="SalePrice")
y_train = train_df["SalePrice"]
X_test = pd.read_csv("../input/test.csv", index_col="Id")
X_test
X_all = pd.concat([X_train, X_test])
# # Numeric features
numer_features = set(X_train.select_dtypes(exclude=["category", "object"]).columns)
sorted(numer_features)
X_all[numer_features].hist(
bins=50, log=True, figsize=(15, 40), layout=(math.ceil(len(numer_features) / 3), 3)
)
# Check if there are missing values:
nan_counts = X_all[numer_features].isna().sum()
nan_counts[nan_counts > 0].sort_index()
# Most of these columns seem to refer to properties of a non-existent garage or basement. Set all these areas and numbers of bathrooms to zeros (note the distributions above already have peaks at zeros):
for feature in [
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtFullBath",
"BsmtHalfBath",
"BsmtUnfSF",
"GarageArea",
"GarageCars",
"MasVnrArea",
"TotalBsmtSF",
]:
for X in [X_train, X_test]:
X.loc[X[feature].isna(), feature] = 0.0
# Missing built year for garage does not necessarily imply that the property doesn't have a garage:
X_all[~X_all["GarageYrBlt"].isna()]["GarageArea"].hist()
# Assume that the garage was built in the same year as the house then. Also change the year for a time-travelling garage from the XXIII century, which can be seen in the distributions above.
for X in [X_train, X_test]:
selection = X["GarageYrBlt"].isna() | (X["GarageYrBlt"] > 2015)
X.loc[selection, "GarageYrBlt"] = X.loc[selection, "YearBuilt"]
# The last column with missing values is `LotFrontage`. It's not clear what is the reason for having missing value there. Could they indicate that the house is situated at a substantial distance from the road? Then this would probably affect the average price, but it doesn't seem to:
y_train.groupby(X_train["LotFrontage"].isna()).mean()
# Try to predict the missing values from other features. The single most relevant one is probably `LotArea`. The first plot below compares the distributions of the area for properties with known and missing lengths of the frontage. Although in the latter case the distribution has a heavier tail, the difference is not drastic. The second plot illustrates the dependence between `LotFrontage` and square root of `LotArea`. There is a somewhat linear trend with a large variance.
selection = ~X_all["LotFrontage"].isna()
fig = plt.figure(figsize=(12.8, 4.8))
axes = fig.add_subplot(1, 2, 1)
axes.hist(
[X_all.loc[selection, "LotArea"], X_all.loc[~selection, "LotArea"]],
label=["Present", "Missing"],
bins=30,
density=True,
)
axes.set_yscale("log")
axes.set_xlabel("LotArea")
axes.legend()
axes = fig.add_subplot(1, 2, 2)
axes.scatter(
X_all.loc[selection, "LotArea"].apply(np.sqrt),
X_all.loc[selection, "LotFrontage"],
s=1,
)
axes.set_xlabel("sqrt(LotArea)")
axes.set_ylabel("LotFrontage")
# I don't expect `LotFrontage` to bring a lot of information on top of the area, property type, neighbourhood, and so on. Not to spend much time on it, I'm going to impute it with a simple kNN regressor. It captures the linear scaling in the bulk of the distribution but at the same time does not extrapolate it to extreme values, which is probably a sane model. For a more accurate treatment see [this notebook](https://www.kaggle.com/ogakulov/lotfrontage-fill-in-missing-values-house-prices).
sel = ~X_train["LotFrontage"].isna()
fX = X_train.loc[sel, "LotArea"].apply(np.sqrt).to_numpy().reshape(-1, 1)
fy = X_train.loc[sel, "LotFrontage"]
frontage_model = KNeighborsRegressor(50)
frontage_model.fit(fX, fy)
fig = plt.figure()
axes = fig.add_subplot(111)
axes.scatter(fX, fy, s=1)
x = np.linspace(0.0, 300.0)
axes.plot(x, frontage_model.predict(x.reshape(-1, 1)), c="C1")
axes.set_xlim(x[0], x[-1])
axes.set_xlabel("sqrt(LotArea)")
axes.set_ylabel("LotFrontage")
fig
for X in [X_train, X_test]:
sel = X["LotFrontage"].isna()
X.loc[sel, "LotFrontage"] = frontage_model.predict(
X.loc[sel, "LotArea"].apply(np.sqrt).to_numpy().reshape(-1, 1)
)
# Lastly, column `MSSubClass` actually holds categorical data. Convert it accordingly.
for X in [X_train, X_test, X_all]:
X["MSSubClass"] = X["MSSubClass"].astype("category")
numer_features.remove("MSSubClass")
# ## Categorical features
cat_features = set(X_train.select_dtypes(include=["category", "object"]).columns)
cat_features
for feature in sorted(cat_features):
print(feature, set(X_all[feature].unique()))
# There are a number of ordinal features which can be converted into numbers in a straightforward manner. Examples are the various quality measures, such as `ExterQual` and `KitchenQual`, although it's hard to guess what ‘TA’ means. Let's see how the quality labels correlate with the price for a few features:
y_train.groupby("BsmtCond").mean().sort_values()
y_train.groupby("HeatingQC").mean().sort_values()
# It seems ‘TA’ means average (‘typical’?) quality. Let's convert all quality features to numbers. Map NaNs to the neutral value.
converted_cat_features = set()
def convert_features(features, mapping):
if isinstance(features, str):
features = [features]
for feature in features:
for X in [X_train, X_test]:
X[feature] = X[feature].map(mapping)
converted_cat_features.update(features)
quality_features = set(X_train.columns[X_train.isin(["TA", "Ex"]).any()])
quality_transform = {"Po": -2, "Fa": -1, "TA": 0, "Gd": 1, "Ex": 2, np.nan: 0}
convert_features(quality_features, quality_transform)
# Proceed with other straightforward cases:
convert_features(
"Alley", {"Grvl": 1, "Pave": 2, np.nan: 0}
) # Assume NaN means no alley
convert_features("CentralAir", {"N": 0, "Y": 1})
convert_features(
"GarageFinish", {"Unf": 0, "RFn": 1, "Fin": 2, np.nan: 0}
) # 'RFn' for 'rough finish'?
convert_features("PavedDrive", {"N": 0, "P": 1, "Y": 2}) # 'P' for 'partial'?
convert_features("Street", {"Grvl": 0, "Pave": 1})
convert_features(
"LandSlope", {"Gtl": 0, "Mod": 1, "Sev": 2}
) # Gentle, moderate, severe
convert_features("BsmtExposure", {"No": 0, "Mn": 1, "Av": 2, "Gd": 3, np.nan: 0})
# Drop features for which the most frequent value is found in more than 90% of instances.
drop_features = set()
for feature in sorted(cat_features):
counts = X_train[feature].value_counts(dropna=False).sort_values(ascending=False)
if counts.iloc[0] / len(X_train) > 0.9:
drop_features.add(feature)
print(drop_features)
for X in [X_train, X_test]:
X.drop(columns=drop_features, inplace=True)
# Fill all NaNs in the remaining categorical columns:
for X in [X_train, X_test, X_all]:
for feature in [
"BsmtFinType1",
"BsmtFinType2",
"Exterior1st",
"Exterior2nd",
"Fence",
"GarageType",
"MSZoning",
"MasVnrType",
"SaleType",
]:
X[feature].fillna("None", inplace=True)
# Check remaining categorical features:
for feature in sorted(cat_features - converted_cat_features - drop_features):
print(feature, sorted(list(X_all[feature].unique())))
# I will mostly use target encoding for them. Here I apply one-hot encoding to `BldgType` just to have an example of it:
one_hot_features = ["BldgType"]
one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
one_hot_encoder.fit(X_train[one_hot_features])
def one_hot_encode(X):
t = pd.DataFrame(one_hot_encoder.transform(X[one_hot_features]))
t.index = X.index
t.columns = one_hot_encoder.get_feature_names(one_hot_features)
return X.drop(columns=one_hot_features).join(t)
X_train = one_hot_encode(X_train)
X_test = one_hot_encode(X_test)
one_hot_features = set(one_hot_features)
# To the remaining categorical features, I apply the [target encoding](https://maxhalford.github.io/blog/target-encoding-done-the-right-way/), implemented as a combination of the versions with cross-validation and additive smoothing. The training set is split into $k$ folds, and all examples of category $c$ within the fold $i$ are assigned the value
# $$t_c = \frac{n_c \mu_c + n_\text{reg} \mu}{n_c + n_\text{reg}},$$
# where $n_c$ and $\mu_c$ are the number of examples of category $c$ in the remaining $k - 1$ folds and the mean value of the target computed over them, $\mu$ is the global mean value of the target computed with all examples in the training set regardless of their categories, and $n_\text{reg}$ is a regularization parameter that pushes $t_c$ closer to $\mu$ for underpopulated categories. On the test set, $t_c$ is set to
# $$t_c^\text{test} = \frac{\frac{k - 1}{k} n_c \mu_c + n_\text{reg} \mu}{\frac{k - 1}{k} n_c + n_\text{reg}},$$
# where $n_c$ and $\mu_c$ are now computed over the whole training set, and the factor $(k - 1) / k$ sets the same balance between $\mu_c$ and $\mu$ as used in the training.
# Since this is a supervised encoding, I only define here the transformer class and will apply it in the training pipeline. This way the potential overfitting due to the target encoding won't affect the cross-validation estimate of the error of the regression model.
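# A quick numeric illustration of the smoothing formula above (toy numbers, not taken
# from the dataset): a category seen only a few times is pulled towards the global mean.
n_c, mu_c, mu, n_reg = 4, 250_000.0, 180_000.0, 10
t_c = (n_c * mu_c + n_reg * mu) / (n_c + n_reg)
print(t_c)  # 200000.0 -- much closer to the global mean than to the category mean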
class TargetEncoder(BaseEstimator, TransformerMixin):
def __init__(self, columns, k_folds=5, n_reg=10, random_state=None):
self.columns = columns
self.k_folds = k_folds
self.n_reg = n_reg
self.random_state = random_state
self._test_transform = {}
self._global_mean = None
def fit(self, X, y=None):
        raise NotImplementedError("Only fit_transform is supported.")
def fit_transform(self, X, y):
"""Fit and apply encoding."""
X_trans = X.copy()
self._test_transform = {}
self._global_mean = y.mean()
for column in self.columns:
X_trans[
column
] = 0.0 # Needed to prevent type error when assigning to a categorical column in Pandas
self._test_transform[column] = self._build_replace_map(
y.groupby(X[column]).agg(["count", "mean"]), test=True
)
kfold = KFold(
n_splits=self.k_folds, shuffle=True, random_state=self.random_state
)
for source_indices, update_indices in kfold.split(X):
replace_map = self._build_replace_map(
y.iloc[source_indices]
.groupby(X[column].iloc[source_indices])
.agg(["count", "mean"])
)
X_trans.iloc[update_indices, X_trans.columns.get_loc(column)] = (
X[column]
.iloc[update_indices]
.apply(lambda cat: replace_map.get(cat, self._global_mean))
)
return X_trans
def transform(self, X, y=None):
"""Apply encoding on test set."""
X_trans = X.copy()
for column in self.columns:
X_trans[column] = X[column].apply(
lambda cat: self._test_transform[column].get(cat, self._global_mean)
)
return X_trans
def _build_replace_map(self, stats, test=False):
"""Build replacement map for a single feature.
Arguments:
stats: DataFrame whose index is categories of this feature and columns are the number of examples and
mean value of the target in each category.
test: Boolean distinguishing training and test sets.
Return value:
Map from category to its numeric representation.
"""
replace_map = {}
for category in stats.index:
if isinstance(category, int):
# Work-around for this bug: https://github.com/pandas-dev/pandas/issues/17569
n_c, mu_c = stats.loc[pd.CategoricalIndex([category])].iloc[0]
else:
n_c, mu_c = stats.loc[category]
if test:
n_c *= (self.k_folds - 1) / self.k_folds
replace_map[category] = (n_c * mu_c + self.n_reg * self._global_mean) / (
n_c + self.n_reg
)
return replace_map
target_encoder_features = (
cat_features - converted_cat_features - drop_features - one_hot_features
)
target_encoder = TargetEncoder(target_encoder_features, random_state=7210)
# # Regression
# In contrast to what is claimed in the [description](https://www.kaggle.com/c/home-data-for-ml-course/overview/evaluation) of the competition, the scoring is based on the mean absolute error (mentioned [here](https://www.kaggle.com/c/home-data-for-ml-course/discussion/105838) and confirmed by comparing the score computed locally against the one reported in the leaderboard).
# Cannot use the built-in pipeline because `TargetEncoder` does not provide method `fit` and the regressor used does not provide `fit_predict`.
def cross_validate(regressor, cv=5, drop_features=[]):
scores = []
feature_importances = np.zeros(X_train.shape[1] - len(drop_features))
kfold = KFold(cv, shuffle=True, random_state=3404)
for train_indices, test_indices in kfold.split(X_train):
X = X_train.iloc[train_indices]
y = y_train.iloc[train_indices]
X = target_encoder.fit_transform(X, y)
regressor.fit(X.drop(columns=drop_features), y)
X = X_train.iloc[test_indices]
y = y_train.iloc[test_indices]
X = target_encoder.transform(X, y)
y_pred = regressor.predict(X.drop(columns=drop_features))
scores.append(mean_absolute_error(y, y_pred))
feature_importances += regressor.feature_importances_
return np.asarray(scores), feature_importances / cv
regressor = RandomForestRegressor(n_estimators=1000, random_state=8266)
scores, feature_importances = cross_validate(regressor)
print("{} +- {}".format(scores.mean(), scores.std()))
feature_importances = list(zip(X_train.columns, feature_importances))
feature_importances.sort(key=itemgetter(1), reverse=True)
for i, (feature, importance) in enumerate(feature_importances):
print("{:2d} {:17} {:.3f}".format(i, feature, importance))
top_n = 30
drop_features = list(map(itemgetter(0), feature_importances[top_n:]))
scores, _ = cross_validate(regressor, drop_features=drop_features)
print("{} +- {}".format(scores.mean(), scores.std()))
# | Num. features | CV score |
# |---|---|
# | 10 | 18389 ± 571 |
# | 20 | 17589 ± 344 |
# | 30 | 17297 ± 236 |
# | 40 | 17261 ± 257 |
# | 50 | 17309 ± 198 |
# With the full set of features, tried transforming the target as $y \mapsto \tilde y = y^\alpha$ to reduce its spread. This can reduce the mean score a bit, but at the price of increasing the variance. Decided not to apply such a transformation.
# | α | CV score |
# |---|---|
# | 0.4 | 17338 ± 472 |
# | 0.6 | 17212 ± 526 |
# | 0.75 | 17156 ± 446 |
# | 0.85 | 17215 ± 371 |
# | 1 | 17274 ± 193 |
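# A minimal sketch of how the y -> y**power experiment could be wired into the same
# manual CV loop (illustrative only; it reuses target_encoder, X_train and y_train
# defined above, and `power` stands for the exponent alpha from the table).
def cross_validate_power(regressor, power, cv=5):
    scores = []
    kfold = KFold(cv, shuffle=True, random_state=3404)
    for train_indices, test_indices in kfold.split(X_train):
        X_tr = target_encoder.fit_transform(
            X_train.iloc[train_indices], y_train.iloc[train_indices]
        )
        regressor.fit(X_tr, y_train.iloc[train_indices] ** power)
        X_te = target_encoder.transform(X_train.iloc[test_indices])
        y_pred = regressor.predict(X_te) ** (1 / power)  # invert the transform
        scores.append(mean_absolute_error(y_train.iloc[test_indices], y_pred))
    return np.asarray(scores)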
# CV scores in different setups:
# * **17888 \[*V3\]** Random forest with all numeric features.
# * **17486 \[*V5\]** (public LB 15916) Random forest with all numeric features. Categorical features with little diversity dropped, ordinal features encoded as numbers, one-hot encoding applied to nominal features.
# * **17274 \[V1\]** (public LB 15588) Target encoder for most of categorical features.
# * **17297 \[V2\]** Only keep top-30 features.
# # Submission
X = target_encoder.fit_transform(X_train, y_train)
X = X.drop(columns=drop_features)
regressor.fit(X, y_train)
train_predictions = regressor.predict(X)
print("Train error:", mean_absolute_error(train_predictions, y_train))
X = target_encoder.transform(X_test)
X = X.drop(columns=drop_features)
test_predictions = regressor.predict(X)
output = pd.DataFrame({"Id": X_test.index, "SalePrice": test_predictions})
output.to_csv("submission.csv", index=False)
|
# # Introduction
# The purpose of this kernel is to build a predictive model for anomaly detection using the Tennessee Eastman Process Simulation Dataset.
# I will use some statistical and neural network approaches.
import numpy as np
import pandas as pd
import pyreadr
# # Data preparation
# The original dataset consists of 4 RData files containing train and test parts for fault-free and faulty process runs.
# Let's convert and concatenate them to get two pandas dataframes, one for training and one for testing.
train_df = pd.concat(
[
pyreadr.read_r(
"/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_FaultFree_Training.RData"
)["fault_free_training"],
pyreadr.read_r(
"/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_Faulty_Training.RData"
)["faulty_training"],
]
)
test_df = pd.concat(
[
pyreadr.read_r(
"/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_FaultFree_Testing.RData"
)["fault_free_testing"],
pyreadr.read_r(
"/kaggle/input/tennessee-eastman-process-simulation-dataset/TEP_Faulty_Testing.RData"
)["faulty_testing"],
]
)
train_df.head()
test_df.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# # Loading Data
# Here, we'll load the data into pandas data frames.
train_csv = "/kaggle/input/Kannada-MNIST/train.csv"
test_csv = "/kaggle/input/Kannada-MNIST/test.csv"
val_csv = "/kaggle/input/Kannada-MNIST/Dig-MNIST.csv"
train_df = pd.read_csv(train_csv)
test_df = pd.read_csv(test_csv)
valid_df = pd.read_csv(val_csv)
# Just checking out how the data frame looks
valid_df.head()
# # Visualizing Data
# Here, we're checking how many samples are present for each class. To avoid any bias in the network, we'll balance the data by augmentation if the classes are not approximately equal in size.
import seaborn as sns
sns.distplot(train_df["label"], kde=False)
# Looks like there is an exactly equal number of samples per class, so we don't need any data augmentation to counteract class imbalance.
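# A quick numeric double-check of the class balance observed above (reuses train_df loaded earlier):
train_df["label"].value_counts().sort_index()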
# # Formatting Data
X_train = train_df.drop("label", axis=1).values
y_train = train_df["label"].values
X_val = valid_df.drop("label", axis=1).values
y_val = valid_df["label"].values
# ### Reshaping data in (28, 28, 1) dims
X_train = X_train.reshape(X_train.shape[0], 28, 28)
X_val = X_val.reshape(X_val.shape[0], 28, 28)
X_train = np.expand_dims(X_train, axis=3)
X_val = np.expand_dims(X_val, axis=3)
n_classes = 10 # 0 through 9
# Converting labels to one-hot encoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
# ### Increasing number of samples
# Using ImageDataGenerator here.
# Ref: Link
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_data_gen = ImageDataGenerator(
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range=0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False,
) # randomly flip images
image_data_gen.fit(X_train)
image_data_gen.fit(X_val)
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape)
img_shape = (28, 28, 1)
# # Model Definition
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Conv2D,
MaxPool2D,
Flatten,
Dropout,
BatchNormalization,
)
from tensorflow.keras.optimizers import Adam
# Model Ref: Link
model = Sequential()
model.add(
Conv2D(
filters=128,
kernel_size=(3, 3),
padding="Same",
activation="relu",
input_shape=(28, 28, 1),
)
)
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.15))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="Same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.15))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding="Same", activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.15))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(10, activation="softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer=Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999),
metrics=["accuracy"],
)
model.summary()
# # Training
model.fit_generator(
    image_data_gen.flow(X_train, y_train, batch_size=64),
    steps_per_epoch=len(X_train) // 64,  # one pass over the training set per epoch at batch size 64
    epochs=10,
    validation_data=image_data_gen.flow(X_val, y_val),
)
# # Testing on Test Dataset
test_df.head()
x_test = test_df.drop("id", axis=1).values.reshape(len(test_df), 28, 28, 1)
predictions = model.predict(x_test)
submit_df = pd.DataFrame(
    {"id": range(len(predictions)), "label": predictions.argmax(axis=1)}
)
submit_df.head()
submit_df.to_csv("./submission.csv", index=False)
|
import warnings
warnings.filterwarnings("ignore")
# Utils
from os import path
# Data
import numpy as np
import pandas as pd
# Viz
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
sns.set(style="darkgrid")
print(f"Matplotlib Version : {mpl.__version__}")
data_path = "../input/"
test = pd.read_csv(data_path + "titanic/test.csv")
train = pd.read_csv(data_path + "titanic/train.csv")
# df = pd.read_csv(data_path+'imdb.csv',error_bad_lines=False);
df = train
# ### 1. Viz training dataset
df
df.columns = [i.lower() for i in df.columns]
df.head(3)
sns.set(style="white", palette="muted", color_codes=True)
f, axes = plt.subplots(3, 2, figsize=(14, 14))
sns.despine(left=True) # hides the border
sns.countplot(x="pclass", data=df, hue="sex", ax=axes[0, 0])
sns.boxplot(x="age", hue="sex", data=df, ax=axes[0, 1])
sns.countplot(x="survived", data=df, hue="sex", ax=axes[1, 0])
sns.swarmplot(x="embarked", y="pclass", data=df, ax=axes[1, 1])
sns.swarmplot(x="survived", y="fare", data=df, ax=axes[2, 0])
sns.boxplot(x="embarked", y="survived", data=df, ax=axes[2, 1])
plt.tight_layout()
C, Q, S = (
len(df[df["embarked"] == "C"]),
len(df[df["embarked"] == "Q"]),
len(df[df["embarked"] == "S"]),
)
# ### Route exploration
import folium
iceburg = [41.7666636, -50.2333324]
southampton = [50.909698, -1.404351]
cherbourg = [49.630001, -1.620000]
queenstown = [51.850334, -8.294286]
m = folium.Map(location=iceburg, tiles="Stamen Terrain", zoom_start=3)
tooltip = "Click me!"
folium.Marker(
southampton,
popup="<h3>1. Southampton, 10 April 1912 </h3> <i> Titanic successfully arrives at Southampton shortly after midnight</i>",
tooltip=tooltip,
).add_to(m)
folium.Marker(
    cherbourg, popup="<h3>2. Cherbourg, 10 April 1912</h3>", tooltip=tooltip
).add_to(m)
folium.Marker(
    queenstown, popup="<h3>3. Queenstown, 11 April 1912</h3>", tooltip=tooltip
).add_to(m)
# https://latitude.to/articles-by-country/general/942/sinking-of-the-rms-titanic
folium.Marker(
iceburg,
popup="<h3>4. Crash - 15 April 1912 </h3>",
tooltip=tooltip,
icon=folium.Icon(color="red", icon="info-sign"),
).add_to(m)
folium.PolyLine([southampton, cherbourg], color="red").add_to(m)
folium.PolyLine([cherbourg, queenstown], color="red").add_to(m)
folium.PolyLine([queenstown, iceburg], color="red").add_to(m)
m
plt.rcParams["figure.figsize"] = (7, 7)
wordcloud = WordCloud(
stopwords=STOPWORDS,
background_color="white",
width=1000,
height=1000,
max_words=200,
).generate(" ".join(df["name"]))
plt.imshow(wordcloud)
plt.axis("off")
plt.title("Passenger Names", fontsize=10)
plt.show()
|
sns.set_style("darkgrid")
pd.set_option("display.max_columns", 500)
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
train.head()
train.info()
train.describe()
# # Checking # of NA
# tmp = pd.isnull(train).sum()
# for i in range(len(tmp)):
# if tmp[i] != 0:
# print('%s %s %d %s (%.2f%s)'%(tmp.index[i],' '*(20-len(tmp.index[i])),
# tmp[i], ' '*(6 - len(str(tmp[i]))),
# tmp[i]/len(train)*100, '%'))
# Checking NA's
tmp = pd.isnull(train).sum()
tmp = tmp.sort_values(ascending=False)
tmp = tmp[tmp > 0]
plt.xticks(rotation=90)
sns.barplot(x=tmp.index, y=tmp)
plt.show()
tmp = pd.isnull(test).sum()
tmp = tmp.sort_values(ascending=False)
tmp = tmp[tmp > 0]
plt.xticks(rotation=90)
sns.barplot(x=tmp.index, y=tmp)
plt.show()
# Categorizing variables
num_col = []
cat_col = []
for column in train.columns[~pd.Series(train.columns).isin(["Id", "SalePrice"])]:
if type(train[column][0]) is not str:
num_col.append(column)
else:
cat_col.append(column)
tmp = [
"MSSubClass",
"Alley",
"FireplaceQu",
"PoolQC",
"Fence",
"MiscFeature",
"MoSold",
] # Handwork !
num_col = list(set(num_col) - set(tmp))
cat_col += tmp
for col in tmp:
train[col] = train[col].astype(str)
# ### Target Variable
# skew and log transformation for the target variable
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
print("Before > Mean: %f, Standard Deviation: %f" % norm.fit(train["SalePrice"]))
sns.distplot(train["SalePrice"], ax=ax[0])
stats.probplot(train["SalePrice"], plot=ax[1])
plt.show()
# after log transformation to the target var
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
tmp = np.log1p(train["SalePrice"]) # log(1+x)
print("After > Mean: %f, Standard Deviation: %f" % norm.fit(tmp))
sns.distplot(tmp, ax=ax[0])
stats.probplot(tmp, plot=ax[1])
plt.show()
# vars' corr with the target var
col_corr = []
for col in num_col:
na_idx = pd.isnull(train[col])
corr = np.corrcoef(x=train[col][~na_idx], y=train["SalePrice"][~na_idx])[0, 1]
col_corr.append((col, corr))
col_corr.sort(key=lambda x: -abs(x[1]))
col_corr
# Top N's scatter plots
N = 16
top_N_num = [pair[0] for pair in col_corr[:N]]
fig, ax = plt.subplots(int(np.ceil(N / 2)), 2, figsize=(20, N * 2))
for i, col in enumerate(top_N_num):
sns.scatterplot(
data=train, x=col, y="SalePrice", alpha=0.4, color="red", ax=ax[i // 2][i % 2]
)
ax[i // 2][i % 2].set_xlabel(col, fontsize=18)
ax[i // 2][i % 2].set_ylabel("SalePrice", fontsize=18)
plt.show()
# pairplot: top 5 highest correlated vars and target var
N = 5
top_N_num = [pair[0] for pair in col_corr[:N]]
top_N_num.append("SalePrice")
plt.figure(figsize=(10, 8))
sns.pairplot(train[top_N_num])
plt.show()
# top N's lowest P-value variables
col_aov = []
for col in cat_col:
result = ols("SalePrice ~ {}".format(col), data=train).fit()
aov_table = sm.stats.anova_lm(result)
aov_pr = aov_table["PR(>F)"][0]
col_aov.append((col, aov_pr))
col_aov.sort(key=lambda x: x[1]) # ascending order of p-value
col_aov
# Box plot of Top N Categorical Vars
N = 16
top_N_num = [pair[0] for pair in col_aov[:N]]
fig, ax = plt.subplots(int(np.ceil(N / 2)), 2, figsize=(20, N * 2))
plt.setp(ax[0][0].get_xticklabels(), rotation=45)
for i, col in enumerate(top_N_num):
sns.boxplot(x=col, y="SalePrice", data=train, ax=ax[i // 2][i % 2])
ax[i // 2][i % 2].set_xlabel(col, fontsize=18)
ax[i // 2][i % 2].set_ylabel("SalePrice", fontsize=18)
plt.show()
# ### Independent Variables
# Correlation of each other
plt.figure(figsize=(10, 8))
corr_table = train[num_col].corr()
sns.heatmap(corr_table)
plt.show()
# N highest corr pairs
high_cor_list = []
tmp = corr_table[abs(corr_table) > 0.4]
for col in tmp.columns:
for row in tmp[col][~pd.isnull(tmp[col])].index:
if col == row:
break
high_cor_list.append((col, row, tmp[col][row]))
high_cor_list.sort(key=lambda x: -x[2])
high_cor_list
# Correlation of each other
plt.figure(figsize=(10, 8))
corr_table = test[num_col].corr()
sns.heatmap(corr_table)
plt.show()
# N highest corr pairs
high_cor_list = []
tmp = corr_table[abs(corr_table) > 0.4]
for col in tmp.columns:
for row in tmp[col][~pd.isnull(tmp[col])].index:
if col == row:
break
high_cor_list.append((col, row, tmp[col][row]))
high_cor_list.sort(key=lambda x: -x[2])
high_cor_list
# ### Outlier
# outlier found in the above graph
sns.scatterplot(data=train, x="GrLivArea", y="SalePrice")
plt.show()  # 2 outliers at the bottom right
# outlier checking with univariate analysis
scaler = StandardScaler()
tmp = scaler.fit_transform(train[["SalePrice"]])
tmp_sorted = sorted(np.squeeze(tmp))
for a, b in zip(tmp_sorted[:10], tmp_sorted[-10:]):
print("{} {} {}".format(round(a, 5), " " * 10, round(b, 5)))
train[tmp > 7] # Be careful of these!
# ### Checking Homoscedasticity
skews = []
for col in num_col:
skews.append((col, skew(train[col])))
skews.sort(key=lambda x: -abs(x[1]))
skews
sns.distplot(train["TotalBsmtSF"])
plt.xlim(0, 10e03)
plt.show()
# After log transformation on skewed variables( 1. GrLivArea)
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
dat = train.copy()
dat["SalePrice"] = np.log1p(dat["SalePrice"])
dat["GrLivArea"] = np.log1p(dat["GrLivArea"])
dat["MasVnrArea"] = np.log1p(dat["MasVnrArea"])
sns.scatterplot(data=dat, x="GrLivArea", y="SalePrice", ax=ax[0])
sns.scatterplot(data=dat, x="GarageArea", y="SalePrice", ax=ax[1])
plt.show()
# ### Train VS Test
fig, ax = plt.subplots(20, 2, figsize=(20, 80))
for i, col in enumerate(num_col):
row = int(i / 2)
sns.distplot(train[col][~pd.isnull(train[col])], ax=ax[row][0])
sns.distplot(test[col][~pd.isnull(test[col])], ax=ax[row][1])
plt.show()
# ### Preprocessing
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
# Removing Outliers
train = train.drop(
train[(train["GrLivArea"] > 4000) & (train["SalePrice"] < 200000)].index
) # Removing
sns.scatterplot(data=train, x="GrLivArea", y="SalePrice") # After
plt.show()
# Combining
test["SalePrice"] = 0
data = pd.concat([train, test], axis=0)
data.reset_index(drop=True, inplace=True)
# NA Handling
data["MSZoning"].fillna(data["MSZoning"].mode()[0], inplace=True)
data["Utilities"].fillna(data["Utilities"].mode()[0], inplace=True)
data["Exterior1st"].fillna(data["Exterior1st"].mode()[0], inplace=True)
data["Exterior2nd"].fillna(data["Exterior2nd"].mode()[0], inplace=True)
data["MasVnrType"].fillna("None", inplace=True)
data["MasVnrArea"].fillna(0, inplace=True)
data["BsmtQual"].fillna("None", inplace=True)
data["BsmtCond"].fillna("None", inplace=True)
data["BsmtExposure"].fillna("None", inplace=True)
data["BsmtFinType1"].fillna("None", inplace=True)
data["BsmtFinSF1"].fillna(0, inplace=True)
data["BsmtFinType2"].fillna("None", inplace=True)
data["BsmtFinSF2"].fillna(0, inplace=True)
data["BsmtUnfSF"].fillna(0, inplace=True)
data["TotalBsmtSF"].fillna(0, inplace=True)
data["Electrical"].fillna(data["Electrical"].mode()[0], inplace=True)
data["BsmtFullBath"].fillna(0, inplace=True)
data["BsmtHalfBath"].fillna(0, inplace=True)
data["KitchenQual"].fillna(data["KitchenQual"].mode()[0], inplace=True)
data["Functional"].fillna("None", inplace=True)
data["GarageType"].fillna("None", inplace=True)
data["GarageFinish"].fillna("None", inplace=True)
data["GarageYrBlt"].fillna(data["GarageYrBlt"].mode()[0], inplace=True)
data["GarageArea"].fillna(0, inplace=True)
data["GarageCars"].fillna(0, inplace=True)
data["GarageQual"].fillna("None", inplace=True)
data["SaleType"].fillna(data["SaleType"].mode()[0], inplace=True)
data["GarageCond"].fillna("None", inplace=True)
data["PoolQC"].fillna("None", inplace=True)
data["Fence"].fillna("None", inplace=True)
data["MiscFeature"].fillna("None", inplace=True)
data["LotFrontage"].fillna(0, inplace=True)
data["Alley"].fillna("None", inplace=True)
data["FireplaceQu"].fillna("None", inplace=True)
# Dropping columns with too many NAs
cols = [
"MiscFeature",
"Alley",
"PoolQC",
"Fence",
"FireplaceQu",
"GarageCond",
"LotFrontage",
]
data.drop(cols, axis=1, inplace=True)
# Adding/Revising Columns
# Num Columns
data["Total_SF"] = data["TotalBsmtSF"] + data["1stFlrSF"] + data["2ndFlrSF"]
data["Total_Bath"] = (
data["BsmtFullBath"]
+ data["BsmtHalfBath"] * 0.5
+ data["FullBath"]
+ data["HalfBath"] * 0.5
)
data["Total_Footage"] = (
data["BsmtFinSF1"] + data["BsmtFinSF2"] + data["1stFlrSF"] + data["2ndFlrSF"]
)
data["Age"] = 2020 - data["YearBuilt"]
data["Age_rmd"] = 2020 - data["YearRemodAdd"]
data["Age_Garage"] = 3020 - data["GarageYrBlt"]
data["qul_grliv"] = data["OverallQual"] * data["GrLivArea"]
data["garage"] = data["GarageCars"] * data["GarageArea"]
data["Years"] = data["YearBuilt"] + data["YearRemodAdd"] + data["GarageYrBlt"]
data["Age_not_rmd"] = data["Age"] - data["Age_rmd"]
data["Overall"] = data["OverallQual"] + data["OverallCond"]
data["porch"] = data["OpenPorchSF"] + data["EnclosedPorch"] + data["ScreenPorch"]
data["haspool"] = data["PoolArea"].apply(lambda x: 1 if x > 0 else 0)
data["has2ndfloor"] = data["2ndFlrSF"].apply(lambda x: 1 if x > 0 else 0)
data["hasgarage"] = data["GarageArea"].apply(lambda x: 1 if x > 0 else 0)
data["hasbsmt"] = data["TotalBsmtSF"].apply(lambda x: 1 if x > 0 else 0)
data["hasfireplace"] = data["Fireplaces"].apply(lambda x: 1 if x > 0 else 0)
# Cat Columns
# data['Ext_Kit'] = data['ExterQual'] + data['KitchenQual']
# data['Bsm_Heat'] = data['BsmtQual'] + data['HeatingQC']
# data['Gara_TP1'] = data['GarageFinish'] + data['BsmtFinType1']
data["Season"] = data["MoSold"].apply(
lambda x: "Spring"
if x <= 3
else ("Summer" if x <= 6 else ("Fall" if x <= 9 else "Winter"))
)
data.drop(["MoSold"], axis=1, inplace=True)
# Processing Types of Variables
to_cat = ["MSSubClass"]
data[to_cat] = data[to_cat].astype(str)
# Dividing columns into num and cat
num_col = []
cat_col = []
for col in data.columns:
if col == "Id" or col == "SalePrice":
continue
elif data[col].dtype == "object":
cat_col.append(col)
else:
num_col.append(col)
print(len(num_col))
print(len(cat_col))
print(len(data.columns))
# Label Encoding & Dummy
# labels_col = ['LotShape', 'LandContour', 'LandSlope', 'HouseStyle', 'ExterQual',
# 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
# 'BsmtFinType2', 'HeatingQC', 'CentralAir', 'KitchenQual', 'Functional',
# 'GarageFinish', 'GarageQual', 'PavedDrive']
# for col in labels_col:
# data[col] = pd.factorize(data[col])[0]
# data['LotShape'] = data['LotShape'].apply(lambda x: {'IR3':0, 'IR2':1, 'IR1':2, 'Reg':3}[x])
# data['LandContour'] = data['LandContour'].apply(lambda x: {'Low':0, 'HLS':1, 'Bnk':2, 'Lvl':3}[x])
# data['LandSlope'] = data['LandSlope'].apply(lambda x: {'Gtl':0, 'Mod':1, 'Sev':2}[x])
# data['HouseStyle'] = data['HouseStyle'].apply(lambda x: {'1Story':0, '1.5Unf':1, '1.5Fin':2,
# '2Story':3, '2.5Unf':4, '2.5Fin':5,
# 'SFoyer':6, 'SLvl':7}[x])
# data['CentralAir'] = data['CentralAir'].apply(lambda x: {'N':0, 'Y':1}[x])
# data['Functional'] = data['Functional'].apply(lambda x: {'Sal':0, 'Sev':1, 'Maj2':2, 'Maj1':3, 'Mod':4,
# 'Min2':5, 'Min1':6, 'Typ':7, 'None':-99}[x])
# data['BsmtExposure'] = data['BsmtExposure'].apply(lambda x: {'No':0, 'Mn':1, 'Av':2, 'Gd':3, 'None':-99}[x])
# data['GarageFinish'] = data['GarageFinish'].apply(lambda x: {'Unf':0, 'RFn':1, 'Fin':2, 'None':-99}[x])
# data['PavedDrive'] = data['PavedDrive'].apply(lambda x: {'N':0, 'P':1, 'Y':2}[x])
# for col in ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'GarageQual']:
# data[col] = data[col].apply(lambda x: {'Po':0, 'Fa':1, 'TA':2, 'Gd':3, 'Ex':4, 'None':-99}[x])
# for col in ['BsmtFinType1', 'BsmtFinType2']:
# data[col] = data[col].apply(lambda x: {'Unf':0, 'LwQ':1, 'Rec':2, 'BLQ':3, 'ALQ':4, 'GLQ':5,'None':-99}[x])
for col in cat_col:
# for col in list(set(cat_col) - set(labels_col)):
data = pd.concat([data, pd.get_dummies(data[col], prefix=(col))], axis=1)
data.drop([col], axis=1, inplace=True)
# Solving Skew Problem
skews = []
for col in num_col:
skews.append((col, skew(data[col])))
skews.sort(key=lambda x: -abs(x[1]))
for col, value in skews:
if abs(value) > 0.5:
data[col] = boxcox1p(data[col], 0.15) # boxcox
data["SalePrice"] = np.log1p(data["SalePrice"]) # log(x+1)
### Dropping columns that have high corr with other vars
cols = ["GarageArea", "GarageYrBlt", "TotRmsAbvGrd", "2ndFlrSF", "BsmtFullBath"]
data.drop(cols, axis=1, inplace=True)
num_col = list(set(num_col) - set(cols))
# Dropping Sparse Columns
cols = []
for col in data.columns:
major_ratio = data[col].value_counts().iloc[0] / len(data[col])
if major_ratio > 0.999:
cols.append(col)
data.drop(cols, axis=1, inplace=True)
print("# of columns dropped : {}".format(len(cols)))
# PCA !
# pca = PCA()
# # data[num_col] = pca.fit_transform(data[num_col])
# data[num_col[:4]] = pca.fit_transform(data[num_col])[:,:4]
# data.drop(num_col[4:], axis=1, inplace=True)
data.shape
# Dividing
data.reset_index(drop=True, inplace=True)
train = data[data["Id"] <= 1460]
test = data[data["Id"] > 1460]
X = list(set(train.columns) - set(["SalePrice"]))
y = "SalePrice"
# ### Baseline Models
def rmse_cv(model):
result = cross_val_score(
cv=5,
estimator=model,
X=train[X].values,
y=train[y].values,
scoring="neg_mean_squared_error",
)
result = (-result) ** (1 / 2)
return np.mean(result), np.std(result)
# Scoring Options of 'cross_val_score' function:
# 'accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'average_precision', 'completeness_score', 'explained_variance',
# 'f1', 'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted', 'fowlkes_mallows_score', 'homogeneity_score', 'mutual_info_score',
# 'neg_log_loss', 'neg_mean_absolute_error', 'neg_mean_squared_error', 'neg_mean_squared_log_error', 'neg_median_absolute_error',
# 'normalized_mutual_info_score', 'precision', 'precision_macro', 'precision_micro', 'precision_samples', 'precision_weighted', 'r2',
# 'recall', 'recall_macro', 'recall_micro', 'recall_samples', 'recall_weighted', 'roc_auc', 'v_measure_score'
# Note: pred and actual are already log1p-transformed targets here, so this is
# an RMSE on the log scale, i.e. effectively an RMSLE on the original SalePrice.
def rmsle(pred, actual):
result = 0
for i in range(len(pred)):
result += (pred[i] - actual[i]) ** 2
result /= len(pred)
return result ** (1 / 2)
EN = make_pipeline(
    RobustScaler(), ElasticNet(alpha=1e-3, l1_ratio=0.7, max_iter=1e4)
)  # alpha is multiplied by l1_ratio in the L1 term (see the objective below)
# EN = ElasticNet(alpha=1e-3, l1_ratio=0.8, max_iter=1e+04)
rmse_cv(EN)
# # Objective Function of ElasticNet
# 1 / (2 * n_samples) * ||y - Xw||^2_2
# + alpha * l1_ratio * ||w||_1
# + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
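# A small numeric illustration of the penalty terms in the objective above, using the
# alpha and l1_ratio from `EN` and toy coefficients (illustrative only; these names are
# not used elsewhere in the notebook):
alpha_en, l1_ratio_en = 1e-3, 0.7
w = np.array([0.5, -0.2, 0.0, 1.0])
print("L1 term:", alpha_en * l1_ratio_en * np.abs(w).sum())
print("L2 term:", 0.5 * alpha_en * (1 - l1_ratio_en) * (w**2).sum())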
lasso = make_pipeline(RobustScaler(), Lasso(alpha=5e-04, max_iter=1e04))
# lasso = Lasso(alpha=1e-04, max_iter=1e+04)
rmse_cv(lasso)
KRR = KernelRidge(
alpha=1e-2, kernel="polynomial", degree=1, coef0=2
) # kernel: 'linear', 'laplacian', 'rbf'
rmse_cv(KRR)
GBoost = GradientBoostingRegressor(
n_estimators=3000,
learning_rate=0.05,
max_depth=4,
max_features="sqrt",
min_samples_leaf=30,
min_samples_split=10,
loss="huber",
random_state=5,
)
# rmse_cv(GBoost)  # performance clearly drops when the highly correlated variables are not trimmed
model_xgb = xgb.XGBRegressor(
colsample_bytree=0.3,
gamma=0.0468,
learning_rate=0.05,
max_depth=3,
min_child_weight=1.3,
n_estimators=1024,
reg_alpha=0.3,
reg_lambda=0.4,
subsample=0.5,
silent=1,
random_state=7,
nthread=-1,
)
# rmse_cv(model_xgb)
model_lgb = lgb.LGBMRegressor(
objective="regression",
num_leaves=4,
learning_rate=0.05,
n_estimators=1024,
max_bin=40,
bagging_fraction=0.9,
bagging_freq=20,
    feature_fraction=0.1,  # lowering this from 0.6 to 0.2 had a surprisingly large effect!
feature_fraction_seed=9,
bagging_seed=9,
min_data_in_leaf=2,
min_sum_hessian_in_leaf=1,
random_state=5,
)
# rmse_cv(model_lgb)
def baseline_model():
model = Sequential()
model.add(
Dense(64, input_dim=len(X), kernel_initializer="normal", activation="relu")
)
# model.add(BatchNormalization())
model.add(Dense(8, input_dim=64, kernel_initializer="normal", activation="relu"))
model.add(Dense(1, kernel_initializer="normal"))
opt = optimizers.Adam(learning_rate=0.005)
model.compile(loss="mean_squared_error", optimizer=opt)
return model
nn = KerasRegressor(
build_fn=baseline_model, epochs=3000, batch_size=len(train), verbose=0
)
# rmse_cv(nn)
rf = RandomForestRegressor(
n_estimators=128, min_samples_split=4, min_samples_leaf=2, random_state=42
)
rmse_cv(rf)
rf
#
# ### Meta Model
class Meta_Regressor(BaseEstimator):
def __init__(self, base_models, meta_models):
        self.base_models = base_models  # sklearn's get_params/clone requires the attribute name to match the __init__ argument name
self.meta_models = meta_models
def fit(self, X, y):
self.base_models_ = [[] for _ in self.base_models]
self.meta_models_ = clone(self.meta_models)
Kf = KFold(n_splits=5, shuffle=True, random_state=5)
out_fold_pred = np.zeros((len(X), len(self.base_models)))
for i, model in enumerate(self.base_models):
for train_idx, val_idx in Kf.split(X):
model = clone(self.base_models[i])
model.fit(X[train_idx], y[train_idx])
pred = model.predict(X[val_idx])
out_fold_pred[val_idx, i] = pred
self.base_models_[i].append(model)
self.meta_models_.fit(X=out_fold_pred, y=y)
def predict(self, X):
meta_features = np.column_stack(
[
np.column_stack([model.predict(X) for model in sub_models]).mean(axis=1)
for sub_models in self.base_models_
]
)
scores = self.meta_models_.predict(meta_features)
return scores
meta_regressor = Meta_Regressor(
base_models=[lasso, KRR, GBoost, model_xgb, model_lgb], meta_models=EN
)
rmse_cv(meta_regressor)
meta_regressor = Meta_Regressor(
base_models=[EN, KRR, GBoost, model_xgb, model_lgb], meta_models=lasso
)
rmse_cv(meta_regressor)
meta_regressor = Meta_Regressor(
base_models=[lasso, KRR, GBoost, model_xgb, EN], meta_models=model_lgb
)
rmse_cv(meta_regressor)
# With random_state fixed, the results are now fully reproducible
# 1. Redo preprocessing without the new columns and compare the results
# 2. Based on the outcome of 1, think of further ways to improve performance
# 3. Validate the ensembles as well
# ### Average Ensemble
class Average_Ensemble(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, models):
        self.models = models  # sklearn's get_params/clone requires the attribute name to match the __init__ argument name
def fit(self, X, y):
self.models_ = [clone(model) for model in self.models]
for model in self.models_:
model.fit(X, y)
def predict(self, x):
scores = np.column_stack([model.predict(x) for model in self.models_])
return scores.mean(axis=1)
average_ensemble = Average_Ensemble(models=[EN, KRR, GBoost, model_xgb, model_lgb])
rmse_cv(average_ensemble)
rmse_cv(average_ensemble)
# ### Weighted Ensemble with Meta Model
class Weighted_Ensemble(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, models):
self.models = models
def fit(self, X, y):
self.models_ = [clone(model) for model in self.models]
for model in self.models_:
model.fit(X, y)
def predict(self, x):
results = np.zeros(len(x))
scores = [model.predict(x) for model in self.models_]
# for i, model in enumerate(scores):
# results += scores[i] * self.weights[i]
return scores
meta_regressor = Meta_Regressor(
base_models=[lasso, KRR, GBoost, model_xgb, model_lgb, nn, rf], meta_models=EN
)
weighted_ensemble = Weighted_Ensemble(
models=[lasso, KRR, GBoost, model_xgb, model_lgb, nn, rf, EN, meta_regressor]
)
train_x, test_x, train_y, test_y = train_test_split(
train[X].values, train[y].values, test_size=0.3, random_state=4
)
weighted_ensemble.fit(train_x, train_y)
scores = weighted_ensemble.predict(test_x)
weights = [0.1, 0.2, 0.2, 0.06, 0.09, 0.01, 0.04, 0.2, 0.1] #
# lasso, KRR, GBoost, xgb, lgb, nn ,rf EN meta
score = np.zeros(len(scores[0]))
for i, model in enumerate(scores):
score += scores[i] * weights[i]
rmsle(score, test_y)
meta_regressor = Meta_Regressor(base_models=[lasso, KRR, GBoost], meta_models=EN)
weighted_ensemble = Weighted_Ensemble(models=[model_xgb, model_lgb, meta_regressor])
train_x, test_x, train_y, test_y = train_test_split(
train[X].values, train[y].values, test_size=0.3, random_state=4
)
weighted_ensemble.fit(train_x, train_y)
scores = weighted_ensemble.predict(test_x)
weights = [0.05, 0.05, 0.9]
score = np.zeros(len(scores[0]))
for i, model in enumerate(scores):
score += scores[i] * weights[i]
rmsle(score, test_y)
# ### Final Prediction
# meta_ensemble
meta_regressor = Meta_Regressor(
base_models=[lasso, KRR, GBoost, model_xgb, model_lgb, nn, rf], meta_models=EN
)
weighted_ensemble = Weighted_Ensemble(models=[KRR, GBoost, model_xgb, EN])
weighted_ensemble.fit(train[X].values, train[y].values)
weights = [0.2, 0.2, 0.05, 0.55] #
scores = weighted_ensemble.predict(test[X].values)
score = np.zeros(len(scores[0]))
for i, model in enumerate(scores):
score += scores[i] * weights[i]
pred = np.expm1(score)
# meta_regerssor
meta_regressor = Meta_Regressor(
base_models=[lasso, KRR, GBoost, model_xgb, model_lgb], meta_models=EN
) # the model with the highest Val score
meta_regressor.fit(train[X].values, train[y].values)
pred = np.expm1(meta_regressor.predict(test[X].values))
submission = pd.DataFrame({"Id": range(1461, 2920), "SalePrice": pred})
submission.to_csv("submission.csv", index=False)
|
# # Explanation
# > This dataset includes 9 columns.
# > However, the available variables are limited; for example, there is no Player Shoot Power, PointCount, AssistCount, etc., so some of the analysis will be brief.
# 1. First we will study the columns and the data
# 1. Then some analysis using the pandas library, to get to know pandas a bit better
# 1. After that, basic visualization with the seaborn library
# 1. And lastly, a nicer visualization with the Pyplot library
# > So let's start
# [1. Reading the data and basic exploration](#1)
# [2. Analysing with Pandas](#2)
# [3. Basic Visualization with Seaborn](#3)
# [4. Last Visualization with PyPlot](#4)
#
# ## Reading the data and basic exploration
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# import plotly.plotly as py
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
import plotly.graph_objs as go
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
nbadata = pd.read_csv(
"../input/nba-all-star-game-20002016/NBA All Stars 2000-2016 - Sheet1.csv"
)
nbadata.head()
nbadata.info()
# note: some columns (e.g. HT, WT) are stored as object instead of int or float, so we'll have to fix them before use
nbadata.describe()  # gives summary statistics ---> ".describe()" --> not strictly required yet :)
# We won't use the Selection Type column, so we'll drop it.
nbadata.drop(["Selection Type"], axis=1, inplace=True)
nbadata.head()
#
# ## Analysing with Pandas
# Which positions are there in the data?
nbadata.Pos.value_counts()
# Filtering DataFrame
JamesHarden = nbadata[(nbadata.Player == "James Harden") & (nbadata.Year == 2014)]
JamesHarden
# > A function doesn't always need "def"; a plain loop or a list comprehension works too:
# for i in age:
# print(2020-i)
#
# OR
# [2020-i for i in age]
#
# > I can't work with inches instead of meters, or pounds instead of kilograms, so I will convert those values in the dataframe.
# Pounds to -----> KG
WTList = list(nbadata["WT"])
KGList = [round(float(i) * 0.45, 1) for i in WTList]
# KGList
nbadata["KGValues"] = KGList
nbadata.head()
# I could use the apply() method, but I prefer doing it this way.
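# For reference, the equivalent one-liner with apply() (produces the same KGValues column):
nbadata["KGValues"] = nbadata["WT"].apply(lambda w: round(float(w) * 0.45, 1))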
# Feet and Inch to Cm
HTData = list(nbadata["HT"])
CmData = [
    round((float(ht[0]) * 12 + float(ht[2:])) * 2.54, 1)  # HT looks like "6-9"; everything after the separator is inches
    for ht in HTData
]
# CmData
nbadata["CmValues"] = CmData
nbadata.head()
# Find the ratio of CmValues above 200 cm to those below 200 cm, as a percentage
MoreThen200 = len(nbadata[nbadata.CmValues > 200])
LessThen200 = len(nbadata[nbadata.CmValues < 200])
# MoreThen200
# LessThen200 # we can look results if we want
Ratio = round((MoreThen200 * 100) / LessThen200, 3)
Ratio
# the result is about 68%
#
# ## Visualization with Seaborn
Teamsin2016 = nbadata[nbadata.Year == 2016].Team.value_counts()
Teamsin2016
Teamsin2016 = list(nbadata[nbadata.Year == 2016].Team.value_counts())
Teamsin2016
# Visualizate Teams Count of NbaAllStar2016
teamdata = list(nbadata[nbadata.Year == 2016].Team.unique())
teamdata
# ### Bar Plot
data = pd.DataFrame({"team_name_in2016allstar": teamdata, "count_of_team": Teamsin2016})
# visualization
plt.figure(figsize=(12, 8))
sns.barplot(x=data.team_name_in2016allstar, y=data.count_of_team)
plt.xticks(rotation=90)
plt.xlabel("Team Names")
plt.ylabel("Count of Teams")
plt.title("Team Name in 2016 All Star")
plt.show()
# ### Pie Plot
labels = teamdata
explode = np.zeros(len(teamdata))
sizes = Teamsin2016
plt.figure(figsize=(7, 7))
plt.pie(sizes, explode=explode, labels=labels, autopct="%1.1f%%")
plt.title("Team Counts Ratio in 2016 All Star", color="blue", fontsize=15)
plt.show()
# ### Point Plot
# > A point plot isn't really the best fit for this data, but I'll use it anyway for practice and to learn more
# Team counts of player between of 2010 and 2016
year_list0 = list(nbadata.Year.unique())
year_list = year_list0[:7]
year_list = year_list[::-1]
hawks = []
lakers = []
bulls = []
clippers = []
clevland = []
for i in year_list:
x = nbadata[nbadata.Year == i]
hawks.append(len(x[x.Team == "Atlanta Hawks"]))
lakers.append(len(x[x.Team == "Los Angeles Lakers"]))
bulls.append(len(x[x.Team == "Chicago Bulls"]))
clippers.append(len(x[x.Team == "Los Angeles Clippers"]))
clevland.append(len(x[x.Team == "Cleveland Cavaliers"]))
df = pd.DataFrame(
{
"Atlanta Hawks": hawks,
"Los Angeles Lakers": lakers,
"Chicago Bulls": bulls,
"Los Angeles Clippers": clippers,
"Cleveland Cavaliers": clevland,
"Year": year_list,
}
)
# Visualization
f, ax1 = plt.subplots(figsize=(16, 10))
sns.pointplot(x="Year", y="Atlanta Hawks", data=df, color="orange", alpha=0.9)
sns.pointplot(x="Year", y="Los Angeles Lakers", data=df, color="yellow", alpha=0.9)
sns.pointplot(x="Year", y="Chicago Bulls", data=df, color="blue", alpha=0.4)
sns.pointplot(x="Year", y="Los Angeles Clippers", data=df, color="grey", alpha=0.8)
sns.pointplot(x="Year", y="Cleveland Cavaliers", data=df, color="purple", alpha=0.9)
plt.text(0, 4, "Atlanta Hawks", color="orange", fontsize=17, style="italic")
plt.text(0, 3.8, "Los Angeles Lakers", color="yellow", fontsize=17, style="italic")
plt.text(0, 3.6, "Chicago Bulls", color="blue", fontsize=17, style="italic")
plt.text(0, 3.4, "Los Angeles Clippers", color="grey", fontsize=17, style="italic")
plt.text(0, 3.2, "Cleveland Cavaliers", color="purple", fontsize=17, style="italic")
plt.xlabel("Years")
plt.ylabel("Players Count in Team")
plt.title("Players Count in Team Between of 2010 and 2016", fontsize=20)
plt.grid()
plt.show()
# ### Joint Plot
#
sns.jointplot("Year", "Atlanta Hawks", data=df, color="orange", ratio=2)
sns.jointplot("Year", "Los Angeles Lakers", data=df, color="red", ratio=2)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_profiling as pp
df = pd.read_csv("/kaggle/input/wine-quality-dataset/WineQT.csv")
df.head()
# Removing Id as it won't help in model building
df.pop("Id")
df.head()
df.describe()
df.info()
# Using Pandas Profiling
pp.ProfileReport(df)
# Using Pairplot to get knowledge about data
sns.pairplot(df, hue="quality", height=3)
# Removing duplicate values from the dataframe
df = df.drop_duplicates()
# Plotting histograms instead of boxplots to get a visual intuition of outliers, since the dataset is small
sns.histplot(x=df["fixed acidity"])
sns.histplot(x=df["volatile acidity"])
sns.histplot(x=df["citric acid"])
sns.histplot(x=df["residual sugar"])
sns.histplot(x=df["chlorides"])
# Removing Outliers
df.drop(df[df["chlorides"] >= 0.3].index, inplace=True)
df.describe()
df.info()
# Removing Outliers
df.drop(df[df["residual sugar"] >= 10].index, inplace=True)
sns.histplot(x=df["residual sugar"])
df.info()
sns.histplot(x=df["free sulfur dioxide"])
sns.histplot(x=df["total sulfur dioxide"])
# Removing Outliers
df.drop(df[df["total sulfur dioxide"] >= 250].index, inplace=True)
df.drop(df[df["free sulfur dioxide"] >= 60].index, inplace=True)
df.info()
sns.histplot(x=df["density"])
sns.histplot(x=df["pH"])
sns.histplot(x=df["sulphates"])
# Removing Outliers
df.drop(df[df["sulphates"] >= 1.5].index, inplace=True)
df.info()
sns.histplot(x=df["alcohol"])
# # **Model Building**
# Removing features with low correlation with quality, since they won't help in model building
df.corr()
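# A hedged sketch of doing the same selection programmatically: list the features whose
# absolute correlation with quality falls below a threshold (0.1 is only an illustrative
# cut-off, not the criterion behind the manual drops below).
weak_corr = df.corr()["quality"].abs() < 0.1
print(df.corr()["quality"][weak_corr])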
df.pop("pH")
df.pop("residual sugar")
df.pop("free sulfur dioxide")
df.corr()
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
Y = df.pop("quality")
X = df
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)
X_train
Y_train
# ## **Random Forest**
rf = RandomForestRegressor(max_depth=15, n_estimators=80)
rf.fit(X_train, Y_train)
rf.score(X_train, Y_train)
rf.score(X_test, Y_test)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
wd = "/kaggle/input/jamp-hackathon-drive-1/train_set/"
images_dir = os.listdir(wd)
data = []
for i in images_dir:
path = os.path.join(wd, i)
path_list = os.listdir(path)
for j in path_list:
data.append(os.path.join(path, j))
df = pd.DataFrame()
df["Images"] = data
classes = df["Images"].str.split("/", n=6, expand=True)[5]
df["Label"] = classes
df = df.sample(frac=1).reset_index(drop=True)
df.head()
df["Label"].value_counts()
## train test split
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size=0.2, stratify=df["Label"])
sample = plt.imread(train["Images"].iloc[0])
plt.imshow(sample)
sample.shape
# importing required libraries
from keras.models import Sequential
get_ipython().magic("matplotlib inline")
import matplotlib.pyplot as plt
import keras
from keras.layers import Dense
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
from tqdm import tqdm
import pickle
train_img = []
for i in tqdm(df["Images"]):
temp_img = image.load_img(i, target_size=(224, 224))
temp_img = image.img_to_array(temp_img)
train_img.append(temp_img)
# converting train images to array and applying mean subtraction processing
train_img = np.array(train_img)
train_img = preprocess_input(train_img)
test_wd = "/kaggle/input/jamp-hackathon-drive-1/test_set/"
test_dir = os.listdir(test_wd)
test_data = []
for i in test_dir:
test_data.append(os.path.join(test_wd, i))
test_df = pd.DataFrame()
test_df["Images"] = test_data
test_df.head()
test_img = []
for i in tqdm(test_df["Images"]):
temp_img = image.load_img(i, target_size=(224, 224))
temp_img = image.img_to_array(temp_img)
test_img.append(temp_img)
test_img = np.array(test_img)
test_img = preprocess_input(test_img)
model = VGG16(weights="imagenet", include_top=False)
train_img.shape, test_img.shape
features_train = model.predict(train_img)
features_test = model.predict(test_img)
features_train.shape, features_test.shape
train_x = features_train.reshape(features_train.shape[0], -1)
test_x = features_test.reshape(features_test.shape[0], -1)
# converting target variable to array
train_y = np.asarray(df["Label"])
# performing one-hot encoding for the target variable
train_y = pd.get_dummies(train_y)
train_y = np.array(train_y)
# creating training and validation set
from sklearn.model_selection import train_test_split
X_train, X_valid, Y_train, Y_valid = train_test_split(
train_x, train_y, test_size=0.3, random_state=42
)
test_y = np.asarray(test["Label"])
test_y = pd.get_dummies(test_y)
test_y = np.array(test_y)
train_x.shape, train_y.shape, test_x.shape, test_y.shape
# creating a mlp model
from keras.layers import Dense, Activation, Dropout
model = Sequential()
model.add(Dense(1000, input_dim=25088, activation="relu", kernel_initializer="uniform"))
model.add(Dropout(0.3))  # dropout layers must be add()-ed to the model to take effect
model.add(Dense(500, activation="sigmoid"))
model.add(Dropout(0.4))
model.add(Dense(150, activation="sigmoid"))
model.add(Dropout(0.2))
model.add(Dense(units=2))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(
X_train, Y_train, epochs=20, batch_size=128, validation_data=(X_valid, Y_valid)
)
model.predict(X_valid)
model.evaluate(X_valid, Y_valid)
scores = model.evaluate(test_x, test_y)
print(f"Accuracy is {scores[1]*100} %")
output = np.argmax(model.predict(test_x), axis=1)
submission = pd.DataFrame()
submission["name"] = (
test_df["Images"]
.str.split("/", n=6, expand=True)[5]
.str.split(".", n=6, expand=True)[0]
)
submission["class"] = output
submission.head()
submission.to_csv("submission.csv", index=False)
|
from matplotlib import pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import PIL.Image as Image, PIL.ImageDraw as ImageDraw, PIL.ImageFont as ImageFont
import random
import os
import cv2
import gc
from tqdm.auto import tqdm
import numpy as np
import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.models import clone_model
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import plot_model
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import datetime as dt
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train_data = pd.read_csv("/kaggle/input/bengaliai-cv19/train.csv")
train_data = pd.merge(
pd.read_parquet(f"/kaggle/input/bengaliai-cv19/train_image_data_0.parquet"),
train_data,
on="image_id",
).drop(["image_id"], axis=1)
train_labels = train_data[
["grapheme_root", "vowel_diacritic", "consonant_diacritic", "grapheme"]
]
train_data = train_data.drop(
["grapheme_root", "vowel_diacritic", "consonant_diacritic", "grapheme"], axis=1
)
def resize(df, size=64, need_progress_bar=True):
resized = {}
for i in range(df.shape[0]):
image = cv2.resize(df.loc[df.index[i]].values.reshape(137, 236), (size, size))
resized[df.index[i]] = image.reshape(-1)
resized = pd.DataFrame(resized).T
return resized
train_data = resize(train_data) / 255
train_data = train_data.values.reshape(-1, 64, 64, 1)
model_dict = {
"grapheme_root": Sequential(),
"vowel_diacritic": Sequential(),
"consonant_diacritic": Sequential(),
}
for model_type, model in model_dict.items():
model.add(Conv2D(64, 7, activation="relu", padding="same", input_shape=[64, 64, 1]))
model.add(layers.BatchNormalization(momentum=0.15))
model.add(MaxPooling2D(2))
model.add(Conv2D(128, 3, activation="relu", padding="same"))
model.add(Conv2D(128, 3, activation="relu", padding="same"))
model.add(MaxPooling2D(2))
model.add(Conv2D(256, 3, activation="relu", padding="same"))
model.add(Conv2D(256, 3, activation="relu", padding="same"))
model.add(MaxPooling2D(2))
model.add(Flatten())
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.5))
if model_type == "grapheme_root":
model.add(layers.Dense(168, activation="softmax", name="root_out"))
elif model_type == "vowel_diacritic":
model.add(layers.Dense(11, activation="softmax", name="vowel_out"))
elif model_type == "consonant_diacritic":
model.add(layers.Dense(7, activation="softmax", name="consonant_out"))
model.compile(
optimizer="adam", loss=["categorical_crossentropy"], metrics=["accuracy"]
)
batch_size = 32
epochs = 5
history_list = []
model_types = ["grapheme_root", "vowel_diacritic", "consonant_diacritic"]
for target in model_types:
Y_train = train_labels[target]
Y_train = pd.get_dummies(Y_train).values
x_train, x_test, y_train, y_test = train_test_split(
train_data, Y_train, test_size=0.1, random_state=123
)
datagen = ImageDataGenerator()
datagen.fit(x_train)
history = model_dict[target].fit_generator(
datagen.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
)
history_list.append(history)
for i in range(3):
    plt.figure()
    plt.plot(
np.arange(0, epochs),
history_list[i].history["accuracy"],
label="train_accuracy",
)
plt.plot(
np.arange(0, epochs),
history_list[i].history["val_accuracy"],
label="val_accuracy",
)
plt.title(model_types[i])
plt.xlabel("Epoch #")
plt.ylabel("Accuracy")
plt.legend(loc="lower right")
plt.show()
preds_dict = {"grapheme_root": [], "vowel_diacritic": [], "consonant_diacritic": []}
target = [] # model predictions placeholder
row_id = [] # row_id place holder
for i in range(4):
print("Parquet: {}".format(i))
df_test_img = pd.read_parquet(
"/kaggle/input/bengaliai-cv19/test_image_data_{}.parquet".format(i)
)
df_test_img.set_index("image_id", inplace=True)
X_test = resize(df_test_img, need_progress_bar=False) / 255
X_test = X_test.values.reshape(-1, 64, 64, 1)
for i, p in preds_dict.items():
preds = model_dict[i].predict(X_test)
preds_dict[i] = np.argmax(preds, axis=1)
for k, id in enumerate(df_test_img.index.values):
for i, comp in enumerate(model_types):
id_sample = id + "_" + comp
row_id.append(id_sample)
target.append(preds_dict[comp][k])
del df_test_img
del X_test
gc.collect()
df_sample = pd.DataFrame(
{"row_id": row_id, "target": target}, columns=["row_id", "target"]
)
df_sample.to_csv("submission.csv", index=False)
df_sample.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Read csv
world_location_dataset = pd.read_csv("../input/world-cities-datasets/worldcities.csv")
world_location_dataset
# Get US state
us_location_dataset = world_location_dataset[world_location_dataset["iso2"] == "US"]
us_location_dataset
# Get top 15 cities based on index of US state
selected_cities_location = us_location_dataset.head(15)
selected_cities_location
# for each cities, get the latitude and longitude only
selected_cities_location = selected_cities_location[["city", "lat", "lng"]]
selected_cities_location = selected_cities_location.reset_index()
selected_cities_location
# Calculate distance from/to each cities (Euclidean)
def distance(x1, y1, x2, y2):
    return round(((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5, 2)
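# Since lat/lng are in degrees, a great-circle (haversine) distance in kilometres would be
# a more faithful measure than planar Euclidean distance on raw coordinates. This is an
# optional alternative sketch and is not used in the distance matrix below.
def haversine_km(lat1, lon1, lat2, lon2):
    r = 6371.0  # mean Earth radius in km
    phi1, phi2 = np.radians(lat1), np.radians(lat2)
    dphi = np.radians(lat2 - lat1)
    dlmb = np.radians(lon2 - lon1)
    a = np.sin(dphi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(dlmb / 2) ** 2
    return round(2 * r * np.arcsin(np.sqrt(a)), 2)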
# Create distance matrix
dmat = np.zeros((15, 15))
for i in range(15):
    for j in range(i + 1, 15):
        x1, y1 = selected_cities_location.loc[i, ["lat", "lng"]]
        x2, y2 = selected_cities_location.loc[j, ["lat", "lng"]]
        calc = distance(x1, y1, x2, y2)
        dmat[i][j] = calc
        dmat[j][i] = calc
# Visualization Distance Matrix
dmat_df = pd.DataFrame(dmat)
dmat_df
# Visualization
fig, ax = plt.subplots()
ax.scatter(selected_cities_location["lat"], selected_cities_location["lng"])
for i, txt in enumerate(selected_cities_location["city"]):
ax.annotate(
txt, (selected_cities_location["lat"][i], selected_cities_location["lng"][i])
)
ax.title.set_text("15 US Cities")
ax.set_xlabel("Latitude")
ax.set_ylabel("Longitude")
# Ref: https://stackoverflow.com/questions/14432557/matplotlib-scatter-plot-with-different-text-at-each-data-point
|
# # Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Read the data and prepare it for further analysis
df = pd.read_csv("../input/lending-club/accepted_2007_to_2018Q4.csv.gz")
# df.head()
with pd.option_context("display.max_columns", None):
display(df.head())
df.info()
# Let's clean the data
# Columns where every value is unique can be dropped right away,
# since everything is already reflected in sub_grade
df = df.drop(
[
"id",
"policy_code",
"out_prncp",
"out_prncp_inv",
"url",
"pymnt_plan",
"hardship_flag",
"grade",
],
axis=1,
)
# Columns with too many missing values are dropped
# To simplify the classification task,
# columns must be at least 90% filled,
# so only about 50 columns are kept.
# ((df.isnull().sum()) / len(df) * 100).plot.bar(title='Percentage of missing values')
percent = df.isnull().sum() / len(df)
nan_cols = percent.iloc[np.where(np.array(percent) > 0.1)].index
print(len(nan_cols), "- dropped")
df = df.drop(nan_cols, axis=1)
# Let's look at `loan_status`
sns.countplot(y="loan_status", data=df)
# We are only interested in defaulted (`Charged Off`) and fully repaid (`Fully Paid`) loans. We'll label them 0 and 1, respectively, and drop everything else
df = df[(df["loan_status"] == "Fully Paid") | (df["loan_status"] == "Charged Off")]
df["label"] = df.apply(lambda r: 1 if r["loan_status"] == "Fully Paid" else 0, axis=1)
df = df.drop("loan_status", axis=1)
df["label"].head(5)
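# A quick added check (not in the original notebook): the class balance of the new
# `label` column, i.e. the share of fully repaid vs. charged-off loans.
print(df["label"].value_counts(normalize=True))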
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test = pd.read_csv("../input/titanic/test.csv")
train = pd.read_csv("../input/titanic/train.csv")
train["set"] = "train"
test["set"] = "test"
test.info()
test["Survived"] = np.nan
data = pd.concat([train, test], sort=True)
# finding out missing values
missing = data.isna().sum().sort_values(ascending=False)
percentage = (data.isna().sum() / data.isna().count()).sort_values(ascending=False)
values = pd.concat([missing, percentage], axis=1, keys=("missing", "percentage"))
values
# the Cabin variable has little significance and a lot of NAs, so drop it
del data["Cabin"]
del data["PassengerId"]
# EDA
sns.countplot(data["Survived"])
# Distribution of survival according to Age
sns.boxplot(data["Survived"], data["Age"])
# Distribution of survival according to Age and Gender
sns.boxplot(data["Survived"], data["Age"], hue=data["Sex"])
# Distribution of survival according to Fare
sns.boxplot(data["Survived"], data["Fare"])
# Survival appears to increase with higher fares
sns.catplot(x="Sex", y="Survived", hue="Pclass", kind="bar", data=data)
g = sns.catplot(
x="Fare",
y="Survived",
row="Pclass",
kind="box",
orient="h",
height=1.5,
aspect=4,
data=data.query("Fare > 0"),
)
g.set(xscale="log")
# NA treatment for Age
# Distribution of age according to Pclass
sns.boxplot(data["Pclass"], data["Age"])
# Imputing NA in Age variable
data["Age"].fillna(
data.groupby(["Sex", "Pclass"])["Age"].transform("median"), inplace=True
)
# Check NA in Age variable
print(data["Age"].isna().sum())
# Imputing NA in Fare variable with the median Fare of the corresponding Pclass
data["Fare"].fillna(data.groupby(["Pclass"])["Fare"].transform("median"), inplace=True)
print(data["Fare"].isna().sum())
# Distribution of Embarked with Pclass
sns.countplot(data["Embarked"], hue=data["Pclass"])
# Replace NA in Embarked with highest frequency Pclass
data["Embarked"].fillna("S", inplace=True)
data = pd.get_dummies(data, columns=["Embarked"], drop_first=True)
data = pd.get_dummies(data, columns=["Sex"], drop_first=True)
train_set = data[data["set"] == "train"]
del train_set["set"]
train_set.info()
sns.heatmap(train_set.isnull(), yticklabels=False, cbar=False, cmap="viridis")
test_set = data[data["set"] == "test"]
del test_set["set"]
del test_set["Survived"]
del train_set["Name"]
del test_set["Name"]
del train_set["Ticket"]
del test_set["Ticket"]
# Finally, lets look the correlation of df_train
plt.figure(figsize=(15, 12))
plt.title("Correlation of Features for Train Set")
sns.heatmap(train_set.astype(float).corr(), vmax=1.0, annot=True)
plt.show()
# Model Building
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
train_set.drop("Survived", axis=1),
train_set["Survived"],
test_size=0.30,
random_state=101,
)
logmodel = LogisticRegression(max_iter=1000)  # raise max_iter to avoid convergence warnings
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
test_predictions = logmodel.predict(test_set)
submission = pd.DataFrame(
    {"PassengerId": test["PassengerId"], "Survived": test_predictions.astype(int)}
)
# Convert DataFrame to a csv file that can be uploaded
# This is saved in the same directory as your notebook
filename = "Titanic_Predictions.csv"
submission.to_csv(filename, index=False)
print("Saved file: " + filename)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=35, max_depth=5, random_state=1)
model_rf = classifier.fit(X_train, y_train)
prediction_rf = model_rf.predict(X_test)
print(classification_report(y_test, prediction_rf))
sub_predict_rf = model_rf.predict(test_set)
submission1 = pd.DataFrame(
    {"PassengerId": test["PassengerId"], "Survived": sub_predict_rf.astype(int)}
)
submission1.to_csv("Titanic_prediction1.csv", index=False)
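# A hedged sketch (not part of the original notebook): comparing the two classifiers with
# 5-fold cross-validation on the full training set instead of a single hold-out split.
from sklearn.model_selection import cross_val_score

X_full = train_set.drop("Survived", axis=1)
y_full = train_set["Survived"]
for name, clf in [
    ("logistic regression", LogisticRegression(max_iter=1000)),
    ("random forest", RandomForestClassifier(n_estimators=35, max_depth=5, random_state=1)),
]:
    scores = cross_val_score(clf, X_full, y_full, cv=5, scoring="accuracy")
    print("{}: mean accuracy {:.3f} (+/- {:.3f})".format(name, scores.mean(), scores.std()))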
|
# ## Current & Forecast weather data collection
# ### 1. Overview
# In this notebook, I am exploring how to summarize and visualize weather forecast information in a way that's easy and useful for people. For example, given all the weather data we could retrieve, which ones are the most relevant to people's everyday lives? What are some easier ways for people to digest and act upon the information? What visualizations or prompts would be helpful?
# ### 2. Data Profile
# [OpenWeather API](https://openweathermap.org/api) provides a number of weather data collections, including current weather, historical data and forecasts. For example, you can search 16-day weather forecasts with daily average parameters by city name, geographic coordinates or ZIP code. All weather data can be obtained in JSON and XML formats. A Free account is limited to no more than 60 calls per minute, and some of the data collections are available only with a paid subscription plan. A Free account may access the current weather API, the 5 day/3 hour forecast API, weather maps 1.0, the UV index and weather alerts.
# In this mini project, I am specifically exploring the [5 day weather forecast API](https://openweathermap.org/forecast5).
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# ### 3. Analysis
# I am taking Seattle as the example city to explore the data collection throughout this mini project.
# First of all, I am accessing the API to retrieve data. Given the city name "Seattle", the API responds with weather forecast information for 5 days in every 3 hours, including temperature, pressure, sea level, humidity and other weather conditions.
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
secret_value_0 = user_secrets.get_secret("openWeather")
import requests, json
api_key = secret_value_0
base_url = "http://api.openweathermap.org/data/2.5/forecast?"
city_name = "Seattle"
complete_url = base_url + "q=" + city_name + "&appid=" + api_key
response = requests.get(complete_url)
results = response.json()
results
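# A small optional step (an addition, not in the original notebook): flattening the
# 3-hourly forecast entries into a pandas DataFrame makes the later summaries easier to inspect.
forecast_df = pd.DataFrame(
    [
        {
            "time": entry["dt_txt"],
            "temp_k": entry["main"]["temp"],
            "feels_like_k": entry["main"]["feels_like"],
            "condition": entry["weather"][0]["main"],
        }
        for entry in results["list"]
    ]
)
forecast_df.head()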
# Next, based on the retrieved data, I would summarize the data based on some of the most important questions people have when checking future weather information. The questions are as follows:
# 1. Is it going to be warm or cold for the next 5 days?
# 2. Is it going to rain for the next 5 days?
# 3. If it's going to be cold, will it snow for the next 5 days?
# ### Q1. Is it going to be warm or cold for the next 5 days?
# Here's a plot of how the temperature and feels-like temperature (in degrees Celsius) will change over the next 5 days (temperature in red, feels-like temperature in blue). Interestingly, it generally feels colder than it really is.
from matplotlib.pyplot import plot
import pytemperature
feels_like_temp = []
for i in range(len(results["list"])):
celsius = pytemperature.k2c(
results["list"][i]["main"]["feels_like"]
) # convert temperature in Kelvin to Celsius
feels_like_temp.append(celsius)
temp = []
for i in range(len(results["list"])):
celsius = pytemperature.k2c(
results["list"][i]["main"]["temp"]
) # convert temperature in Kelvin to Celsius
temp.append(celsius)
plot(feels_like_temp, color="b")
plot(temp, color="r")
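# Optional polish (an assumption, not part of the original notebook): label the x-axis with
# the forecast timestamps so the plot is easier to read; every 8th entry is one per day
# (the forecast comes in 3-hour steps).
import matplotlib.pyplot as plt
times = [entry["dt_txt"] for entry in results["list"]]
plt.figure(figsize=(10, 4))
plt.plot(feels_like_temp, color="b")
plt.plot(temp, color="r")
plt.xticks(range(0, len(times), 8), [t[:10] for t in times[::8]], rotation=45)
plt.ylabel("Temperature (°C)")
plt.show()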
# ### Q2. Is it going to rain for the next 5 days?
# This is an everlasting question that every Seattleite asks every single day. Here's a list of the times it is expected to rain over the next 5 days; the severity of the rain is identified as well.
count = 0
for i in range(len(results["list"])):
if "Rain" in results["list"][i]["weather"][0]["main"]:
print(
"%s at %s"
% (
results["list"][i]["weather"][0]["description"],
results["list"][i]["dt_txt"],
)
)
count = count + 1
if count == 0:
print("incredible! no rain in the next 5 days in Seattle!")
else:
print("remember to bring an umbrella and wear your rain boots!")
# ### Q3. If it's going to be cold, will it snow for the next 5 days?
#
count = 0
for i in range(len(results["list"])):
if "Snow" in results["list"][i]["weather"][0]["main"]:
print(
"%s at %s"
% (
results["list"][i]["weather"][0]["description"],
results["list"][i]["dt_txt"],
)
)
count = count + 1
if count == 0:
print("snow day is not happening for the next 5 days!")
else:
print("stay warm and build a snowman!")
|
# # Linear regression (predicting a continuous value):
# **Question:**
# > Weather in Szeged 2006-2016: Is there a relationship between humidity and temperature? What about between humidity and apparent temperature? Can you predict the apparent temperature given the humidity?
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
import operator
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
weatherHistory = pd.read_csv("../input/szeged-weather/weatherHistory.csv")
weatherHistory.head(2)
weatherHistory.info()
weatherHistory.describe().T
# Extract the 3 columns 'Temperature (C)', 'Apparent Temperature (C)' and 'Humidity' for a cleaner view
weatherHistory_df = weatherHistory[
["Temperature (C)", "Apparent Temperature (C)", "Humidity"]
]
# Rename the columns for convenience
weatherHistory_df.columns = ["Temperature", "Apparent_Temperature", "Humidity"]
weatherHistory_df = weatherHistory_df[:500]  # limit to 500 rows to speed up the regression fits
weatherHistory_df.head(2)
# See picture with scatter or plot method
sns.pairplot(weatherHistory_df, kind="reg")
# see how many null values we have
weatherHistory_df.isnull().sum()
# Features chose
y = np.array(weatherHistory_df["Humidity"]).reshape(-1, 1)
X = np.array(weatherHistory_df["Apparent_Temperature"]).reshape(-1, 1)
# We use just the 'Apparent_Temperature' feature; you could also use 'Temperature'
# Split the data into an 80% train set and a 20% test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=42
)
# # 1.Linear Regression
from sklearn.linear_model import LinearRegression
lin_df = LinearRegression()
lin_df.fit(X_train, y_train)
y_pred = lin_df.predict(X_test)  # predictions of the linear model on the test set
r2_test = lin_df.score(X_test, y_test)  # R^2 score on the test set (not classification accuracy)
print("Linear Regression Model R2 Score: " + "{:.1%}".format(r2_test))
from sklearn.metrics import mean_squared_error, r2_score
print("R2 Score: " + "{:.3}".format(r2_score(y_test, y_pred)))
# Finally draw figure of Linear Regression Model
plt.scatter(X_test, y_test, color="r")
plt.plot(X_test, y_pred, color="g")
plt.show()
# # 2.Multiple Linear Regression
mlin_df = LinearRegression()
mlin_df = mlin_df.fit(X_train, y_train)
mlin_df.intercept_ # constant b0
mlin_df.coef_ # variable coefficient
y_pred = mlin_df.predict(X_train)  # predictions on the train set
rmse = np.sqrt(mean_squared_error(y_train, y_pred))
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.3}".format(r2_score(y_train, y_pred)))
# these are training-set scores, not test scores
# ## 2.1.Multiple Linear Regression Model Tunning
# cross-validation gives a better and clearer estimate
cross_val_score(mlin_df, X, y, cv=10, scoring="r2").mean()
mlin_df.score(X_train, y_train) # r2 value
np.sqrt(
-cross_val_score(mlin_df, X_train, y_train, cv=10, scoring="neg_mean_squared_error")
).mean()
# Finally draw figure of Multiple Linear Regression Model
plt.scatter(X_train, y_train, s=100)
# sort the values of x before the line plot so the fitted line is drawn left to right
sort_axis = operator.itemgetter(0)
sorted_zip = sorted(zip(X_train, y_pred), key=sort_axis)
X_sorted, y_pred_sorted = zip(*sorted_zip)
plt.plot(X_sorted, y_pred_sorted, color="r")
plt.show()
# * This was just for train set and you can also do for test set.
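# A minimal sketch of the test-set evaluation hinted at above (an addition, not part of
# the original notebook); it reuses the fitted mlin_df and the held-out X_test / y_test.
y_pred_test = mlin_df.predict(X_test)
rmse_test = np.sqrt(mean_squared_error(y_test, y_pred_test))
print("RMSE Score for Test set: " + "{:.2}".format(rmse_test))
print("R2 Score for Test set: " + "{:.3}".format(r2_score(y_test, y_pred_test)))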
# # 3.Polynomial Regression
from sklearn.preprocessing import PolynomialFeatures
poly_df = PolynomialFeatures(degree=5)
transform_poly = poly_df.fit_transform(X_train)
linreg2 = LinearRegression()
linreg2.fit(transform_poly, y_train)
polynomial_predict = linreg2.predict(transform_poly)
rmse = np.sqrt(mean_squared_error(y_train, polynomial_predict))
r2 = r2_score(y_train, polynomial_predict)
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.2}".format(r2))
plt.scatter(X_train, y_train, s=50)
# sort the values of x before the line plot, without overwriting X_train
sort_axis = operator.itemgetter(0)
sorted_zip = sorted(zip(X_train, polynomial_predict), key=sort_axis)
X_sorted, poly_pred_sorted = zip(*sorted_zip)
plt.plot(X_sorted, poly_pred_sorted, color="m")
plt.show()
# * This was just for train set and you can also do for test set.
# # 4.Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
dt_reg = DecisionTreeRegressor() # create DecisionTreeReg with sklearn
dt_reg.fit(X_train, y_train)
dt_predict = dt_reg.predict(X_train)
# dt_predict.mean()
plt.scatter(X_train, y_train, color="red") # scatter draw
X_grid = np.arange(min(np.array(X_train)), max(np.array(X_train)), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.plot(X_grid, dt_reg.predict(X_grid), color="g") # line draw
plt.xlabel("Apparent Temperature")
plt.ylabel("Humidity")
plt.title("Decision Tree Model")
plt.show()
rmse = np.sqrt(mean_squared_error(y_train, dt_predict))
r2 = r2_score(y_train, dt_predict)
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.2}".format(r2))
# * This was just for train set and you can also do for test set.
# # 5.Random Forest Model
from sklearn.ensemble import RandomForestRegressor
rf_reg = RandomForestRegressor(n_estimators=5, random_state=0)
rf_reg.fit(X_train, y_train)
rf_predict = rf_reg.predict(X_train)
# rf_predict.mean()
plt.scatter(X_train, y_train, color="red") # scatter draw
X_grid = np.arange(min(np.array(X_train)), max(np.array(X_train)), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.plot(X_grid, rf_reg.predict(X_grid), color="b") # line draw
plt.xlabel("Apparent Temperature")
plt.ylabel("Humidity")
plt.title("Random Forest Model")
plt.show()
rmse = np.sqrt(mean_squared_error(y_train, rf_predict))
r2 = r2_score(y_train, rf_predict)
print("RMSE Score for Train set: " + "{:.2}".format(rmse))
print("R2 Score for Train set: " + "{:.2}".format(r2))
|
# **Objective**
# The objective of milestone-1 is to perform data preprocessing and EDA to understand customer churn. The output of milestone-1 will be used in milestones-2 and 3 to build machine learning models to predict customer churn and to create an interactive dashboard for decision making. In order to do that, follow the solution approach given in the capstone project overview and use the learners file for milestone-1 to answer the business questions.
# ## Let us start by importing the required libraries
# Import the libraries to help with reading and manipulating data
import numpy as np
import pandas as pd
# Libraries to help with data visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Removes the limit for the number of displayed columns
pd.set_option("display.max_columns", None)
# Sets the limit for the number of displayed rows
pd.set_option("display.max_rows", 200)
# ## Import the data
# Loading the dataset
df = pd.read_csv("/kaggle/input/milestone1-dataset/milestone1_dataset.csv")
# ## Data Understanding
# ### Write a code to display the first and last 5 rows of the dataset.
# display the first 5 rows of the dataset
df.head()
# display the last 5 rows of the dataset
df.tail()
# ### How many rows and columns are present in the data?
# display number of rows and columns
df.shape
# The dataset has 6499 rows and 21 columns
# ### What are the datatypes of the different columns in the dataset?
# info : columns , count, non-null and datatype
df.info()
# Write down the observations as per the information provided by running the above code.
# - There are 5 numerical columns and 16 object type columns in the dataset.
# - CustomerID can be changed to string or deleted.
# - Gender and Senior_Citizen are read as integers but they are categories.
# - The other object type columns are categories.
# - 9 observations have a missing value in Total_Charges.
# - All the other columns have no nulls; however, we will check for missing values in the categorical variables further in the analysis.
# ### Correct the datatype of those columns which are not correct. Also, convert all object datatype columns into category datatypes.
# Convert CustomerID to string , also gender and Senior citizen to categories
df = df.astype(
{"CustomerID": "string", "Gender": "category", "Senior_Citizen": "category"}
)
# Create a list of categorical variables to use it further in the analysis.
cat_col = ["Gender", "Senior_Citizen"]
# Display the type
df.dtypes
# Convert all the columns object to categories
for feature in df.columns: # Loop through all columns in the dataframe
if df[feature].dtype == "object": # Only apply for columns Object
df[feature] = df[feature].astype("category") # Replace object with category
cat_col.append(
feature
        )  # add the category column to the list created in the previous step
# display Infos
df.info()
# ### Check the statistical summary of the data
# The summary statistics of the data
df.describe().T
# Write down the observations after running the above code.
# - The minimum Tenure is 0, which may mean that some customers have not finished their contract or canceled it just after subscribing.
# - Monthly charge is between 18 and 118 and the average is 65 USD.
# - The range of total charges is between 18.80 and 88684.80 USD.
# ## Data Preprocessing
# ### Check for duplicate entries in the data
# Check for duplicate entries in the data
df.duplicated().value_counts()
# There are no duplicate observations.
# ### Check if any discrepancy is present in the categorical column values.
# Write the code here
for column in cat_col:
print(df[column].value_counts())
print("-" * 40)
# - The column Internet_Service has 2 values, "Fiber-optic" and "Fiber optic", that have the same meaning, so we will correct that.
# - "No internet service" and "No phone service" are not missing values; they explicitly indicate that the customer has no internet or phone service.
# #### Replace the incorrect value with the correct value in the categorical column
# Correct the value (assign back instead of an in-place replace on the column view)
df["Internet_Service"] = (
    df["Internet_Service"].astype(str).replace("Fiber-optic", "Fiber optic").astype("category")
)
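# Quick added sanity check (not in the original notebook) that the two spellings were
# merged into a single value.
df["Internet_Service"].value_counts()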
# ### Are there any missing values in the data? If so, treat them using an appropriate method.
# find the missing values
df.isna().sum()
# let's visualize observations with missing value in total_charges
df.loc[df["Total_Charges"].isna()]
# We notice that all these observations have Tenure = 0.
# Let's also check all observations with tenure=0.
df.loc[df["Tenure"] == 0]
# The relationship between Tenure = 0 and a null Total_Charges is one-to-one, which may mean that these customers have not been billed yet,
# so we can replace the missing Total_Charges with 0.
# Replace null by 0
df["Total_Charges"] = df["Total_Charges"].fillna(0)
# display the missing values to ensure that there is no missing value
df.isna().sum()
# #### Let's try to fill in the missing value with the median value of the column.
# NB: 0 seems to be a better fill value than the median here
# ##### Now, after handling the missing values and also checking the data types, let's visualise the dataset.
# ## Data Visualization
# In addition to the observations, perform each of the five relevant univariate and bivariate analyses of different columns with the help of the below functions.
# ### Univariate Analysis
# Before doing the visualization, please run the below codes:
def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None):
"""
Boxplot and histogram combined
data: dataframe
feature: dataframe column
figsize: size of figure (default (12,7))
kde: whether to show the density curve (default False)
bins: number of bins for histogram (default None)
"""
f2, (ax_box2, ax_hist2) = plt.subplots(
nrows=2, # Number of rows of the subplot grid= 2
sharex=True, # x-axis will be shared among all subplots
gridspec_kw={"height_ratios": (0.25, 0.75)},
figsize=figsize,
) # creating the 2 subplots
sns.boxplot(
data=data, x=feature, ax=ax_box2, showmeans=True, color="violet"
) # boxplot will be created and a star will indicate the mean value of the column
sns.histplot(
data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter"
) if bins else sns.histplot(
data=data, x=feature, kde=kde, ax=ax_hist2
) # For histogram
ax_hist2.axvline(
data[feature].mean(), color="green", linestyle="--"
) # Add mean to the histogram
ax_hist2.axvline(
data[feature].median(), color="black", linestyle="-"
) # Add median to the histogram
# function to create labeled barplots
def labeled_barplot(data, feature, perc=False, n=None):
"""
Barplot with percentage at the top
data: dataframe
feature: dataframe column
perc: whether to display percentages instead of count (default is False)
n: displays the top n category levels (default is None, i.e., display all levels)
"""
total = len(data[feature]) # length of the column
count = data[feature].nunique()
if n is None:
plt.figure(figsize=(count + 1, 5))
else:
plt.figure(figsize=(n + 1, 5))
plt.xticks(rotation=90, fontsize=15)
ax = sns.countplot(
data=data,
x=feature,
palette="Paired",
order=data[feature].value_counts().index[:n].sort_values(),
)
for p in ax.patches:
if perc == True:
label = "{:.1f}%".format(
100 * p.get_height() / total
) # percentage of each class of the category
else:
label = p.get_height() # count of each level of the category
x = p.get_x() + p.get_width() / 2 # width of the plot
y = p.get_height() # height of the plot
ax.annotate(
label,
(x, y),
ha="center",
va="center",
size=12,
xytext=(0, 5),
textcoords="offset points",
) # annotate the percentage
plt.show() # show the plot
def stacked_barplot(data, predictor, target):
"""
Print the category counts and plot a stacked bar chart
data: dataframe
predictor: independent variable
target: target variable
"""
count = data[predictor].nunique()
sorter = data[target].value_counts().index[-1]
tab1 = pd.crosstab(data[predictor], data[target], margins=True).sort_values(
by=sorter, ascending=False
)
print(tab1)
print("-" * 120)
tab = pd.crosstab(data[predictor], data[target], normalize="index").sort_values(
by=sorter, ascending=False
)
tab.plot(kind="bar", stacked=True, figsize=(count + 5, 5))
plt.legend(
loc="lower left",
frameon=False,
)
plt.legend(loc="upper left", bbox_to_anchor=(1, 1))
plt.show()
### function to plot distributions wrt target
def distribution_plot_wrt_target(data, predictor, target):
fig, axs = plt.subplots(2, 2, figsize=(12, 10))
target_uniq = data[target].unique()
axs[0, 0].set_title("Distribution of target for target=" + str(target_uniq[0]))
sns.histplot(
data=data[data[target] == target_uniq[0]],
x=predictor,
kde=True,
ax=axs[0, 0],
color="teal",
stat="density",
)
axs[0, 1].set_title("Distribution of target for target=" + str(target_uniq[1]))
sns.histplot(
data=data[data[target] == target_uniq[1]],
x=predictor,
kde=True,
ax=axs[0, 1],
color="orange",
stat="density",
)
axs[1, 0].set_title("Boxplot w.r.t target")
sns.boxplot(data=data, x=target, y=predictor, ax=axs[1, 0], palette="gist_rainbow")
axs[1, 1].set_title("Boxplot (without outliers) w.r.t target")
sns.boxplot(
data=data,
x=target,
y=predictor,
ax=axs[1, 1],
showfliers=False,
palette="gist_rainbow",
)
plt.tight_layout()
plt.show()
# #### Do the univariate analysis of different integer and categorical variables and write down the observations.
# Hint: Plot different visualizations using the above function
# #### Tenure :
histogram_boxplot(df, "Tenure")
# - There are two peaks at the two ends of the interval, which together account for about 30% of the observations.
# - There is a noticeable spike in the number of customers with tenure around 65 and above.
# - For the rest, the distribution is approximately uniform with some slight differences.
# - There are no outliers.
# #### Monthly charges :
histogram_boxplot(df, "Monthly_Charges")
# - The distribution is left-skewed.
# - There is a peak at the lower bound of the interval, which means that a large number of customers are subscribed to the cheapest services.
# - The number of contracts that cost between 75 and 90 is slightly higher.
# - There are no outliers.
# - The mean and the median are both above 60 $.
# #### Total Charges
histogram_boxplot(df, "Total_Charges")
# - The distribution of total charges is right-skewed.
# - Most customers (75 %) have total charges <= 4000.
# - There is a peak at the lower bound of the interval, which could be due to tenure and monthly charges.
# - There are no outliers.
# #### Gender
labeled_barplot(df, "Gender")
# - There are more male customers than female, but the difference is not large.
# #### Senior Citizen:
labeled_barplot(df, "Senior_Citizen")
# There are fewer senior citizens in the dataset
# #### Partner:
labeled_barplot(df, "Partner")
# There is only a slight difference between customers with partners and customers without partners.
# #### Dependents:
labeled_barplot(df, "Dependents")
# Most customers are without dependents.
# #### Phone Service:
labeled_barplot(df, "Phone_Service")
# 90 % of customers have phone service.
# #### Multiple Lines:
labeled_barplot(df, "Multiple_Lines")
# - Nearly 42% of customers have multiple lines and 48% don't. The rest (~10%) don't have phone service.
# #### Internet Service:
labeled_barplot(df, "Internet_Service")
# - Approx 44% of customers have Fiber optic.
# - Approx 34% of customers have DSL.
# - The rest (~22%) don't have Internet service.
# #### Online Security:
labeled_barplot(df, "Online_Security")
# - Approx 50% don't have Online security.
# - Approx 29% customers have online security.
# - The rest (~21%) don't have Internet service.
# #### Online Backup:
labeled_barplot(df, "Online_Backup")
# - Approx 43% don't have Online backup.
# - Approx 34% customers have online backup.
# - The rest (~23%) don't have Internet service.
# #### Device_Protection:
labeled_barplot(df, "Device_Protection")
# - Approx 43% don't have device protection.
# - Approx 34% customers have device protection.
# - The rest (~23%) don't have Internet service.
# #### Tech_Support :
labeled_barplot(df, "Tech_Support")
# - Approx 49% don't have Tech support.
# - Approx 29% customers have Tech support.
# - The rest (~22%) don't have Internet service.
# #### Streaming TV :
labeled_barplot(df, "Streaming_TV")
# - Approx 40% don't have Streaming TV.
# - Approx 38% customers have Streaming TV.
# - The rest (~22%) don't have Internet service.
# #### Streaming_Moviest :
labeled_barplot(df, "Streaming_Movies")
# - Approx 39% don't have Streaming Movies.
# - Approx 38% customers have Streaming movies.
# - The rest (~23%) don't have Internet service.
# #### Contract:
labeled_barplot(df, "Contract")
# - Approx 55% have Month to month contract.
# - Approx 24% customers have 2 years contract.
# - Approx 20% customers have 1 year contract.
# #### Paperless_Billing
labeled_barplot(df, "Paperless_Billing")
# - The majority of customers (~60%) use paperless billing.
# #### Payment Method:
labeled_barplot(df, "Payment_Method")
# - Electronic check is the payment method most used by customers, followed by mailed check.
# - There is no big difference between bank transfer and credit card.
# #### Churn:
labeled_barplot(df, "Churn")
# - We have 73% observations for non-churn and 27% observations for churn.
# - This variable is our target variable. we will analyze what are the factors that contribute to this result by machine learning.
# ### Bivariate Analysis
# #### Plot the heatmap.
# Visualization with heatmap
plt.figure(figsize=(15, 7))
sns.heatmap(df.corr(), annot=True, vmin=-1, vmax=1, fmt=".2f", cmap="Spectral")
plt.show()
# Observations
# - There is a strong correlation between total charges and tenure.
# - There is a positive correlation between monthly charges and total charges. It can be considered moderate to strong.
# - There is a weak correlation between Monthly Charges and Tenure.
# #### Do the bivariate analysis between column name - "Churn" and different integer and categorical variables and write down the observations.
# Hint: Plot different visualizations using the above function
# #### Monthly Charges and Churn:
distribution_plot_wrt_target(df, "Monthly_Charges", "Churn")
# - We can see that the density of monthly charges is higher when churn = 'yes'.
# - We can also see that the first 3 quartiles of the distribution for churn = 'yes' are higher compared to churn = 'no'.
# - This shows that customers with high monthly charges are more likely to churn.
# #### Total charges and churn:
distribution_plot_wrt_target(df, "Total_Charges", "Churn")
# - We can see that the density of total charges is lower when churn = 'yes'.
# - We can also see that the distribution for churn = 'yes' is lower compared to churn = 'no'.
# - This shows that observations with churn = 'yes' mostly have lower total charges, which is expected since churn = 'yes' indicates lost revenue.
# - There are some outliers for churn = 'yes'.
# #### Tenure and churn:
distribution_plot_wrt_target(df, "Tenure", "Churn")
# - We can see that the density of Tenure is higher when churn = 'no'.
# - We can see that both churn = 'yes' and 'no' have a peak in tenure between 0 and 5.
# - We can see that there is a peak for churn = 'no' at tenure over 65.
# - We can also see that the first 3 quartiles of the distribution for churn = 'yes' are lower compared to churn = 'no'.
# - This shows that observations with churn = 'yes' mostly have a lower Tenure, which is expected.
# - There are some outliers for churn = 'yes'.
# #### Churn Vs Gender
stacked_barplot(df, "Gender", "Churn")
# There is no difference in the percentage of customers with Churn = 'yes' between males and females.
# #### Churn Vs Senior Citizen:
stacked_barplot(df, "Senior_Citizen", "Churn")
# - Senior citizens have a larger percentage of churn = 'yes'.
# #### Churn Vs Partner
stacked_barplot(df, "Partner", "Churn")
# - Customers without partners have a larger percentage of churn = 'yes'.
# #### Churn Vs Dependant
stacked_barplot(df, "Dependents", "Churn")
# - Customers without dependents have a larger percentage of churn = 'yes'.
# #### Churn Vs Phone service:
stacked_barplot(df, "Phone_Service", "Churn")
# - There is no difference in churn between customers who have phone service and those who don't.
# #### Churn Vs Multiple_Lines
stacked_barplot(df, "Multiple_Lines", "Churn")
# - The churn rate of customers who have multiple lines is slightly higher than the churn rate of customers who don't have one or those who do not have telephone service.
# - There is no difference in churn between customers who don't have phone service and those who don't have multiple lines.
# #### Churn Vs Internet Service:
stacked_barplot(df, "Internet_Service", "Churn")
# - The churn rate of customers who have fiber optic is higher than the churn rate of customers who have DSL and those who do not have any Internet service.
# - The rate of customers who don't have internet service is the lowest.
# ### Churn Vs Online Security:
stacked_barplot(df, "Online_Security", "Churn")
# - The churn rate of customers who don't have online security is higher than the churn rate of customers who have it and those who don't have any Internet service.
# - The churn rate of customers who don't have internet service is the lowest.
# ### Churn Vs Online Backup:
stacked_barplot(df, "Online_Backup", "Churn")
# - The churn rate of customers who don't have online backup is higher than the churn rate of customers who have it and those who don't have any Internet service.
# - The churn rate of customers who don't have internet service is the lowest.
# ### Churn Vs Device_Protection:
stacked_barplot(df, "Device_Protection", "Churn")
# - The churn rate of customers who don't have device protection is higher than the churn rate of customers who have it and those who don't have any Internet service.
# - The churn rate of customers who don't have internet service is the lowest.
# ### Churn Vs Tech Support:
stacked_barplot(df, "Tech_Support", "Churn")
# - The churn rate of customers who don't have Tech support is higher than the churn rate of customers who have it and those who don't have any Internet service.
# - The churn rate of customers who don't have internet service is the lowest.
# ### Churn Vs Streaming TV:
stacked_barplot(df, "Streaming_TV", "Churn")
# - There is no (considerable) difference between the churn rate of customers who have Streaming TV and those who don't.
# - The churn rate of customers who don't have internet service is the lowest.
# ### Churn Vs Streaming Movies:
stacked_barplot(df, "Streaming_Movies", "Churn")
# - There is no (considerable) difference between the churn rate of customers who have Streaming Movies and those who don't.
# - The churn rate of customers who don't have internet service is the lowest.
# ### Churn Vs Contract:
stacked_barplot(df, "Contract", "Churn")
# - The month-to-month contract has the highest churn rate, followed by the one-year contract and then the two-year contract.
# - The longer the contract, the less the risk of the customer to churn.
# #### Churn Vs Paperless_Billing:
stacked_barplot(df, "Paperless_Billing", "Churn")
# - The churn rate of customers who choose paperless billing is the highest.
# #### Churn Vs Payment method:
stacked_barplot(df, "Payment_Method", "Churn")
# - The churn rate of customers who pay with electronic check is the highest, followed by mailed check.
# - There is no difference between the churn rate in credit card and bank transfer.
# ## Answering business questions
# ##### [Q1] - Display a table to show the relationship between contracts and payment method and write down the observations.
dfgp = df.groupby(["Contract", "Payment_Method"], as_index=False)["CustomerID"].count()
pd.pivot_table(
dfgp, index=dfgp["Contract"], columns=dfgp["Payment_Method"], values=["CustomerID"]
)["CustomerID"]
# - The most used payment method in month-to-month contracts is the Electronic check followed by Mailed check.
# - There is no big difference in payment methods for one-year contracts, but credit cards and bank transfer are slightly higher than cheques.
# - Credit cards and bank transfers are the most used in two-year contracts, postal checks are moderately used but electronic checks are the least used.
# ##### [Q2] - Display a table to show the relationship between payment and internet service and write down the observations.
dfgp = df.groupby(["Internet_Service", "Payment_Method"], as_index=False)[
"CustomerID"
].count()
pd.pivot_table(
dfgp,
index=dfgp["Internet_Service"],
columns=dfgp["Payment_Method"],
values=["CustomerID"],
)["CustomerID"]
# - There is no big difference in payment methods for DSL, but credit cards and bank transfer are slightly lower than cheques.
# - The payment method most used by customers with Optical Fiber is the Electronic Check followed by Bank Transfer and Bank Card.
# - Customers who don't have Internet service mainly use Mailed checks.
# ##### [Q3] - Display a table to show the relationship between contracts and internet service and write down the observations.
#
dfgp = df.groupby(["Contract", "Internet_Service"], as_index=False)[
"CustomerID"
].count()
pd.pivot_table(
dfgp,
index=dfgp["Contract"],
columns=dfgp["Internet_Service"],
values=["CustomerID"],
)["CustomerID"]
# - Fiber optic customers mostly use the Month-to-month contract type, followed by one year and then two year.
# - DSL customers mostly use the Month-to-month contract type, followed by two year and then one year.
# - No internet service users mostly use the two-year contract type, followed by month-to-month and then one-year.
# ##### [Q4] - Are there any outliers in the numerical columns
# Hint: Write a code and check the percentile values of different columns
# select numerical variables
df_num = df[["Tenure", "Monthly_Charges", "Total_Charges"]]
# calculate percentile
q1 = df_num.quantile(0.25)
q3 = df_num.quantile(0.75)
IQR = q3 - q1
# select observations that fall below q1 - 1.5*IQR or above q3 + 1.5*IQR in any column
outlier_mask = (df_num < (q1 - 1.5 * IQR)) | (df_num > (q3 + 1.5 * IQR))
outliers = df_num[outlier_mask.any(axis=1)]
print("number of outliers: " + str(outliers.shape[0]))
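# As the hint suggests, we can also inspect the percentile values of each numerical column
# directly (a small added check, not part of the original notebook).
df_num.describe(percentiles=[0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99]).T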
# Observations
# - There are no outliers in the dataset.
# ##### [Q5] - What is the percentage of customers who cancelled their subscription?
# Calculate prctg using shape
print(
"The percentage of customers who cancelled their subscription is : {:.2f}%".format(
round((df.loc[df["Churn"] == "Yes"].shape[0] / df.shape[0]) * 100, 2)
)
)
# ##### [Q6] - What is the average monthly charge for different types of internet services?
print("The average monthly charge for each types of internet service is :")
# Calculate the average using groupby and mean
round(df.groupby("Internet_Service")["Monthly_Charges"].mean(), 2).to_frame()
# Monthly charges of Fiber optic is the highest.
# ##### [Q7] - What is the total revenue for different Internet Services?
print("The total revenue for different Internet Services is :")
# Calculate the total revenue using groupby and sum
df.groupby("Internet_Service")["Total_Charges"].sum().to_frame()
# Despite the churn, fiber optic generates the largest total revenue, making it the most important source of revenue.
# ##### [Q8] - What is the average tenure for different contracts?
print("The average tenure for different contracts is :")
# Calculate the average using groupby and mean
round(df.groupby("Contract")["Tenure"].mean(), 2).to_frame()
# The longer the contract, the longer the customer's average tenure time.
# ##### [Q9] - What percentage of customers have online protection?
# #### Online_Security:
# Calculate prctg using shape
print(
"The percentage of customers who have online Security is : {:.2f}%".format(
round((df.loc[df["Online_Security"] == "Yes"].shape[0] / df.shape[0]) * 100, 2)
)
)
# #### Device_Protection:
# Calculate prctg using shape
print(
"The percentage of customers who have Device Protection is : {:.2f}%".format(
round(
(df.loc[df["Device_Protection"] == "Yes"].shape[0] / df.shape[0]) * 100, 2
)
)
)
# ##### [Q10] - Write a code to replace 'Yes' with 1 and 'No' with 0 in the Churn column.
df["Churn"] = df["Churn"].map({"Yes": 1, "No": 0})  # encode Churn as 1/0 (map is safer than an in-place replace on a categorical column)
# ## Summary:
# - Customers with high monthly charges are more likely to churn; the company may want to revise its pricing.
# - Senior citizens, customers without partners and customers without dependents have a higher percentage of churn = 'yes'; to avoid this, the company could make offers adapted to these customers.
# - Customers who don't have these products are more likely to churn:
# - Online backup
# - Online security
# - Device protection
# - Tech support.
# - However, the number of customers with these products remains modest. Thus, the company could make more effort to market these products.
# - The company may encourage customers to use long-term contracts.
# - Check whether there are problems to resolve with paperless billing and check payments, because there is more churn at this level.
# - The churn rate for customers equipped with fiber optic is the highest. Fiber optic observations also involve high monthly fees and month-to-month contracts - mostly paid by check - which we have identified as likely causes of churn.
# Since optical fiber is the highest source of revenue, the company should investigate what is the real cause of problem of this service and improve it.
# ### Now our EDA part is done. Export the data and use the exported data to be used in Milestone 2 for building machine learning models.
### Export the dataset to be used for next milestone
df.to_csv("A2Z_milestone_2_3_input.csv", index=False)
|
# ###### Notebook created by: Arnav Chavan (@[carnav0400](https://www.kaggle.com/carnav0400)), Udbhav Bamba (@[ubamba98](https://www.kaggle.com/ubamba98))
# ## NOTE: Turn on the Internet and GPU for this kernel before starting
# # How to add the dataset to the kernel
# * Click on "Add Data"
# * Search "CLabsCVcomp"
# * Click on "Add"
# * Done
# Ensembling my most accurate submissions.
# all my submissions are at https://www.kaggle.com/sakshamaggarwal/submissions-csv
import pandas as pd
import numpy as np
data0 = pd.read_csv("../input/submissions-csv/submission0.csv")
data1 = pd.read_csv("../input/submissions-csv/submission1.csv")
data2 = pd.read_csv("../input/submissions-csv/submission2.csv")
data3 = pd.read_csv("../input/submissions-csv/submission3.csv")
data4 = pd.read_csv("../input/submissions-csv/submission4.csv")
data5 = pd.read_csv("../input/submissions-csv/submission5.csv")
data6 = pd.read_csv("../input/submissions-csv/submission6.csv")
data7 = pd.read_csv("../input/submissions-csv/submission7.csv")
data8 = pd.read_csv("../input/submissions-csv/submission8.csv")
data9 = pd.read_csv("../input/submissions-csv/submission9.csv")
data10 = pd.read_csv("../input/submissions-csv/submission10.csv")
data11 = pd.read_csv("../input/submissions-csv/submission11.csv")
data12 = pd.read_csv("../input/submissions-csv/submission12.csv")
data13 = pd.read_csv("../input/submissions-csv/submission13.csv")
data14 = pd.read_csv("../input/submissions-csv/submission14.csv")
data15 = pd.read_csv("../input/submissions-csv/submission15.csv")
data16 = pd.read_csv("../input/submissions-csv/submission16.csv")
from collections import Counter
a = {}
for i in range(len(data0)):  # iterate over all rows (29996 in the original data)
data = Counter(
[
data4.genres[i],
data0.genres[i],
data7.genres[i],
data11.genres[i],
data16.genres[i],
data9.genres[i],
data5.genres[i],
]
)
a[i] = data.most_common(1)[0][0]
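# An alternative, vectorized way to take the same majority vote (a sketch, not part of the
# original notebook). Note that ties may be broken differently than with Counter.most_common.
voted = pd.concat(
    [data4.genres, data0.genres, data7.genres, data11.genres,
     data16.genres, data9.genres, data5.genres],
    axis=1,
).mode(axis=1)[0]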
genres_list = [v for k, v in a.items()]  # avoid shadowing the built-in `list`
genres_list
dataarray = np.asarray(genres_list)
dataarray = dataarray.reshape(-1, 1)
ids = np.asarray(data0.id)  # avoid shadowing the built-in `id`
ids = ids.reshape(-1, 1)
g = np.concatenate([ids, dataarray], axis=1)
df = pd.DataFrame(g, columns=["id", "genres"])
df.to_csv("final-submissions1.csv", index=False)
|
#
# Note: This is a work in progress notebook!
# - A lot can still be done in the feature engineering.
# - Also, the Spark DataFrame could be passed to TensorFlow without converting it to pandas.
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from pyspark.sql import SparkSession
from pyspark.sql import functions as f
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
tf.__version__
spark = SparkSession.builder.getOrCreate()
spark
# # Read Inputs
sdf_shops = spark.read.csv(
"/kaggle/input/competitive-data-science-predict-future-sales/shops.csv",
inferSchema=True,
header=True,
)
col_shops = ["shop_name", "shop_id"]
sdf_item_categories = spark.read.csv(
"/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv",
inferSchema=True,
header=True,
)
col_item_categories = ["item_category_name", "item_category_id"]
sdf_sales_train = spark.read.csv(
"/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv",
inferSchema=True,
header=True,
)
col_sales_train = ["date", "date_block_num", "shop_id", "item_id", "item_cnt_day"]
sdf_items = spark.read.csv(
"/kaggle/input/competitive-data-science-predict-future-sales/items.csv",
inferSchema=True,
header=True,
)
col_items = ["item_name", "item_id", "item_category_id"]
sdf_test = spark.read.csv(
"/kaggle/input/competitive-data-science-predict-future-sales/test.csv",
inferSchema=True,
header=True,
)
col_test = ["ID", "shop_id", "item_id"]
sdf_sample_submission = spark.read.csv(
"/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv",
inferSchema=True,
header=True,
)
col_sample_submission = ["ID", "item_cnt_month"]
# sdf_sample_submission.limit(5).toPandas().T
# sdf_sales_train.withColumn('date', f.from_unixtime(f.unix_timestamp(sdf_sales_train['date'],'%d.%m.%Y')))
# # sdf_sales_train.limit(10).toPandas().T
# sales_data = sdf_sales_train.toPandas()
# sales_data.dtypes
# # Negative Values ( Returns ) exclude or predict ?
# change all of them to 0!
from pyspark.sql import functions as f
sdf_sales_train = sdf_sales_train.withColumn(
"item_cnt_day",
f.when(sdf_sales_train["item_cnt_day"] < 0, 0).otherwise(
sdf_sales_train["item_cnt_day"]
),
)
sdf_sales_train = sdf_sales_train.withColumn(
"item_price",
f.when(sdf_sales_train["item_price"] < 0, 0).otherwise(
sdf_sales_train["item_price"]
),
)
sdf_sales_train.where(
(sdf_sales_train["item_cnt_day"] < 0) | (sdf_sales_train["item_price"] < 0)
).count()
# # Type casting / data cleaning
sales_data = sdf_sales_train.toPandas()
sales_data["date"] = pd.to_datetime(sales_data["date"], format="%d.%m.%Y")
sales_data.dtypes
# sales_data.T
dataset = sales_data.pivot_table(
index=["shop_id", "item_id"],
values=["item_cnt_day"],
columns=["date_block_num"],
fill_value=0,
aggfunc="sum",
)
dataset.reset_index(inplace=True)
dataset.head()
test_data = sdf_test.toPandas()
dataset = pd.merge(test_data, dataset, on=["item_id", "shop_id"], how="left")
dataset.fillna(0, inplace=True)
dataset.head()
dataset.drop(["shop_id", "item_id", "ID"], inplace=True, axis=1)
dataset.head()
# # Train Test Split
# for X we keep all columns except the last one
X_train = np.expand_dims(dataset.values[:, :-1], axis=2)
# the last column is our label
y_train = dataset.values[:, -1:]
# for test we keep all the columns except the first one
X_test = np.expand_dims(dataset.values[:, 1:], axis=2)
# lets have a look on the shape
print(X_train.shape, y_train.shape, X_test.shape)
# # TF Model building
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.LSTM(32, input_shape=X_train.shape[-2:]))
model.add(tf.keras.layers.Dropout(0.4))
model.add(tf.keras.layers.Dense(1, activation="relu"))
model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss="mae")
# model.compile(loss = 'mse',optimizer = 'adam', metrics = ['mean_squared_error'])
model.summary()
tf.keras.utils.plot_model(model, show_layer_names=True, show_shapes=True)
# # Train the model
model.fit(X_train, y_train, batch_size=4096, epochs=10)
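# A hedged sketch (not part of the original notebook): training with a validation split
# makes it easier to spot overfitting. In practice this call would replace the plain
# model.fit above; re-running it here simply continues training the same model.
history = model.fit(
    X_train,
    y_train,
    batch_size=4096,
    epochs=10,
    validation_split=0.1,  # hold out 10% of the rows for validation
)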
# # Use trained model for Prediction
# creating submission file
submission_pfs = model.predict(X_test)
# creating dataframe with required columns
submission = pd.DataFrame(
{"ID": test_data["ID"], "item_cnt_month": submission_pfs.ravel()}
)
submission.T  # quick look at the dataframe
# creating csv file from dataframe
submission.to_csv("submission.csv", index=False)
|